author     Cowering <cowering@users.noreply.github.com>  2015-07-02 16:53:38 -0500
committer  Cowering <cowering@users.noreply.github.com>  2015-07-08 14:31:26 -0500
commit     21b5ebeba7dc4ef9d8b1f809741ad73418989a16 (patch)
tree       d163ac2b76b5362c8da954ee8221922c6eb126dc /3rdparty/jsoncpp
parent     1e37ed88936371ede63ada5b7aea9dd1d0bc287c (diff)
GitHub version of jsoncpp with some fixes (pedantic, C++98, and others). Please try to compile for all platforms.
Diffstat (limited to '3rdparty/jsoncpp')
-rw-r--r--  3rdparty/jsoncpp/.gitignore | 25
-rw-r--r--  3rdparty/jsoncpp/.travis.yml | 17
-rw-r--r--  3rdparty/jsoncpp/CMakeLists.txt | 50
-rw-r--r--  3rdparty/jsoncpp/NEWS.txt | 2
-rw-r--r--  3rdparty/jsoncpp/README.md | 104
-rw-r--r--  3rdparty/jsoncpp/SConstruct | 2
-rw-r--r--  3rdparty/jsoncpp/amalgamate.py | 178
-rw-r--r--  3rdparty/jsoncpp/appveyor.yml | 34
-rw-r--r--  3rdparty/jsoncpp/dev.makefile | 27
-rw-r--r--  3rdparty/jsoncpp/devtools/__init__.py | 7
-rw-r--r--  3rdparty/jsoncpp/devtools/agent_vmw7.json | 4
-rw-r--r--  3rdparty/jsoncpp/devtools/agent_vmxp.json | 4
-rw-r--r--  3rdparty/jsoncpp/devtools/antglob.py | 111
-rw-r--r--  3rdparty/jsoncpp/devtools/batchbuild.py | 173
-rw-r--r--  3rdparty/jsoncpp/devtools/fixeol.py | 29
-rw-r--r--  3rdparty/jsoncpp/devtools/licenseupdater.py | 26
-rw-r--r--  3rdparty/jsoncpp/devtools/tarball.py | 47
-rw-r--r--  3rdparty/jsoncpp/doc/doxyfile.in | 5
-rw-r--r--  3rdparty/jsoncpp/doc/header.html | 2
-rw-r--r--  3rdparty/jsoncpp/doc/jsoncpp.dox | 123
-rw-r--r--  3rdparty/jsoncpp/doc/web_doxyfile.in | 2301
-rw-r--r--  3rdparty/jsoncpp/doxybuild.py | 144
-rw-r--r--  3rdparty/jsoncpp/include/json/assertions.h | 39
-rw-r--r--  3rdparty/jsoncpp/include/json/config.h | 19
-rw-r--r--  3rdparty/jsoncpp/include/json/forwards.h | 6
-rw-r--r--  3rdparty/jsoncpp/include/json/reader.h | 131
-rw-r--r--  3rdparty/jsoncpp/include/json/value.h | 606
-rw-r--r--  3rdparty/jsoncpp/include/json/version.h | 6
-rw-r--r--  3rdparty/jsoncpp/include/json/writer.h | 116
-rw-r--r--  3rdparty/jsoncpp/makefiles/vs71/lib_json.vcproj | 9
-rw-r--r--  3rdparty/jsoncpp/makerelease.py | 202
-rw-r--r--  3rdparty/jsoncpp/pkg-config/jsoncpp.pc.in | 4
-rw-r--r--  3rdparty/jsoncpp/scons-tools/globtool.py | 33
-rw-r--r--  3rdparty/jsoncpp/scons-tools/srcdist.py | 20
-rw-r--r--  3rdparty/jsoncpp/scons-tools/substinfile.py | 7
-rw-r--r--  3rdparty/jsoncpp/scons-tools/targz.py | 21
-rw-r--r--  3rdparty/jsoncpp/src/jsontestrunner/CMakeLists.txt | 17
-rw-r--r--  3rdparty/jsoncpp/src/jsontestrunner/main.cpp | 186
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/CMakeLists.txt | 80
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_batchallocator.h | 121
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_internalarray.inl | 360
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_internalmap.inl | 473
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_reader.cpp | 1272
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_value.cpp | 662
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_valueiterator.inl | 129
-rw-r--r--  3rdparty/jsoncpp/src/lib_json/json_writer.cpp | 642
-rw-r--r--  3rdparty/jsoncpp/src/test_lib_json/CMakeLists.txt | 32
-rw-r--r--  3rdparty/jsoncpp/src/test_lib_json/jsontest.cpp | 2
-rw-r--r--  3rdparty/jsoncpp/src/test_lib_json/jsontest.h | 6
-rw-r--r--  3rdparty/jsoncpp/src/test_lib_json/main.cpp | 777
-rw-r--r--  3rdparty/jsoncpp/test/cleantests.py | 12
-rw-r--r--  3rdparty/jsoncpp/test/data/test_comment_00.expected | 4
-rw-r--r--  3rdparty/jsoncpp/test/data/test_comment_00.json | 5
-rw-r--r--  3rdparty/jsoncpp/test/data/test_comment_01.expected | 2
-rw-r--r--  3rdparty/jsoncpp/test/data/test_comment_01.json | 2
-rw-r--r--  3rdparty/jsoncpp/test/data/test_comment_02.expected | 11
-rw-r--r--  3rdparty/jsoncpp/test/data/test_comment_02.json | 11
-rw-r--r--  3rdparty/jsoncpp/test/generate_expected.py | 9
-rw-r--r--  3rdparty/jsoncpp/test/pyjsontestrunner.py | 57
-rw-r--r--  3rdparty/jsoncpp/test/runjsontests.py | 119
-rw-r--r--  3rdparty/jsoncpp/test/rununittests.py | 46
-rw-r--r--  3rdparty/jsoncpp/travis.sh | 29
-rw-r--r--  3rdparty/jsoncpp/version | 2
-rw-r--r--  3rdparty/jsoncpp/version.in | 1
64 files changed, 6873 insertions(+), 2830 deletions(-)
diff --git a/3rdparty/jsoncpp/.gitignore b/3rdparty/jsoncpp/.gitignore
index 60c4a0b9aee..ef226a8875a 100644
--- a/3rdparty/jsoncpp/.gitignore
+++ b/3rdparty/jsoncpp/.gitignore
@@ -10,4 +10,27 @@
/libs/
/doc/doxyfile
/dist/
-/include/json/version.h
+#/version
+#/include/json/version.h
+
+# MSVC project files:
+*.sln
+*.vcxproj
+*.filters
+*.user
+*.sdf
+*.opensdf
+*.suo
+
+# MSVC build files:
+*.lib
+*.obj
+*.tlog/
+*.pdb
+
+# CMake-generated files:
+CMakeFiles/
+CTestTestFile.cmake
+cmake_install.cmake
+pkg-config/jsoncpp.pc
+jsoncpp_lib_static.dir/
diff --git a/3rdparty/jsoncpp/.travis.yml b/3rdparty/jsoncpp/.travis.yml
index a913b095849..17e52dcf89b 100644
--- a/3rdparty/jsoncpp/.travis.yml
+++ b/3rdparty/jsoncpp/.travis.yml
@@ -2,17 +2,24 @@
# http://about.travis-ci.org/docs/user/build-configuration/
# This file can be validated on:
# http://lint.travis-ci.org/
-before_install: sudo apt-get install cmake
+
+#before_install: sudo apt-get install -y cmake
+# cmake is pre-installed in Travis for both linux and osx
+
+before_install:
+ - sudo apt-get update -qq
+ - sudo apt-get install -qq valgrind
+os:
+ - linux
language: cpp
compiler:
- gcc
- clang
-script: cmake -DJSONCPP_LIB_BUILD_SHARED=$SHARED_LIBRARY -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_VERBOSE_MAKEFILE=$VERBOSE_MAKE . && make
+script: ./travis.sh
env:
matrix:
- - SHARED_LIBRARY=ON BUILD_TYPE=release VERBOSE_MAKE=false
- - SHARED_LIBRARY=OFF BUILD_TYPE=release VERBOSE_MAKE=false
- - SHARED_LIBRARY=OFF BUILD_TYPE=debug VERBOSE VERBOSE_MAKE=true
+ - SHARED_LIB=ON STATIC_LIB=ON CMAKE_PKG=ON BUILD_TYPE=release VERBOSE_MAKE=false
+ - SHARED_LIB=OFF STATIC_LIB=ON CMAKE_PKG=OFF BUILD_TYPE=debug VERBOSE_MAKE=true VERBOSE
notifications:
email:
- aaronjjacobs@gmail.com
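
The CI change above delegates the build to `travis.sh`, with the matrix exposed only as environment variables (`SHARED_LIB`, `STATIC_LIB`, `CMAKE_PKG`, `BUILD_TYPE`, `VERBOSE_MAKE`). The script's contents are not shown at this point in the diff; a minimal sketch of what it could do with those variables, assuming it simply forwards them to CMake:

    # Hypothetical sketch -- the real travis.sh is added elsewhere in this commit.
    cmake -DBUILD_SHARED_LIBS=$SHARED_LIB -DBUILD_STATIC_LIBS=$STATIC_LIB \
          -DJSONCPP_WITH_CMAKE_PACKAGE=$CMAKE_PKG -DCMAKE_BUILD_TYPE=$BUILD_TYPE \
          -DCMAKE_VERBOSE_MAKEFILE=$VERBOSE_MAKE .
    make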
diff --git a/3rdparty/jsoncpp/CMakeLists.txt b/3rdparty/jsoncpp/CMakeLists.txt
index 3e8f96ecb90..90eb14e30d0 100644
--- a/3rdparty/jsoncpp/CMakeLists.txt
+++ b/3rdparty/jsoncpp/CMakeLists.txt
@@ -1,12 +1,16 @@
+# vim: et ts=4 sts=4 sw=4 tw=0
+
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5)
PROJECT(jsoncpp)
ENABLE_TESTING()
-OPTION(JSONCPP_WITH_TESTS "Compile and run JsonCpp test executables" ON)
+OPTION(JSONCPP_WITH_TESTS "Compile and (for jsoncpp_check) run JsonCpp test executables" ON)
OPTION(JSONCPP_WITH_POST_BUILD_UNITTEST "Automatically run unit-tests as a post build step" ON)
OPTION(JSONCPP_WITH_WARNING_AS_ERROR "Force compilation to fail if a warning occurs" OFF)
OPTION(JSONCPP_WITH_PKGCONFIG_SUPPORT "Generate and install .pc files" ON)
OPTION(JSONCPP_WITH_CMAKE_PACKAGE "Generate and install cmake package files" OFF)
+OPTION(BUILD_SHARED_LIBS "Build jsoncpp_lib as a shared library." OFF)
+OPTION(BUILD_STATIC_LIBS "Build jsoncpp_lib static library." ON)
# Ensures that CMAKE_BUILD_TYPE is visible in cmake-gui on Unix
IF(NOT WIN32)
@@ -17,30 +21,21 @@ IF(NOT WIN32)
ENDIF(NOT CMAKE_BUILD_TYPE)
ENDIF(NOT WIN32)
+SET(DEBUG_LIBNAME_SUFFIX "" CACHE STRING "Optional suffix to append to the library name for a debug build")
SET(LIB_SUFFIX "" CACHE STRING "Optional arch-dependent suffix for the library installation directory")
SET(RUNTIME_INSTALL_DIR bin
CACHE PATH "Install dir for executables and dlls")
-SET(ARCHIVE_INSTALL_DIR lib${LIB_SUFFIX}
+SET(ARCHIVE_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}
CACHE PATH "Install dir for static libraries")
-SET(LIBRARY_INSTALL_DIR lib${LIB_SUFFIX}
+SET(LIBRARY_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}
CACHE PATH "Install dir for shared libraries")
-SET(INCLUDE_INSTALL_DIR include
+SET(INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/include
CACHE PATH "Install dir for headers")
SET(PACKAGE_INSTALL_DIR lib${LIB_SUFFIX}/cmake
CACHE PATH "Install dir for cmake package config files")
MARK_AS_ADVANCED( RUNTIME_INSTALL_DIR ARCHIVE_INSTALL_DIR INCLUDE_INSTALL_DIR PACKAGE_INSTALL_DIR )
-# This ensures shared DLL are in the same dir as executable on Windows.
-# Put all executables / libraries are in a project global directory.
-SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib
- CACHE PATH "Single directory for all static libraries.")
-SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib
- CACHE PATH "Single directory for all dynamic libraries on Unix.")
-SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin
- CACHE PATH "Single directory for all executable and dynamic libraries on Windows.")
-MARK_AS_ADVANCED( CMAKE_RUNTIME_OUTPUT_DIRECTORY CMAKE_LIBRARY_OUTPUT_DIRECTORY CMAKE_ARCHIVE_OUTPUT_DIRECTORY )
-
# Set variable named ${VAR_NAME} to value ${VALUE}
FUNCTION(set_using_dynamic_name VAR_NAME VALUE)
SET( "${VAR_NAME}" "${VALUE}" PARENT_SCOPE)
@@ -64,17 +59,24 @@ MACRO(jsoncpp_parse_version VERSION_TEXT OUPUT_PREFIX)
ENDMACRO(jsoncpp_parse_version)
# Read out version from "version" file
-FILE(STRINGS "version" JSONCPP_VERSION)
-
+#FILE(STRINGS "version" JSONCPP_VERSION)
+#SET( JSONCPP_VERSION_MAJOR X )
+#SET( JSONCPP_VERSION_MINOR Y )
+#SET( JSONCPP_VERSION_PATCH Z )
+SET( JSONCPP_VERSION 1.6.2 )
jsoncpp_parse_version( ${JSONCPP_VERSION} JSONCPP_VERSION )
-IF(NOT JSONCPP_VERSION_FOUND)
- MESSAGE(FATAL_ERROR "Failed to parse version string properly. Expect X.Y.Z")
-ENDIF(NOT JSONCPP_VERSION_FOUND)
+#IF(NOT JSONCPP_VERSION_FOUND)
+# MESSAGE(FATAL_ERROR "Failed to parse version string properly. Expect X.Y.Z")
+#ENDIF(NOT JSONCPP_VERSION_FOUND)
MESSAGE(STATUS "JsonCpp Version: ${JSONCPP_VERSION_MAJOR}.${JSONCPP_VERSION_MINOR}.${JSONCPP_VERSION_PATCH}")
# File version.h is only regenerated on CMake configure step
CONFIGURE_FILE( "${PROJECT_SOURCE_DIR}/src/lib_json/version.h.in"
- "${PROJECT_SOURCE_DIR}/include/json/version.h" )
+ "${PROJECT_SOURCE_DIR}/include/json/version.h"
+ NEWLINE_STYLE UNIX )
+CONFIGURE_FILE( "${PROJECT_SOURCE_DIR}/version.in"
+ "${PROJECT_SOURCE_DIR}/version"
+ NEWLINE_STYLE UNIX )
macro(UseCompilationWarningAsError)
if ( MSVC )
@@ -93,6 +95,14 @@ if ( MSVC )
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /W4 ")
endif( MSVC )
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ # using regular Clang or AppleClang
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wshorten-64-to-32")
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -Wall -Wextra -pedantic")
+endif()
+
IF(JSONCPP_WITH_WARNING_AS_ERROR)
UseCompilationWarningAsError()
ENDIF(JSONCPP_WITH_WARNING_AS_ERROR)
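
With `JSONCPP_LIB_BUILD_SHARED` replaced by the standard `BUILD_SHARED_LIBS`/`BUILD_STATIC_LIBS` pair above, a typical out-of-source configure now looks like this (mirroring the commands in the updated README further down):

    mkdir -p build/debug
    cd build/debug
    cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_STATIC_LIBS=ON -DBUILD_SHARED_LIBS=OFF -G "Unix Makefiles" ../..
    make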
diff --git a/3rdparty/jsoncpp/NEWS.txt b/3rdparty/jsoncpp/NEWS.txt
index 1be7b8ef894..5733fcd5ef4 100644
--- a/3rdparty/jsoncpp/NEWS.txt
+++ b/3rdparty/jsoncpp/NEWS.txt
@@ -80,7 +80,7 @@ New in SVN
(e.g. MSVC 2008 command prompt in start menu) before running scons.
- Added support for amalgamated source and header generation (a la sqlite).
- Refer to README.txt section "Generating amalgamated source and header"
+ Refer to README.md section "Generating amalgamated source and header"
for detail.
* Value
diff --git a/3rdparty/jsoncpp/README.md b/3rdparty/jsoncpp/README.md
index 099f17fa2fd..93c8d1f593a 100644
--- a/3rdparty/jsoncpp/README.md
+++ b/3rdparty/jsoncpp/README.md
@@ -7,34 +7,62 @@ pairs.
[json-org]: http://json.org/
-JsonCpp is a C++ library that allows manipulating JSON values, including
+[JsonCpp][] is a C++ library that allows manipulating JSON values, including
serialization and deserialization to and from strings. It can also preserve
existing comment in unserialization/serialization steps, making it a convenient
format to store user input files.
+[JsonCpp]: http://open-source-parsers.github.io/jsoncpp-docs/doxygen/index.html
+
## A note on backward-compatibility
-Very soon, we are switching to C++11 only. For older compilers, try the `pre-C++11` branch.
+* `1.y.z` is built with C++11.
+* `0.y.z` can be used with older compilers.
+* Major versions maintain binary-compatibility.
-Using JsonCpp in your project
+# Using JsonCpp in your project
-----------------------------
-
-The recommended approach to integrating JsonCpp in your project is to build
-the amalgamated source (a single `.cpp` file) with your own build system. This
-ensures consistency of compilation flags and ABI compatibility. See the section
-"Generating amalgamated source and header" for instructions.
+The recommended approach to integrating JsonCpp in your project is to include
+the [amalgamated source](#generating-amalgamated-source-and-header) (a single
+`.cpp` file and two `.h` files) in your project, and compile and build as you
+would any other source file. This ensures consistency of compilation flags and
+ABI compatibility, issues which arise when building shared or static
+libraries. See the next section for instructions.
The `include/` should be added to your compiler include path. Jsoncpp headers
should be included as follow:
#include <json/json.h>
-If JsonCpp was build as a dynamic library on Windows, then your project needs to
+If JsonCpp was built as a dynamic library on Windows, then your project needs to
define the macro `JSON_DLL`.
+Generating amalgamated source and header
+----------------------------------------
+JsonCpp is provided with a script to generate a single header and a single
+source file to ease inclusion into an existing project. The amalgamated source
+can be generated at any time by running the following command from the
+top-directory (this requires Python 2.6):
+
+ python amalgamate.py
+
+It is possible to specify header name. See the `-h` option for detail.
+
+By default, the following files are generated:
+* `dist/jsoncpp.cpp`: source file that needs to be added to your project.
+* `dist/json/json.h`: corresponding header file for use in your project. It is
+ equivalent to including `json/json.h` in non-amalgamated source. This header
+ only depends on standard headers.
+* `dist/json/json-forwards.h`: header that provides forward declaration of all
+ JsonCpp types.
+
+The amalgamated sources are generated by concatenating JsonCpp source in the
+correct order and defining the macro `JSON_IS_AMALGAMATION` to prevent inclusion
+of other headers.
-Building and testing with new CMake
------------------------------------
+# Contributing to JsonCpp
+Building and testing with CMake
+-------------------------------
[CMake][] is a C++ Makefiles/Solution generator. It is usually available on most
Linux system as package. On Ubuntu:
@@ -57,7 +85,7 @@ Steps for generating solution/makefiles using `cmake-gui`:
* Make "source code" point to the source directory.
* Make "where to build the binary" point to the directory to use for the build.
* Click on the "Grouped" check box.
-* Review JsonCpp build options (tick `JSONCPP_LIB_BUILD_SHARED` to build as a
+* Review JsonCpp build options (tick `BUILD_SHARED_LIBS` to build as a
dynamic library).
* Click the configure button at the bottom, then the generate button.
* The generated solution/makefiles can be found in the binary directory.
@@ -66,19 +94,17 @@ Alternatively, from the command-line on Unix in the source directory:
mkdir -p build/debug
cd build/debug
- cmake -DCMAKE_BUILD_TYPE=debug -DJSONCPP_LIB_BUILD_SHARED=OFF -G "Unix Makefiles" ../..
+ cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_STATIC_LIBS=ON -DBUILD_SHARED_LIBS=OFF -DARCHIVE_INSTALL_DIR=. -G "Unix Makefiles" ../..
make
-Running `cmake -`" will display the list of available generators (passed using
+Running `cmake -h` will display the list of available generators (passed using
the `-G` option).
By default CMake hides compilation commands. This can be modified by specifying
`-DCMAKE_VERBOSE_MAKEFILE=true` when generating makefiles.
-
Building and testing with SCons
-------------------------------
-
**Note:** The SCons-based build system is deprecated. Please use CMake; see the
section above.
@@ -107,14 +133,7 @@ If you are building with Microsoft Visual Studio 2008, you need to set up the
environment by running `vcvars32.bat` (e.g. MSVC 2008 command prompt) before
running SCons.
-
-Running the tests manually
---------------------------
-
-Note that test can be run using SCons using the `check` target:
-
- scons platform=$PLATFORM check
-
+## Running the tests manually
You need to run tests manually only if you are troubleshooting an issue.
In the instructions below, replace `path/to/jsontest` with the path of the
@@ -137,45 +156,21 @@ In the instructions below, replace `path/to/jsontest` with the path of the
# You can run the tests using valgrind:
python rununittests.py --valgrind path/to/test_lib_json
+## Running the tests using scons
+Note that tests can be run using SCons using the `check` target:
+
+ scons platform=$PLATFORM check
Building the documentation
--------------------------
-
Run the Python script `doxybuild.py` from the top directory:
python doxybuild.py --doxygen=$(which doxygen) --open --with-dot
See `doxybuild.py --help` for options.
-
-Generating amalgamated source and header
-----------------------------------------
-
-JsonCpp is provided with a script to generate a single header and a single
-source file to ease inclusion into an existing project. The amalgamated source
-can be generated at any time by running the following command from the
-top-directory (this requires Python 2.6):
-
- python amalgamate.py
-
-It is possible to specify header name. See the `-h` option for detail.
-
-By default, the following files are generated:
-* `dist/jsoncpp.cpp`: source file that needs to be added to your project.
-* `dist/json/json.h`: corresponding header file for use in your project. It is
- equivalent to including `json/json.h` in non-amalgamated source. This header
- only depends on standard headers.
-* `dist/json/json-forwards.h`: header that provides forward declaration of all
- JsonCpp types.
-
-The amalgamated sources are generated by concatenating JsonCpp source in the
-correct order and defining the macro `JSON_IS_AMALGAMATION` to prevent inclusion
-of other headers.
-
-
Adding a reader/writer test
---------------------------
-
To add a test, you need to create two files in test/data:
* a `TESTNAME.json` file, that contains the input document in JSON format.
@@ -195,10 +190,8 @@ The `TESTNAME.expected` file format is as follows:
See the examples `test_complex_01.json` and `test_complex_01.expected` to better
understand element paths.
-
Understanding reader/writer test output
---------------------------------------
-
When a test is run, output files are generated beside the input test files.
Below is a short description of the content of each file:
@@ -215,10 +208,7 @@ Below is a short description of the content of each file:
* `test_complex_01.process-output`: `jsontest` output, typically useful for
understanding parsing errors.
-
License
-------
-
See the `LICENSE` file for details. In summary, JsonCpp is licensed under the
MIT license, or public domain if desired and recognized in your jurisdiction.
-
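
For reference, a minimal consumer of the amalgamated sources described in the README above; this sketch is not part of the commit and assumes `dist/jsoncpp.cpp` is compiled into the project with `dist/` on the include path:

    #include <json/json.h>  // the amalgamated dist/json/json.h
    #include <iostream>
    #include <string>

    int main() {
        const std::string doc = "{\"name\": \"jsoncpp\"}";
        Json::Value root;
        Json::Reader reader;  // the classic reader API shipped with this version
        if (!reader.parse(doc, root)) {
            std::cerr << reader.getFormattedErrorMessages();
            return 1;
        }
        std::cout << root["name"].asString() << std::endl;  // prints "jsoncpp"
        return 0;
    }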
diff --git a/3rdparty/jsoncpp/SConstruct b/3rdparty/jsoncpp/SConstruct
index 1c55bcd0986..f3a73f773bf 100644
--- a/3rdparty/jsoncpp/SConstruct
+++ b/3rdparty/jsoncpp/SConstruct
@@ -237,7 +237,7 @@ RunUnitTests = ActionFactory(runUnitTests_action, runUnitTests_string )
env.Alias( 'check' )
srcdist_cmd = env['SRCDIST_ADD']( source = """
- AUTHORS README.txt SConstruct
+ AUTHORS README.md SConstruct
""".split() )
env.Alias( 'src-dist', srcdist_cmd )
diff --git a/3rdparty/jsoncpp/amalgamate.py b/3rdparty/jsoncpp/amalgamate.py
index 550f6a67624..1916bb0d5e7 100644
--- a/3rdparty/jsoncpp/amalgamate.py
+++ b/3rdparty/jsoncpp/amalgamate.py
@@ -1,6 +1,6 @@
"""Amalgate json-cpp library sources into a single source and header file.
-Requires Python 2.6
+Works with python2.6+ and python3.4+.
Example of invocation (must be invoked from json-cpp top directory):
python amalgate.py
@@ -10,46 +10,46 @@ import os.path
import sys
class AmalgamationFile:
- def __init__( self, top_dir ):
+ def __init__(self, top_dir):
self.top_dir = top_dir
self.blocks = []
- def add_text( self, text ):
- if not text.endswith( "\n" ):
+ def add_text(self, text):
+ if not text.endswith("\n"):
text += "\n"
- self.blocks.append( text )
-
- def add_file( self, relative_input_path, wrap_in_comment=False ):
- def add_marker( prefix ):
- self.add_text( "" )
- self.add_text( "// " + "/"*70 )
- self.add_text( "// %s of content of file: %s" % (prefix, relative_input_path.replace("\\","/")) )
- self.add_text( "// " + "/"*70 )
- self.add_text( "" )
- add_marker( "Beginning" )
- f = open( os.path.join( self.top_dir, relative_input_path ), "rt" )
+ self.blocks.append(text)
+
+ def add_file(self, relative_input_path, wrap_in_comment=False):
+ def add_marker(prefix):
+ self.add_text("")
+ self.add_text("// " + "/"*70)
+ self.add_text("// %s of content of file: %s" % (prefix, relative_input_path.replace("\\","/")))
+ self.add_text("// " + "/"*70)
+ self.add_text("")
+ add_marker("Beginning")
+ f = open(os.path.join(self.top_dir, relative_input_path), "rt")
content = f.read()
if wrap_in_comment:
content = "/*\n" + content + "\n*/"
- self.add_text( content )
+ self.add_text(content)
f.close()
- add_marker( "End" )
- self.add_text( "\n\n\n\n" )
-
- def get_value( self ):
- return "".join( self.blocks ).replace("\r\n","\n")
-
- def write_to( self, output_path ):
- output_dir = os.path.dirname( output_path )
- if output_dir and not os.path.isdir( output_dir ):
- os.makedirs( output_dir )
- f = open( output_path, "wb" )
- f.write( str.encode(self.get_value(), 'UTF-8') )
+ add_marker("End")
+ self.add_text("\n\n\n\n")
+
+ def get_value(self):
+ return "".join(self.blocks).replace("\r\n","\n")
+
+ def write_to(self, output_path):
+ output_dir = os.path.dirname(output_path)
+ if output_dir and not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
+ f = open(output_path, "wb")
+ f.write(str.encode(self.get_value(), 'UTF-8'))
f.close()
-def amalgamate_source( source_top_dir=None,
+def amalgamate_source(source_top_dir=None,
target_source_path=None,
- header_include_path=None ):
+ header_include_path=None):
"""Produces amalgated source.
Parameters:
source_top_dir: top-directory
@@ -57,69 +57,73 @@ def amalgamate_source( source_top_dir=None,
header_include_path: generated header path relative to target_source_path.
"""
print("Amalgating header...")
- header = AmalgamationFile( source_top_dir )
- header.add_text( "/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/)." )
- header.add_text( "/// It is intented to be used with #include <%s>" % header_include_path )
- header.add_file( "LICENSE", wrap_in_comment=True )
- header.add_text( "#ifndef JSON_AMALGATED_H_INCLUDED" )
- header.add_text( "# define JSON_AMALGATED_H_INCLUDED" )
- header.add_text( "/// If defined, indicates that the source file is amalgated" )
- header.add_text( "/// to prevent private header inclusion." )
- header.add_text( "#define JSON_IS_AMALGAMATION" )
- header.add_file( "include/json/version.h" )
- header.add_file( "include/json/config.h" )
- header.add_file( "include/json/forwards.h" )
- header.add_file( "include/json/features.h" )
- header.add_file( "include/json/value.h" )
- header.add_file( "include/json/reader.h" )
- header.add_file( "include/json/writer.h" )
- header.add_file( "include/json/assertions.h" )
- header.add_text( "#endif //ifndef JSON_AMALGATED_H_INCLUDED" )
-
- target_header_path = os.path.join( os.path.dirname(target_source_path), header_include_path )
+ header = AmalgamationFile(source_top_dir)
+ header.add_text("/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/).")
+ header.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
+ header.add_file("LICENSE", wrap_in_comment=True)
+ header.add_text("#ifndef JSON_AMALGATED_H_INCLUDED")
+ header.add_text("# define JSON_AMALGATED_H_INCLUDED")
+ header.add_text("/// If defined, indicates that the source file is amalgated")
+ header.add_text("/// to prevent private header inclusion.")
+ header.add_text("#define JSON_IS_AMALGAMATION")
+ header.add_file("include/json/version.h")
+ header.add_file("include/json/config.h")
+ header.add_file("include/json/forwards.h")
+ header.add_file("include/json/features.h")
+ header.add_file("include/json/value.h")
+ header.add_file("include/json/reader.h")
+ header.add_file("include/json/writer.h")
+ header.add_file("include/json/assertions.h")
+ header.add_text("#endif //ifndef JSON_AMALGATED_H_INCLUDED")
+
+ target_header_path = os.path.join(os.path.dirname(target_source_path), header_include_path)
print("Writing amalgated header to %r" % target_header_path)
- header.write_to( target_header_path )
+ header.write_to(target_header_path)
- base, ext = os.path.splitext( header_include_path )
+ base, ext = os.path.splitext(header_include_path)
forward_header_include_path = base + "-forwards" + ext
print("Amalgating forward header...")
- header = AmalgamationFile( source_top_dir )
- header.add_text( "/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/)." )
- header.add_text( "/// It is intented to be used with #include <%s>" % forward_header_include_path )
- header.add_text( "/// This header provides forward declaration for all JsonCpp types." )
- header.add_file( "LICENSE", wrap_in_comment=True )
- header.add_text( "#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED" )
- header.add_text( "# define JSON_FORWARD_AMALGATED_H_INCLUDED" )
- header.add_text( "/// If defined, indicates that the source file is amalgated" )
- header.add_text( "/// to prevent private header inclusion." )
- header.add_text( "#define JSON_IS_AMALGAMATION" )
- header.add_file( "include/json/config.h" )
- header.add_file( "include/json/forwards.h" )
- header.add_text( "#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED" )
-
- target_forward_header_path = os.path.join( os.path.dirname(target_source_path),
- forward_header_include_path )
+ header = AmalgamationFile(source_top_dir)
+ header.add_text("/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/).")
+ header.add_text('/// It is intended to be used with #include "%s"' % forward_header_include_path)
+ header.add_text("/// This header provides forward declaration for all JsonCpp types.")
+ header.add_file("LICENSE", wrap_in_comment=True)
+ header.add_text("#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED")
+ header.add_text("# define JSON_FORWARD_AMALGATED_H_INCLUDED")
+ header.add_text("/// If defined, indicates that the source file is amalgated")
+ header.add_text("/// to prevent private header inclusion.")
+ header.add_text("#define JSON_IS_AMALGAMATION")
+ header.add_file("include/json/config.h")
+ header.add_file("include/json/forwards.h")
+ header.add_text("#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED")
+
+ target_forward_header_path = os.path.join(os.path.dirname(target_source_path),
+ forward_header_include_path)
print("Writing amalgated forward header to %r" % target_forward_header_path)
- header.write_to( target_forward_header_path )
+ header.write_to(target_forward_header_path)
print("Amalgating source...")
- source = AmalgamationFile( source_top_dir )
- source.add_text( "/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/)." )
- source.add_text( "/// It is intented to be used with #include <%s>" % header_include_path )
- source.add_file( "LICENSE", wrap_in_comment=True )
- source.add_text( "" )
- source.add_text( "#include <%s>" % header_include_path )
- source.add_text( "" )
+ source = AmalgamationFile(source_top_dir)
+ source.add_text("/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/).")
+ source.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
+ source.add_file("LICENSE", wrap_in_comment=True)
+ source.add_text("")
+ source.add_text('#include "%s"' % header_include_path)
+ source.add_text("""
+#ifndef JSON_IS_AMALGAMATION
+#error "Compile with -I PATH_TO_JSON_DIRECTORY"
+#endif
+""")
+ source.add_text("")
lib_json = "src/lib_json"
- source.add_file( os.path.join(lib_json, "json_tool.h") )
- source.add_file( os.path.join(lib_json, "json_reader.cpp") )
- source.add_file( os.path.join(lib_json, "json_batchallocator.h") )
- source.add_file( os.path.join(lib_json, "json_valueiterator.inl") )
- source.add_file( os.path.join(lib_json, "json_value.cpp") )
- source.add_file( os.path.join(lib_json, "json_writer.cpp") )
+ source.add_file(os.path.join(lib_json, "json_tool.h"))
+ source.add_file(os.path.join(lib_json, "json_reader.cpp"))
+ source.add_file(os.path.join(lib_json, "json_valueiterator.inl"))
+ source.add_file(os.path.join(lib_json, "json_value.cpp"))
+ source.add_file(os.path.join(lib_json, "json_writer.cpp"))
print("Writing amalgated source to %r" % target_source_path)
- source.write_to( target_source_path )
+ source.write_to(target_source_path)
def main():
usage = """%prog [options]
@@ -137,12 +141,12 @@ Generate a single amalgated source and header file from the sources.
parser.enable_interspersed_args()
options, args = parser.parse_args()
- msg = amalgamate_source( source_top_dir=options.top_dir,
+ msg = amalgamate_source(source_top_dir=options.top_dir,
target_source_path=options.target_source_path,
- header_include_path=options.header_include_path )
+ header_include_path=options.header_include_path)
if msg:
- sys.stderr.write( msg + "\n" )
- sys.exit( 1 )
+ sys.stderr.write(msg + "\n")
+ sys.exit(1)
else:
print("Source succesfully amalagated")
diff --git a/3rdparty/jsoncpp/appveyor.yml b/3rdparty/jsoncpp/appveyor.yml
new file mode 100644
index 00000000000..546cb7ed13e
--- /dev/null
+++ b/3rdparty/jsoncpp/appveyor.yml
@@ -0,0 +1,34 @@
+# This is a comment.
+
+version: build.{build}
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\projects\jsoncpp
+
+platform:
+ - Win32
+ - x64
+
+configuration:
+ - Debug
+ - Release
+
+# scripts to run before build
+before_build:
+ - echo "Running cmake..."
+ - cd c:\projects\jsoncpp
+ - cmake --version
+ - if %PLATFORM% == Win32 cmake .
+ - if %PLATFORM% == x64 cmake -G "Visual Studio 12 2013 Win64" .
+
+build:
+ project: jsoncpp.sln # path to Visual Studio solution or project
+
+deploy:
+ provider: GitHub
+ auth_token:
+ secure: K2Tp1q8pIZ7rs0Ot24ZMWuwr12Ev6Tc6QkhMjGQxoQG3ng1pXtgPasiJ45IDXGdg
+ on:
+ branch: master
+ appveyor_repo_tag: true
diff --git a/3rdparty/jsoncpp/dev.makefile b/3rdparty/jsoncpp/dev.makefile
index dd16bdd6ef9..d288b166586 100644
--- a/3rdparty/jsoncpp/dev.makefile
+++ b/3rdparty/jsoncpp/dev.makefile
@@ -1,14 +1,35 @@
-all: build test-amalgamate
+# This is only for jsoncpp developers/contributors.
+# We use this to sign releases, generate documentation, etc.
+VER?=$(shell cat version)
+default:
+ @echo "VER=${VER}"
+sign: jsoncpp-${VER}.tar.gz
+ gpg --armor --detach-sign $<
+ gpg --verify $<.asc
+ # Then upload .asc to the release.
+jsoncpp-%.tar.gz:
+ curl https://github.com/open-source-parsers/jsoncpp/archive/$*.tar.gz -o $@
+dox:
+ python doxybuild.py --doxygen=$$(which doxygen) --in doc/web_doxyfile.in
+ rsync -va --delete dist/doxygen/jsoncpp-api-html-${VER}/ ../jsoncpp-docs/doxygen/
+ # Then 'git add -A' and 'git push' in jsoncpp-docs.
build:
mkdir -p build/debug
- cd build/debug; cmake -DCMAKE_BUILD_TYPE=debug -DJSONCPP_LIB_BUILD_SHARED=ON -G "Unix Makefiles" ../..
+ cd build/debug; cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_SHARED_LIBS=ON -G "Unix Makefiles" ../..
make -C build/debug
# Currently, this depends on include/json/version.h generated
# by cmake.
-test-amalgamate: build
+test-amalgamate:
python2.7 amalgamate.py
python3.4 amalgamate.py
+ cd dist; gcc -I. -c jsoncpp.cpp
+
+valgrind:
+ valgrind --error-exitcode=42 --leak-check=full ./build/debug/src/test_lib_json/jsoncpp_test
+
+clean:
+ \rm -rf *.gz *.asc dist/
.PHONY: build
diff --git a/3rdparty/jsoncpp/devtools/__init__.py b/3rdparty/jsoncpp/devtools/__init__.py
index c944e7cb0c0..d18a5216853 100644
--- a/3rdparty/jsoncpp/devtools/__init__.py
+++ b/3rdparty/jsoncpp/devtools/__init__.py
@@ -1 +1,6 @@
-# module \ No newline at end of file
+# Copyright 2010 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+# module
diff --git a/3rdparty/jsoncpp/devtools/agent_vmw7.json b/3rdparty/jsoncpp/devtools/agent_vmw7.json
index a1db7db6f18..0810a99544e 100644
--- a/3rdparty/jsoncpp/devtools/agent_vmw7.json
+++ b/3rdparty/jsoncpp/devtools/agent_vmw7.json
@@ -19,8 +19,8 @@
},
{"name": "shared_dll",
"variables": [
- ["JSONCPP_LIB_BUILD_SHARED=true"],
- ["JSONCPP_LIB_BUILD_SHARED=false"]
+ ["BUILD_SHARED_LIBS=true"],
+ ["BUILD_SHARED_LIBS=false"]
]
},
{"name": "build_type",
diff --git a/3rdparty/jsoncpp/devtools/agent_vmxp.json b/3rdparty/jsoncpp/devtools/agent_vmxp.json
index d34cf86addf..b627a7221a7 100644
--- a/3rdparty/jsoncpp/devtools/agent_vmxp.json
+++ b/3rdparty/jsoncpp/devtools/agent_vmxp.json
@@ -12,8 +12,8 @@
},
{"name": "shared_dll",
"variables": [
- ["JSONCPP_LIB_BUILD_SHARED=true"],
- ["JSONCPP_LIB_BUILD_SHARED=false"]
+ ["BUILD_SHARED_LIBS=true"],
+ ["BUILD_SHARED_LIBS=false"]
]
},
{"name": "build_type",
diff --git a/3rdparty/jsoncpp/devtools/antglob.py b/3rdparty/jsoncpp/devtools/antglob.py
index 8b7b4ca297e..c272f66343f 100644
--- a/3rdparty/jsoncpp/devtools/antglob.py
+++ b/3rdparty/jsoncpp/devtools/antglob.py
@@ -1,6 +1,9 @@
#!/usr/bin/env python
# encoding: utf-8
-# Baptiste Lepilleur, 2009
+# Copyright 2009 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
from __future__ import print_function
from dircache import listdir
@@ -54,9 +57,9 @@ LINKS = DIR_LINK | FILE_LINK
ALL_NO_LINK = DIR | FILE
ALL = DIR | FILE | LINKS
-_ANT_RE = re.compile( r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)' )
+_ANT_RE = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)')
-def ant_pattern_to_re( ant_pattern ):
+def ant_pattern_to_re(ant_pattern):
"""Generates a regular expression from the ant pattern.
Matching convention:
**/a: match 'a', 'dir/a', 'dir1/dir2/a'
@@ -65,30 +68,30 @@ def ant_pattern_to_re( ant_pattern ):
"""
rex = ['^']
next_pos = 0
- sep_rex = r'(?:/|%s)' % re.escape( os.path.sep )
+ sep_rex = r'(?:/|%s)' % re.escape(os.path.sep)
## print 'Converting', ant_pattern
- for match in _ANT_RE.finditer( ant_pattern ):
+ for match in _ANT_RE.finditer(ant_pattern):
## print 'Matched', match.group()
## print match.start(0), next_pos
if match.start(0) != next_pos:
- raise ValueError( "Invalid ant pattern" )
+ raise ValueError("Invalid ant pattern")
if match.group(1): # /**/
- rex.append( sep_rex + '(?:.*%s)?' % sep_rex )
+ rex.append(sep_rex + '(?:.*%s)?' % sep_rex)
elif match.group(2): # **/
- rex.append( '(?:.*%s)?' % sep_rex )
+ rex.append('(?:.*%s)?' % sep_rex)
elif match.group(3): # /**
- rex.append( sep_rex + '.*' )
+ rex.append(sep_rex + '.*')
elif match.group(4): # *
- rex.append( '[^/%s]*' % re.escape(os.path.sep) )
+ rex.append('[^/%s]*' % re.escape(os.path.sep))
elif match.group(5): # /
- rex.append( sep_rex )
+ rex.append(sep_rex)
else: # somepath
- rex.append( re.escape(match.group(6)) )
+ rex.append(re.escape(match.group(6)))
next_pos = match.end()
rex.append('$')
- return re.compile( ''.join( rex ) )
+ return re.compile(''.join(rex))
-def _as_list( l ):
+def _as_list(l):
if isinstance(l, basestring):
return l.split()
return l
@@ -105,37 +108,37 @@ def glob(dir_path,
dir_path = dir_path.replace('/',os.path.sep)
entry_type_filter = entry_type
- def is_pruned_dir( dir_name ):
+ def is_pruned_dir(dir_name):
for pattern in prune_dirs:
- if fnmatch.fnmatch( dir_name, pattern ):
+ if fnmatch.fnmatch(dir_name, pattern):
return True
return False
- def apply_filter( full_path, filter_rexs ):
+ def apply_filter(full_path, filter_rexs):
"""Return True if at least one of the filter regular expression match full_path."""
for rex in filter_rexs:
- if rex.match( full_path ):
+ if rex.match(full_path):
return True
return False
- def glob_impl( root_dir_path ):
+ def glob_impl(root_dir_path):
child_dirs = [root_dir_path]
while child_dirs:
dir_path = child_dirs.pop()
- for entry in listdir( dir_path ):
- full_path = os.path.join( dir_path, entry )
+ for entry in listdir(dir_path):
+ full_path = os.path.join(dir_path, entry)
## print 'Testing:', full_path,
- is_dir = os.path.isdir( full_path )
- if is_dir and not is_pruned_dir( entry ): # explore child directory ?
+ is_dir = os.path.isdir(full_path)
+ if is_dir and not is_pruned_dir(entry): # explore child directory ?
## print '===> marked for recursion',
- child_dirs.append( full_path )
- included = apply_filter( full_path, include_filter )
- rejected = apply_filter( full_path, exclude_filter )
+ child_dirs.append(full_path)
+ included = apply_filter(full_path, include_filter)
+ rejected = apply_filter(full_path, exclude_filter)
if not included or rejected: # do not include entry ?
## print '=> not included or rejected'
continue
- link = os.path.islink( full_path )
- is_file = os.path.isfile( full_path )
+ link = os.path.islink(full_path)
+ is_file = os.path.isfile(full_path)
if not is_file and not is_dir:
## print '=> unknown entry type'
continue
@@ -146,57 +149,57 @@ def glob(dir_path,
## print '=> type: %d' % entry_type,
if (entry_type & entry_type_filter) != 0:
## print ' => KEEP'
- yield os.path.join( dir_path, entry )
+ yield os.path.join(dir_path, entry)
## else:
## print ' => TYPE REJECTED'
- return list( glob_impl( dir_path ) )
+ return list(glob_impl(dir_path))
if __name__ == "__main__":
import unittest
class AntPatternToRETest(unittest.TestCase):
-## def test_conversion( self ):
-## self.assertEqual( '^somepath$', ant_pattern_to_re( 'somepath' ).pattern )
+## def test_conversion(self):
+## self.assertEqual('^somepath$', ant_pattern_to_re('somepath').pattern)
- def test_matching( self ):
- test_cases = [ ( 'path',
+ def test_matching(self):
+ test_cases = [ ('path',
['path'],
- ['somepath', 'pathsuffix', '/path', '/path'] ),
- ( '*.py',
+ ['somepath', 'pathsuffix', '/path', '/path']),
+ ('*.py',
['source.py', 'source.ext.py', '.py'],
- ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c'] ),
- ( '**/path',
+ ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c']),
+ ('**/path',
['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'],
- ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath'] ),
- ( 'path/**',
+ ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath']),
+ ('path/**',
['path/a', 'path/path/a', 'path//'],
- ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a'] ),
- ( '/**/path',
+ ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a']),
+ ('/**/path',
['/path', '/a/path', '/a/b/path/path', '/path/path'],
- ['path', 'path/', 'a/path', '/pathsuffix', '/somepath'] ),
- ( 'a/b',
+ ['path', 'path/', 'a/path', '/pathsuffix', '/somepath']),
+ ('a/b',
['a/b'],
- ['somea/b', 'a/bsuffix', 'a/b/c'] ),
- ( '**/*.py',
+ ['somea/b', 'a/bsuffix', 'a/b/c']),
+ ('**/*.py',
['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'],
- ['script.pyc', 'script.pyo', 'a.py/b'] ),
- ( 'src/**/*.py',
+ ['script.pyc', 'script.pyo', 'a.py/b']),
+ ('src/**/*.py',
['src/a.py', 'src/dir/a.py'],
- ['a/src/a.py', '/src/a.py'] ),
+ ['a/src/a.py', '/src/a.py']),
]
for ant_pattern, accepted_matches, rejected_matches in list(test_cases):
- def local_path( paths ):
+ def local_path(paths):
return [ p.replace('/',os.path.sep) for p in paths ]
- test_cases.append( (ant_pattern, local_path(accepted_matches), local_path( rejected_matches )) )
+ test_cases.append((ant_pattern, local_path(accepted_matches), local_path(rejected_matches)))
for ant_pattern, accepted_matches, rejected_matches in test_cases:
- rex = ant_pattern_to_re( ant_pattern )
+ rex = ant_pattern_to_re(ant_pattern)
print('ant_pattern:', ant_pattern, ' => ', rex.pattern)
for accepted_match in accepted_matches:
print('Accepted?:', accepted_match)
- self.assertTrue( rex.match( accepted_match ) is not None )
+ self.assertTrue(rex.match(accepted_match) is not None)
for rejected_match in rejected_matches:
print('Rejected?:', rejected_match)
- self.assertTrue( rex.match( rejected_match ) is None )
+ self.assertTrue(rex.match(rejected_match) is None)
unittest.main()
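
The embedded unit tests above double as documentation of the ant-pattern semantics; a quick interactive check, assuming the repository root is on `sys.path` (illustrative only, using cases from the test table):

    from devtools.antglob import ant_pattern_to_re
    rex = ant_pattern_to_re('src/**/*.py')
    assert rex.match('src/dir/a.py') is not None  # matched by 'src/**/*.py'
    assert rex.match('/src/a.py') is None         # absolute path is rejected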
diff --git a/3rdparty/jsoncpp/devtools/batchbuild.py b/3rdparty/jsoncpp/devtools/batchbuild.py
index 6f57945a7c3..0eb0690e8c6 100644
--- a/3rdparty/jsoncpp/devtools/batchbuild.py
+++ b/3rdparty/jsoncpp/devtools/batchbuild.py
@@ -18,62 +18,62 @@ class BuildDesc:
self.build_type = build_type
self.generator = generator
- def merged_with( self, build_desc ):
+ def merged_with(self, build_desc):
"""Returns a new BuildDesc by merging field content.
Prefer build_desc fields to self fields for single valued field.
"""
- return BuildDesc( self.prepend_envs + build_desc.prepend_envs,
+ return BuildDesc(self.prepend_envs + build_desc.prepend_envs,
self.variables + build_desc.variables,
build_desc.build_type or self.build_type,
- build_desc.generator or self.generator )
+ build_desc.generator or self.generator)
- def env( self ):
+ def env(self):
environ = os.environ.copy()
for values_by_name in self.prepend_envs:
for var, value in list(values_by_name.items()):
var = var.upper()
if type(value) is unicode:
- value = value.encode( sys.getdefaultencoding() )
+ value = value.encode(sys.getdefaultencoding())
if var in environ:
environ[var] = value + os.pathsep + environ[var]
else:
environ[var] = value
return environ
- def cmake_args( self ):
+ def cmake_args(self):
args = ["-D%s" % var for var in self.variables]
# skip build type for Visual Studio solution as it cause warning
if self.build_type and 'Visual' not in self.generator:
- args.append( "-DCMAKE_BUILD_TYPE=%s" % self.build_type )
+ args.append("-DCMAKE_BUILD_TYPE=%s" % self.build_type)
if self.generator:
- args.extend( ['-G', self.generator] )
+ args.extend(['-G', self.generator])
return args
- def __repr__( self ):
- return "BuildDesc( %s, build_type=%s )" % (" ".join( self.cmake_args()), self.build_type)
+ def __repr__(self):
+ return "BuildDesc(%s, build_type=%s)" % (" ".join(self.cmake_args()), self.build_type)
class BuildData:
- def __init__( self, desc, work_dir, source_dir ):
+ def __init__(self, desc, work_dir, source_dir):
self.desc = desc
self.work_dir = work_dir
self.source_dir = source_dir
- self.cmake_log_path = os.path.join( work_dir, 'batchbuild_cmake.log' )
- self.build_log_path = os.path.join( work_dir, 'batchbuild_build.log' )
+ self.cmake_log_path = os.path.join(work_dir, 'batchbuild_cmake.log')
+ self.build_log_path = os.path.join(work_dir, 'batchbuild_build.log')
self.cmake_succeeded = False
self.build_succeeded = False
def execute_build(self):
print('Build %s' % self.desc)
- self._make_new_work_dir( )
- self.cmake_succeeded = self._generate_makefiles( )
+ self._make_new_work_dir()
+ self.cmake_succeeded = self._generate_makefiles()
if self.cmake_succeeded:
- self.build_succeeded = self._build_using_makefiles( )
+ self.build_succeeded = self._build_using_makefiles()
return self.build_succeeded
def _generate_makefiles(self):
print(' Generating makefiles: ', end=' ')
- cmd = ['cmake'] + self.desc.cmake_args( ) + [os.path.abspath( self.source_dir )]
- succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.cmake_log_path )
+ cmd = ['cmake'] + self.desc.cmake_args() + [os.path.abspath(self.source_dir)]
+ succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.cmake_log_path)
print('done' if succeeded else 'FAILED')
return succeeded
@@ -82,58 +82,58 @@ class BuildData:
cmd = ['cmake', '--build', self.work_dir]
if self.desc.build_type:
cmd += ['--config', self.desc.build_type]
- succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.build_log_path )
+ succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.build_log_path)
print('done' if succeeded else 'FAILED')
return succeeded
def _execute_build_subprocess(self, cmd, env, log_path):
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
- env=env )
- stdout, _ = process.communicate( )
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
+ env=env)
+ stdout, _ = process.communicate()
succeeded = (process.returncode == 0)
- with open( log_path, 'wb' ) as flog:
- log = ' '.join( cmd ) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
- flog.write( fix_eol( log ) )
+ with open(log_path, 'wb') as flog:
+ log = ' '.join(cmd) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
+ flog.write(fix_eol(log))
return succeeded
def _make_new_work_dir(self):
- if os.path.isdir( self.work_dir ):
+ if os.path.isdir(self.work_dir):
print(' Removing work directory', self.work_dir)
- shutil.rmtree( self.work_dir, ignore_errors=True )
- if not os.path.isdir( self.work_dir ):
- os.makedirs( self.work_dir )
+ shutil.rmtree(self.work_dir, ignore_errors=True)
+ if not os.path.isdir(self.work_dir):
+ os.makedirs(self.work_dir)
-def fix_eol( stdout ):
+def fix_eol(stdout):
"""Fixes wrong EOL produced by cmake --build on Windows (\r\r\n instead of \r\n).
"""
- return re.sub( '\r*\n', os.linesep, stdout )
+ return re.sub('\r*\n', os.linesep, stdout)
-def load_build_variants_from_config( config_path ):
- with open( config_path, 'rb' ) as fconfig:
- data = json.load( fconfig )
+def load_build_variants_from_config(config_path):
+ with open(config_path, 'rb') as fconfig:
+ data = json.load(fconfig)
variants = data[ 'cmake_variants' ]
- build_descs_by_axis = collections.defaultdict( list )
+ build_descs_by_axis = collections.defaultdict(list)
for axis in variants:
axis_name = axis["name"]
build_descs = []
if "generators" in axis:
for generator_data in axis["generators"]:
for generator in generator_data["generator"]:
- build_desc = BuildDesc( generator=generator,
- prepend_envs=generator_data.get("env_prepend") )
- build_descs.append( build_desc )
+ build_desc = BuildDesc(generator=generator,
+ prepend_envs=generator_data.get("env_prepend"))
+ build_descs.append(build_desc)
elif "variables" in axis:
for variables in axis["variables"]:
- build_desc = BuildDesc( variables=variables )
- build_descs.append( build_desc )
+ build_desc = BuildDesc(variables=variables)
+ build_descs.append(build_desc)
elif "build_types" in axis:
for build_type in axis["build_types"]:
- build_desc = BuildDesc( build_type=build_type )
- build_descs.append( build_desc )
- build_descs_by_axis[axis_name].extend( build_descs )
+ build_desc = BuildDesc(build_type=build_type)
+ build_descs.append(build_desc)
+ build_descs_by_axis[axis_name].extend(build_descs)
return build_descs_by_axis
-def generate_build_variants( build_descs_by_axis ):
+def generate_build_variants(build_descs_by_axis):
"""Returns a list of BuildDesc generated for the partial BuildDesc for each axis."""
axis_names = list(build_descs_by_axis.keys())
build_descs = []
@@ -141,8 +141,8 @@ def generate_build_variants( build_descs_by_axis ):
if len(build_descs):
# for each existing build_desc and each axis build desc, create a new build_desc
new_build_descs = []
- for prototype_build_desc, axis_build_desc in itertools.product( build_descs, axis_build_descs):
- new_build_descs.append( prototype_build_desc.merged_with( axis_build_desc ) )
+ for prototype_build_desc, axis_build_desc in itertools.product(build_descs, axis_build_descs):
+ new_build_descs.append(prototype_build_desc.merged_with(axis_build_desc))
build_descs = new_build_descs
else:
build_descs = axis_build_descs
@@ -174,60 +174,57 @@ $tr_builds
</table>
</body></html>''')
-def generate_html_report( html_report_path, builds ):
- report_dir = os.path.dirname( html_report_path )
+def generate_html_report(html_report_path, builds):
+ report_dir = os.path.dirname(html_report_path)
# Vertical axis: generator
# Horizontal: variables, then build_type
- builds_by_generator = collections.defaultdict( list )
+ builds_by_generator = collections.defaultdict(list)
variables = set()
- build_types_by_variable = collections.defaultdict( set )
+ build_types_by_variable = collections.defaultdict(set)
build_by_pos_key = {} # { (generator, var_key, build_type): build }
for build in builds:
- builds_by_generator[build.desc.generator].append( build )
+ builds_by_generator[build.desc.generator].append(build)
var_key = tuple(sorted(build.desc.variables))
- variables.add( var_key )
- build_types_by_variable[var_key].add( build.desc.build_type )
+ variables.add(var_key)
+ build_types_by_variable[var_key].add(build.desc.build_type)
pos_key = (build.desc.generator, var_key, build.desc.build_type)
build_by_pos_key[pos_key] = build
- variables = sorted( variables )
+ variables = sorted(variables)
th_vars = []
th_build_types = []
for variable in variables:
- build_types = sorted( build_types_by_variable[variable] )
+ build_types = sorted(build_types_by_variable[variable])
nb_build_type = len(build_types_by_variable[variable])
- th_vars.append( '<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape( ' '.join( variable ) ) ) )
+ th_vars.append('<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape(' '.join(variable))))
for build_type in build_types:
- th_build_types.append( '<th>%s</th>' % cgi.escape(build_type) )
+ th_build_types.append('<th>%s</th>' % cgi.escape(build_type))
tr_builds = []
- for generator in sorted( builds_by_generator ):
- tds = [ '<td>%s</td>\n' % cgi.escape( generator ) ]
+ for generator in sorted(builds_by_generator):
+ tds = [ '<td>%s</td>\n' % cgi.escape(generator) ]
for variable in variables:
- build_types = sorted( build_types_by_variable[variable] )
+ build_types = sorted(build_types_by_variable[variable])
for build_type in build_types:
pos_key = (generator, variable, build_type)
build = build_by_pos_key.get(pos_key)
if build:
cmake_status = 'ok' if build.cmake_succeeded else 'FAILED'
build_status = 'ok' if build.build_succeeded else 'FAILED'
- cmake_log_url = os.path.relpath( build.cmake_log_path, report_dir )
- build_log_url = os.path.relpath( build.build_log_path, report_dir )
- td = '<td class="%s"><a href="%s" class="%s">CMake: %s</a>' % (
- build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status)
+ cmake_log_url = os.path.relpath(build.cmake_log_path, report_dir)
+ build_log_url = os.path.relpath(build.build_log_path, report_dir)
+ td = '<td class="%s"><a href="%s" class="%s">CMake: %s</a>' % ( build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status)
if build.cmake_succeeded:
- td += '<br><a href="%s" class="%s">Build: %s</a>' % (
- build_log_url, build_status.lower(), build_status)
+ td += '<br><a href="%s" class="%s">Build: %s</a>' % ( build_log_url, build_status.lower(), build_status)
td += '</td>'
else:
td = '<td></td>'
- tds.append( td )
- tr_builds.append( '<tr>%s</tr>' % '\n'.join( tds ) )
- html = HTML_TEMPLATE.substitute(
- title='Batch build report',
+ tds.append(td)
+ tr_builds.append('<tr>%s</tr>' % '\n'.join(tds))
+ html = HTML_TEMPLATE.substitute( title='Batch build report',
th_vars=' '.join(th_vars),
- th_build_types=' '.join( th_build_types),
- tr_builds='\n'.join( tr_builds ) )
- with open( html_report_path, 'wt' ) as fhtml:
- fhtml.write( html )
+ th_build_types=' '.join(th_build_types),
+ tr_builds='\n'.join(tr_builds))
+ with open(html_report_path, 'wt') as fhtml:
+ fhtml.write(html)
print('HTML report generated in:', html_report_path)
def main():
@@ -246,33 +243,33 @@ python devtools\batchbuild.py e:\buildbots\jsoncpp\build . devtools\agent_vmw7.j
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) < 3:
- parser.error( "Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH." )
+ parser.error("Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH.")
work_dir = args[0]
source_dir = args[1].rstrip('/\\')
config_paths = args[2:]
for config_path in config_paths:
- if not os.path.isfile( config_path ):
- parser.error( "Can not read: %r" % config_path )
+ if not os.path.isfile(config_path):
+ parser.error("Can not read: %r" % config_path)
# generate build variants
build_descs = []
for config_path in config_paths:
- build_descs_by_axis = load_build_variants_from_config( config_path )
- build_descs.extend( generate_build_variants( build_descs_by_axis ) )
+ build_descs_by_axis = load_build_variants_from_config(config_path)
+ build_descs.extend(generate_build_variants(build_descs_by_axis))
print('Build variants (%d):' % len(build_descs))
# assign build directory for each variant
- if not os.path.isdir( work_dir ):
- os.makedirs( work_dir )
+ if not os.path.isdir(work_dir):
+ os.makedirs(work_dir)
builds = []
- with open( os.path.join( work_dir, 'matrix-dir-map.txt' ), 'wt' ) as fmatrixmap:
- for index, build_desc in enumerate( build_descs ):
- build_desc_work_dir = os.path.join( work_dir, '%03d' % (index+1) )
- builds.append( BuildData( build_desc, build_desc_work_dir, source_dir ) )
- fmatrixmap.write( '%s: %s\n' % (build_desc_work_dir, build_desc) )
+ with open(os.path.join(work_dir, 'matrix-dir-map.txt'), 'wt') as fmatrixmap:
+ for index, build_desc in enumerate(build_descs):
+ build_desc_work_dir = os.path.join(work_dir, '%03d' % (index+1))
+ builds.append(BuildData(build_desc, build_desc_work_dir, source_dir))
+ fmatrixmap.write('%s: %s\n' % (build_desc_work_dir, build_desc))
for build in builds:
build.execute_build()
- html_report_path = os.path.join( work_dir, 'batchbuild-report.html' )
- generate_html_report( html_report_path, builds )
+ html_report_path = os.path.join(work_dir, 'batchbuild-report.html')
+ generate_html_report(html_report_path, builds)
print('Done')
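
This pass over `batchbuild.py` only normalizes call-site spacing; usage is unchanged. Per the module's own docstring, it takes a work directory, a source directory, and one or more JSON agent configs (`path/to/work_dir` here is a placeholder):

    python devtools/batchbuild.py path/to/work_dir . devtools/agent_vmw7.json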
diff --git a/3rdparty/jsoncpp/devtools/fixeol.py b/3rdparty/jsoncpp/devtools/fixeol.py
index 53af7612bc7..054eb9b227b 100644
--- a/3rdparty/jsoncpp/devtools/fixeol.py
+++ b/3rdparty/jsoncpp/devtools/fixeol.py
@@ -1,10 +1,15 @@
+# Copyright 2010 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
from __future__ import print_function
import os.path
-def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
+def fix_source_eol(path, is_dry_run = True, verbose = True, eol = '\n'):
"""Makes sure that all sources have the specified eol sequence (default: unix)."""
- if not os.path.isfile( path ):
- raise ValueError( 'Path "%s" is not a file' % path )
+ if not os.path.isfile(path):
+ raise ValueError('Path "%s" is not a file' % path)
try:
f = open(path, 'rb')
except IOError as msg:
@@ -29,27 +34,27 @@ def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
##
##
##
-##def _do_fix( is_dry_run = True ):
+##def _do_fix(is_dry_run = True):
## from waftools import antglob
-## python_sources = antglob.glob( '.',
+## python_sources = antglob.glob('.',
## includes = '**/*.py **/wscript **/wscript_build',
## excludes = antglob.default_excludes + './waf.py',
-## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
+## prune_dirs = antglob.prune_dirs + 'waf-* ./build')
## for path in python_sources:
-## _fix_python_source( path, is_dry_run )
+## _fix_python_source(path, is_dry_run)
##
-## cpp_sources = antglob.glob( '.',
+## cpp_sources = antglob.glob('.',
## includes = '**/*.cpp **/*.h **/*.inl',
-## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
+## prune_dirs = antglob.prune_dirs + 'waf-* ./build')
## for path in cpp_sources:
-## _fix_source_eol( path, is_dry_run )
+## _fix_source_eol(path, is_dry_run)
##
##
##def dry_fix(context):
-## _do_fix( is_dry_run = True )
+## _do_fix(is_dry_run = True)
##
##def fix(context):
-## _do_fix( is_dry_run = False )
+## _do_fix(is_dry_run = False)
##
##def shutdown():
## pass
diff --git a/3rdparty/jsoncpp/devtools/licenseupdater.py b/3rdparty/jsoncpp/devtools/licenseupdater.py
index 8cb71d737b3..6f823618fb9 100644
--- a/3rdparty/jsoncpp/devtools/licenseupdater.py
+++ b/3rdparty/jsoncpp/devtools/licenseupdater.py
@@ -13,7 +13,7 @@ BRIEF_LICENSE = LICENSE_BEGIN + """2007-2010 Baptiste Lepilleur
""".replace('\r\n','\n')
-def update_license( path, dry_run, show_diff ):
+def update_license(path, dry_run, show_diff):
"""Update the license statement in the specified file.
Parameters:
path: path of the C++ source file to update.
@@ -22,28 +22,28 @@ def update_license( path, dry_run, show_diff ):
show_diff: if True, print the path of the file that would be modified,
as well as the change made to the file.
"""
- with open( path, 'rt' ) as fin:
+ with open(path, 'rt') as fin:
original_text = fin.read().replace('\r\n','\n')
newline = fin.newlines and fin.newlines[0] or '\n'
- if not original_text.startswith( LICENSE_BEGIN ):
+ if not original_text.startswith(LICENSE_BEGIN):
# No existing license found => prepend it
new_text = BRIEF_LICENSE + original_text
else:
- license_end_index = original_text.index( '\n\n' ) # search first blank line
+ license_end_index = original_text.index('\n\n') # search first blank line
new_text = BRIEF_LICENSE + original_text[license_end_index+2:]
if original_text != new_text:
if not dry_run:
- with open( path, 'wb' ) as fout:
- fout.write( new_text.replace('\n', newline ) )
+ with open(path, 'wb') as fout:
+ fout.write(new_text.replace('\n', newline))
print('Updated', path)
if show_diff:
import difflib
- print('\n'.join( difflib.unified_diff( original_text.split('\n'),
- new_text.split('\n') ) ))
+ print('\n'.join(difflib.unified_diff(original_text.split('\n'),
+ new_text.split('\n'))))
return True
return False
-def update_license_in_source_directories( source_dirs, dry_run, show_diff ):
+def update_license_in_source_directories(source_dirs, dry_run, show_diff):
"""Updates license text in C++ source files found in directory source_dirs.
Parameters:
source_dirs: list of directory to scan for C++ sources. Directories are
@@ -56,11 +56,11 @@ def update_license_in_source_directories( source_dirs, dry_run, show_diff ):
from devtools import antglob
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
for source_dir in source_dirs:
- cpp_sources = antglob.glob( source_dir,
+ cpp_sources = antglob.glob(source_dir,
includes = '''**/*.h **/*.cpp **/*.inl''',
- prune_dirs = prune_dirs )
+ prune_dirs = prune_dirs)
for source in cpp_sources:
- update_license( source, dry_run, show_diff )
+ update_license(source, dry_run, show_diff)
def main():
usage = """%prog DIR [DIR2...]
@@ -83,7 +83,7 @@ python devtools\licenseupdater.py include src
help="""On update, show change made to the file.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
- update_license_in_source_directories( args, options.dry_run, options.show_diff )
+ update_license_in_source_directories(args, options.dry_run, options.show_diff)
print('Done')
if __name__ == '__main__':
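
The core of update_license() above is a prepend-or-replace decision: if a file does not already start with LICENSE_BEGIN, the brief license is prepended; otherwise everything up to the first blank line is treated as the old header and swapped out. Distilled into a standalone, hypothetical helper (the license text here is abbreviated):

    LICENSE_BEGIN = '// Copyright '
    BRIEF_LICENSE = LICENSE_BEGIN + '2007-2010 Baptiste Lepilleur\n// ...\n\n'

    def replace_header(original_text):
        if not original_text.startswith(LICENSE_BEGIN):
            # No existing license found => prepend it.
            return BRIEF_LICENSE + original_text
        # The old header runs up to the first blank line; drop it.
        license_end_index = original_text.index('\n\n')
        return BRIEF_LICENSE + original_text[license_end_index + 2:]
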
diff --git a/3rdparty/jsoncpp/devtools/tarball.py b/3rdparty/jsoncpp/devtools/tarball.py
index ccbda394255..2e72717aba4 100644
--- a/3rdparty/jsoncpp/devtools/tarball.py
+++ b/3rdparty/jsoncpp/devtools/tarball.py
@@ -1,5 +1,10 @@
-import os.path
-import gzip
+# Copyright 2010 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+from contextlib import closing
+import os
import tarfile
TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
@@ -13,41 +18,35 @@ def make_tarball(tarball_path, sources, base_dir, prefix_dir=''):
prefix_dir: all files stored in the tarball are placed under the sub-directory prefix_dir. Set to ''
to make them children of the root.
"""
- base_dir = os.path.normpath( os.path.abspath( base_dir ) )
- def archive_name( path ):
+ base_dir = os.path.normpath(os.path.abspath(base_dir))
+ def archive_name(path):
"""Makes path relative to base_dir."""
- path = os.path.normpath( os.path.abspath( path ) )
- common_path = os.path.commonprefix( (base_dir, path) )
+ path = os.path.normpath(os.path.abspath(path))
+ common_path = os.path.commonprefix((base_dir, path))
archive_name = path[len(common_path):]
- if os.path.isabs( archive_name ):
+ if os.path.isabs(archive_name):
archive_name = archive_name[1:]
- return os.path.join( prefix_dir, archive_name )
+ return os.path.join(prefix_dir, archive_name)
def visit(tar, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
path_in_tar = archive_name(path)
- tar.add(path, path_in_tar )
+ tar.add(path, path_in_tar)
compression = TARGZ_DEFAULT_COMPRESSION_LEVEL
- tar = tarfile.TarFile.gzopen( tarball_path, 'w', compresslevel=compression )
- try:
+ with closing(tarfile.TarFile.open(tarball_path, 'w:gz',
+ compresslevel=compression)) as tar:
for source in sources:
source_path = source
- if os.path.isdir( source ):
- os.path.walk(source_path, visit, tar)
+ if os.path.isdir(source):
+ for dirpath, dirnames, filenames in os.walk(source_path):
+ visit(tar, dirpath, filenames)
else:
path_in_tar = archive_name(source_path)
- tar.add(source_path, path_in_tar ) # filename, arcname
- finally:
- tar.close()
+ tar.add(source_path, path_in_tar) # filename, arcname
-def decompress( tarball_path, base_dir ):
+def decompress(tarball_path, base_dir):
"""Decompress the gzipped tarball into directory base_dir.
"""
- # !!! This class method is not documented in the online doc
- # nor is bz2open!
- tar = tarfile.TarFile.gzopen(tarball_path, mode='r')
- try:
- tar.extractall( base_dir )
- finally:
- tar.close()
+ with closing(tarfile.TarFile.open(tarball_path)) as tar:
+ tar.extractall(base_dir)
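
Two portability fixes drive the tarball.py hunk: os.path.walk() (a Python-2-only callback API) is replaced by the os.walk() generator, and the undocumented TarFile.gzopen() is replaced by TarFile.open() with an explicit 'w:gz' mode, wrapped in contextlib.closing() so cleanup happens even on Python versions where TarFile is not a context manager. A minimal self-contained sketch of the same pattern (add_tree and the relpath-based archive naming are simplifications of make_tarball above, not the project's code):

    from contextlib import closing
    import os
    import tarfile

    def add_tree(tarball_path, source_dir, compression=9):
        with closing(tarfile.TarFile.open(tarball_path, 'w:gz',
                                          compresslevel=compression)) as tar:
            # os.walk() exists in both Python 2 and 3, unlike os.path.walk().
            for dirpath, dirnames, filenames in os.walk(source_dir):
                for name in filenames:
                    path = os.path.join(dirpath, name)
                    tar.add(path, os.path.relpath(path, source_dir))
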
diff --git a/3rdparty/jsoncpp/doc/doxyfile.in b/3rdparty/jsoncpp/doc/doxyfile.in
index a4161865cc7..57c61c27e40 100644
--- a/3rdparty/jsoncpp/doc/doxyfile.in
+++ b/3rdparty/jsoncpp/doc/doxyfile.in
@@ -819,7 +819,7 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
-EXAMPLE_PATH =
+EXAMPLE_PATH = ..
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
@@ -1946,8 +1946,7 @@ INCLUDE_FILE_PATTERNS = *.h
PREDEFINED = "_MSC_VER=1400" \
_CPPRTTI \
_WIN32 \
- JSONCPP_DOC_EXCLUDE_IMPLEMENTATION \
- JSON_VALUE_USE_INTERNAL_MAP
+ JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
diff --git a/3rdparty/jsoncpp/doc/header.html b/3rdparty/jsoncpp/doc/header.html
index 6bd2ad9e32a..4b2a5e92198 100644
--- a/3rdparty/jsoncpp/doc/header.html
+++ b/3rdparty/jsoncpp/doc/header.html
@@ -16,7 +16,7 @@ JsonCpp - JSON data format manipulation library
</a>
</td>
<td width="40%" align="right" valign="center">
- <a href="https://github.com/open-source-parsers/jsoncpp">JsonCpp home page</a>
+ <a href="http://open-source-parsers.github.io/jsoncpp-docs/doxygen/">JsonCpp home page</a>
</td>
</tr>
</table>
diff --git a/3rdparty/jsoncpp/doc/jsoncpp.dox b/3rdparty/jsoncpp/doc/jsoncpp.dox
index a9ed47ec4e9..47efc8a3576 100644
--- a/3rdparty/jsoncpp/doc/jsoncpp.dox
+++ b/3rdparty/jsoncpp/doc/jsoncpp.dox
@@ -4,11 +4,21 @@
<a HREF="http://www.json.org/">JSON (JavaScript Object Notation)</a>
is a lightweight data-interchange format.
-It can represent integer, real number, string, an ordered sequence of value, and
-a collection of name/value pairs.
Here is an example of JSON data:
\verbatim
+{
+ "encoding" : "UTF-8",
+ "plug-ins" : [
+ "python",
+ "c++",
+ "ruby"
+ ],
+ "indent" : { "length" : 3, "use_space": true }
+}
+\endverbatim
+<b>JsonCpp</b> supports comments as <i>meta-data</i>:
+\code
// Configuration options
{
// Default encoding for text
@@ -17,22 +27,22 @@ Here is an example of JSON data:
// Plug-ins loaded at start-up
"plug-ins" : [
"python",
- "c++",
+ "c++", // trailing comment
"ruby"
],
// Tab indent size
- "indent" : { "length" : 3, "use_space": true }
+ // (multi-line comment)
+ "indent" : { /*embedded comment*/ "length" : 3, "use_space": true }
}
-\endverbatim
-<code>jsoncpp</code> supports comments as <i>meta-data</i>.
+\endcode
\section _features Features
- read and write JSON document
- attach C++ style comments to element during parsing
- rewrite JSON document preserving original comments
-Notes: Comments used to be supported in JSON but where removed for
+Notes: Comments used to be supported in JSON but were removed for
portability (C-like comments are not supported in Python). Since
comments are useful in configuration/input files, this feature was
preserved.
@@ -40,47 +50,77 @@ preserved.
\section _example Code example
\code
-Json::Value root; // will contains the root value after parsing.
-Json::Reader reader;
-bool parsingSuccessful = reader.parse( config_doc, root );
-if ( !parsingSuccessful )
-{
- // report to the user the failure and their locations in the document.
- std::cout << "Failed to parse configuration\n"
- << reader.getFormattedErrorMessages();
- return;
-}
+Json::Value root; // 'root' will contain the root value after parsing.
+std::cin >> root;
-// Get the value of the member of root named 'encoding', return 'UTF-8' if there is no
-// such member.
+// You can also read into a particular sub-value.
+std::cin >> root["subtree"];
+
+// Get the value of the member of root named 'encoding',
+// and return 'UTF-8' if there is no such member.
std::string encoding = root.get("encoding", "UTF-8" ).asString();
-// Get the value of the member of root named 'encoding', return a 'null' value if
+
+// Get the value of the member of root named 'plug-ins'; return a 'null' value if
// there is no such member.
const Json::Value plugins = root["plug-ins"];
-for ( int index = 0; index < plugins.size(); ++index ) // Iterates over the sequence elements.
+
+// Iterate over the sequence elements.
+for ( int index = 0; index < plugins.size(); ++index )
loadPlugIn( plugins[index].asString() );
-setIndentLength( root["indent"].get("length", 3).asInt() );
-setIndentUseSpace( root["indent"].get("use_space", true).asBool() );
+// Try other datatypes. Some are auto-convertible to others.
+foo::setIndentLength( root["indent"].get("length", 3).asInt() );
+foo::setIndentUseSpace( root["indent"].get("use_space", true).asBool() );
-// ...
-// At application shutdown to make the new configuration document:
-// Since Json::Value has implicit constructor for all value types, it is not
-// necessary to explicitly construct the Json::Value object:
-root["encoding"] = getCurrentEncoding();
-root["indent"]["length"] = getCurrentIndentLength();
-root["indent"]["use_space"] = getCurrentIndentUseSpace();
-
-Json::StyledWriter writer;
-// Make a new JSON document for the configuration. Preserve original comments.
-std::string outputConfig = writer.write( root );
-
-// You can also use streams. This will put the contents of any JSON
-// stream at a particular sub-value, if you'd like.
-std::cin >> root["subtree"];
+// Since Json::Value has an implicit constructor for all value types, it is not
+// necessary to explicitly construct the Json::Value object.
+root["encoding"] = foo::getCurrentEncoding();
+root["indent"]["length"] = foo::getCurrentIndentLength();
+root["indent"]["use_space"] = foo::getCurrentIndentUseSpace();
-// And you can write to a stream, using the StyledWriter automatically.
+// If you like the defaults, you can insert directly into a stream.
std::cout << root;
+// Of course, you can write to `std::ostringstream` if you prefer.
+
+// If desired, remember to add a linefeed and flush.
+std::cout << std::endl;
+\endcode
+
+\section _advanced Advanced usage
+
+Configure *builders* to create *readers* and *writers*. For
+configuration, we use our own `Json::Value` (rather than
+standard setters/getters) so that we can add
+features without losing binary-compatibility.
+
+\code
+// For convenience, use `writeString()` with a specialized builder.
+Json::StreamWriterBuilder wbuilder;
+wbuilder["indentation"] = "\t";
+std::string document = Json::writeString(wbuilder, root);
+
+// Here, using a specialized Builder, we discard comments and
+// record errors as we parse.
+Json::CharReaderBuilder rbuilder;
+rbuilder["collectComments"] = false;
+std::string errs;
+bool ok = Json::parseFromStream(rbuilder, std::cin, &root, &errs);
+\endcode
+
+Yes, compile-time configuration-checking would be helpful,
+but `Json::Value` lets you
+write and read the builder configuration, which is better! In other words,
+you can configure your JSON parser using JSON.
+
+CharReaders and StreamWriters are not thread-safe, but they are re-usable.
+\code
+Json::CharReaderBuilder rbuilder;
+cfg >> rbuilder.settings_;
+std::unique_ptr<Json::CharReader> const reader(rbuilder.newCharReader());
+reader->parse(start, stop, &value1, &errs);
+// ...
+reader->parse(start, stop, &value2, &errs);
+// etc.
\endcode
\section _pbuild Build instructions
@@ -116,4 +156,9 @@ Basically JsonCpp is licensed under MIT license, or public domain if desired
and recognized in your jurisdiction.
\author Baptiste Lepilleur <blep@users.sourceforge.net> (originator)
+\author Christopher Dunn <cdunn2001@gmail.com> (primary maintainer)
+\version \include version
+We make strong guarantees about binary-compatibility, consistent with
+<a href="http://apr.apache.org/versioning.html">the Apache versioning scheme</a>.
+\sa version.h
*/
diff --git a/3rdparty/jsoncpp/doc/web_doxyfile.in b/3rdparty/jsoncpp/doc/web_doxyfile.in
new file mode 100644
index 00000000000..07d6819a743
--- /dev/null
+++ b/3rdparty/jsoncpp/doc/web_doxyfile.in
@@ -0,0 +1,2301 @@
+# Doxyfile 1.8.5
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "JsonCpp"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER = %JSONCPP_VERSION%
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = %DOC_TOPDIR%
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-
+# Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi,
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en,
+# Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish,
+# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish,
+# Turkish, Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files' names in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH = %TOPDIR%
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH = %TOPDIR%/include
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that Rational Rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 3
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES = "testCaseSetup=\link CppUT::TestCase::setUp() setUp()\endlink" \
+ "testCaseRun=\link CppUT::TestCase::run() run()\endlink" \
+ "testCaseTearDown=\link CppUT::TestCase::tearDown() tearDown()\endlink" \
+ "json_ref=<a HREF='http://www.json.org/'>JSON (JavaScript Object Notation)</a>"
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = YES
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = YES
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = NO
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = NO
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of the
+# FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE = %WARNING_LOG_PATH%
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = ../include \
+ ../src/lib_json \
+ .
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS = *.h \
+ *.cpp \
+ *.inl \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH = ..
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen's built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = NO
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = %HTML_OUTPUT%
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER = header.html
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER = footer.html
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
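+
+# Sketch (file name hypothetical): with HTML_EXTRA_FILES = logo.png, a custom
+# HTML_HEADER would load the file as <img src="$relpath^logo.png"/>, while a
+# custom stylesheet would reference it simply as url("logo.png").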
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = YES
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = %HTML_HELP%
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE = jsoncpp-%JSONCPP_VERSION%.chm
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION = "c:\Program Files\HTML Help Workshop\hhc.exe"
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is
+# generated ( YES) or included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = YES
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = YES
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = YES
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
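+
+# A complete Qt help setup is sketched below (values illustrative only;
+# GENERATE_QHP stays NO in this configuration):
+#   GENERATE_QHP       = YES
+#   QHP_NAMESPACE      = org.example.jsoncpp
+#   QHP_VIRTUAL_FOLDER = doc
+#   QHG_LOCATION       = /usr/bin/qhelpgenerator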
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated; together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
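+
+# Illustrative combination (USE_MATHJAX is NO in this configuration): a local
+# MathJax copy unpacked next to the HTML output directory, with two TeX
+# extensions enabled, would read
+#   USE_MATHJAX        = YES
+#   MATHJAX_RELPATH    = ../mathjax
+#   MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols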
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When the EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
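+
+# Sketch with hypothetical names: two projects sharing one external search
+# index could be cross-linked with
+#   EXTERNAL_SEARCH_ID    = jsoncpp
+#   EXTRA_SEARCH_MAPPINGS = othertag=../other/html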
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements;
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH = ../include
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS = *.h
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED = "_MSC_VER=1400" \
+ _CPPRTTI \
+ _WIN32 \
+ JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
+
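+# For example (illustrative use of the := operator): a macro that must not be
+# recursively expanded or undefined via #undef could be pinned with
+#   PREDEFINED = JSON_API:=
+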
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = NO
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
+
+HAVE_DOT = %HAVE_DOT%
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = %UML_LOOK%
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = YES
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH = %DOT_PATH%
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 1000
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/3rdparty/jsoncpp/doxybuild.py b/3rdparty/jsoncpp/doxybuild.py
index 0b61c397e43..f18c9c0697b 100644
--- a/3rdparty/jsoncpp/doxybuild.py
+++ b/3rdparty/jsoncpp/doxybuild.py
@@ -1,22 +1,37 @@
"""Script to generate doxygen documentation.
"""
from __future__ import print_function
+from __future__ import unicode_literals
from devtools import tarball
+from contextlib import contextmanager
+import subprocess
+import traceback
import re
import os
-import os.path
import sys
import shutil
+@contextmanager
+def cd(newdir):
+ """
+ http://stackoverflow.com/questions/431684/how-do-i-cd-in-python
+ """
+ prevdir = os.getcwd()
+ os.chdir(newdir)
+ try:
+ yield
+ finally:
+ os.chdir(prevdir)
+
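+# Example usage of cd() (illustrative): the working directory switches for the
+# body of the block and is restored afterwards, even if an exception is raised:
+#   with cd('doc'):
+#       print(os.getcwd())   # now inside doc/
+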
def find_program(*filenames):
"""find a program in folders path_lst, and sets env[var]
@param filenames: a list of possible names of the program to search for
@return: the full path of the filename if found, or '' if filename could not be found
"""
paths = os.environ.get('PATH', '').split(os.pathsep)
- suffixes = ('win32' in sys.platform ) and '.exe .com .bat .cmd' or ''
+ suffixes = ('win32' in sys.platform) and '.exe .com .bat .cmd' or ''
for filename in filenames:
- for name in [filename+ext for ext in suffixes.split()]:
+ for name in [filename+ext for ext in suffixes.split(' ')]:
for directory in paths:
full_path = os.path.join(directory, name)
if os.path.isfile(full_path):
@@ -28,53 +43,56 @@ def do_subst_in_file(targetfile, sourcefile, dict):
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
- try:
- f = open(sourcefile, 'rb')
+ with open(sourcefile, 'r') as f:
contents = f.read()
- f.close()
- except:
- print("Can't read source file %s"%sourcefile)
- raise
for (k,v) in list(dict.items()):
v = v.replace('\\','\\\\')
contents = re.sub(k, v, contents)
- try:
- f = open(targetfile, 'wb')
+ with open(targetfile, 'w') as f:
f.write(contents)
- f.close()
+
+def getstatusoutput(cmd):
+ """cmd is a list.
+ """
+ try:
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output, _ = process.communicate()
+ status = process.returncode
except:
- print("Can't write target file %s"%targetfile)
- raise
+ status = -1
+ output = traceback.format_exc()
+ return status, output
+
+def run_cmd(cmd, silent=False):
+ """Raise exception on failure.
+ """
+ info = 'Running: %r in %r' %(' '.join(cmd), os.getcwd())
+ print(info)
+ sys.stdout.flush()
+ if silent:
+ status, output = getstatusoutput(cmd)
+ else:
+ status, output = subprocess.call(cmd), ''
+ if status:
+ msg = 'Error while %s ...\n\terror=%d, output="""%s"""' %(info, status, output)
+ raise Exception(msg)
+
+def assert_is_exe(path):
+ if not path:
+ raise Exception('path is empty.')
+ if not os.path.isfile(path):
+ raise Exception('%r is not a file.' %path)
+ if not os.access(path, os.X_OK):
+ raise Exception('%r is not executable by this user.' %path)
def run_doxygen(doxygen_path, config_file, working_dir, is_silent):
- config_file = os.path.abspath( config_file )
- doxygen_path = doxygen_path
- old_cwd = os.getcwd()
- try:
- os.chdir( working_dir )
+ assert_is_exe(doxygen_path)
+ config_file = os.path.abspath(config_file)
+ with cd(working_dir):
cmd = [doxygen_path, config_file]
- print('Running:', ' '.join( cmd ))
- try:
- import subprocess
- except:
- if os.system( ' '.join( cmd ) ) != 0:
- print('Documentation generation failed')
- return False
- else:
- if is_silent:
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
- else:
- process = subprocess.Popen( cmd )
- stdout, _ = process.communicate()
- if process.returncode:
- print('Documentation generation failed:')
- print(stdout)
- return False
- return True
- finally:
- os.chdir( old_cwd )
+ run_cmd(cmd, is_silent)
-def build_doc( options, make_release=False ):
+def build_doc(options, make_release=False):
if make_release:
options.make_tarball = True
options.with_dot = True
@@ -83,56 +101,56 @@ def build_doc( options, make_release=False ):
options.open = False
options.silent = True
- version = open('version','rt').read().strip()
+ version = open('version', 'rt').read().strip()
output_dir = 'dist/doxygen' # relative to doc/doxyfile location.
- if not os.path.isdir( output_dir ):
- os.makedirs( output_dir )
- top_dir = os.path.abspath( '.' )
+ if not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
+ top_dir = os.path.abspath('.')
html_output_dirname = 'jsoncpp-api-html-' + version
- tarball_path = os.path.join( 'dist', html_output_dirname + '.tar.gz' )
- warning_log_path = os.path.join( output_dir, '../jsoncpp-doxygen-warning.log' )
- html_output_path = os.path.join( output_dir, html_output_dirname )
- def yesno( bool ):
+ tarball_path = os.path.join('dist', html_output_dirname + '.tar.gz')
+ warning_log_path = os.path.join(output_dir, '../jsoncpp-doxygen-warning.log')
+ html_output_path = os.path.join(output_dir, html_output_dirname)
+ def yesno(bool):
return bool and 'YES' or 'NO'
subst_keys = {
'%JSONCPP_VERSION%': version,
'%DOC_TOPDIR%': '',
'%TOPDIR%': top_dir,
- '%HTML_OUTPUT%': os.path.join( '..', output_dir, html_output_dirname ),
+ '%HTML_OUTPUT%': os.path.join('..', output_dir, html_output_dirname),
'%HAVE_DOT%': yesno(options.with_dot),
'%DOT_PATH%': os.path.split(options.dot_path)[0],
'%HTML_HELP%': yesno(options.with_html_help),
'%UML_LOOK%': yesno(options.with_uml_look),
- '%WARNING_LOG_PATH%': os.path.join( '..', warning_log_path )
+ '%WARNING_LOG_PATH%': os.path.join('..', warning_log_path)
}
- if os.path.isdir( output_dir ):
+ if os.path.isdir(output_dir):
print('Deleting directory:', output_dir)
- shutil.rmtree( output_dir )
- if not os.path.isdir( output_dir ):
- os.makedirs( output_dir )
+ shutil.rmtree(output_dir)
+ if not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
- do_subst_in_file( 'doc/doxyfile', 'doc/doxyfile.in', subst_keys )
- ok = run_doxygen( options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent )
+ do_subst_in_file('doc/doxyfile', options.doxyfile_input_path, subst_keys)
+ run_doxygen(options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent)
if not options.silent:
- print(open(warning_log_path, 'rb').read())
+ print(open(warning_log_path, 'r').read())
index_path = os.path.abspath(os.path.join('doc', subst_keys['%HTML_OUTPUT%'], 'index.html'))
print('Generated documentation can be found in:')
print(index_path)
if options.open:
import webbrowser
- webbrowser.open( 'file://' + index_path )
+ webbrowser.open('file://' + index_path)
if options.make_tarball:
print('Generating doc tarball to', tarball_path)
tarball_sources = [
output_dir,
- 'README.txt',
+ 'README.md',
'LICENSE',
'NEWS.txt',
'version'
]
- tarball_basedir = os.path.join( output_dir, html_output_dirname )
- tarball.make_tarball( tarball_path, tarball_sources, tarball_basedir, html_output_dirname )
+ tarball_basedir = os.path.join(output_dir, html_output_dirname)
+ tarball.make_tarball(tarball_path, tarball_sources, tarball_basedir, html_output_dirname)
return tarball_path, html_output_dirname
def main():
@@ -151,6 +169,8 @@ def main():
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
+ parser.add_option('--in', dest="doxyfile_input_path", action='store', default='doc/doxyfile.in',
+ help="""Path to doxygen inputs. [Default: %default]""")
parser.add_option('--with-html-help', dest="with_html_help", action='store_true', default=False,
help="""Enable generation of Microsoft HTML HELP""")
parser.add_option('--no-uml-look', dest="with_uml_look", action='store_false', default=True,
@@ -163,7 +183,7 @@ def main():
help="""Hides doxygen output""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
- build_doc( options )
+ build_doc(options)
if __name__ == '__main__':
main()
diff --git a/3rdparty/jsoncpp/include/json/assertions.h b/3rdparty/jsoncpp/include/json/assertions.h
index 5ef7e7bb7d4..fbec7ae00e2 100644
--- a/3rdparty/jsoncpp/include/json/assertions.h
+++ b/3rdparty/jsoncpp/include/json/assertions.h
@@ -7,35 +7,48 @@
#define CPPTL_JSON_ASSERTIONS_H_INCLUDED
#include <stdlib.h>
+#include <sstream>
#if !defined(JSON_IS_AMALGAMATION)
#include "config.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
+/** It should not be possible for a maliciously designed file to
+ * cause an abort() or seg-fault, so these macros are used only
+ * for pre-condition violations and internal logic errors.
+ */
#if JSON_USE_EXCEPTION
-#include <stdexcept>
-#define JSON_ASSERT(condition) \
- assert(condition); // @todo <= change this into an exception throw
-#define JSON_FAIL_MESSAGE(message) throw std::runtime_error(message);
+
+// @todo <= add detail about condition in exception
+# define JSON_ASSERT(condition) \
+ {if (!(condition)) {Json::throwLogicError( "assert json failed" );}}
+
+# define JSON_FAIL_MESSAGE(message) \
+ { \
+ std::ostringstream oss; oss << message; \
+ Json::throwLogicError(oss.str()); \
+ abort(); \
+ }
+
#else // JSON_USE_EXCEPTION
-#define JSON_ASSERT(condition) assert(condition);
+
+# define JSON_ASSERT(condition) assert(condition)
// The call to assert() will show the failure message in debug builds. In
-// release bugs we write to invalid memory in order to crash hard, so that a
-// debugger or crash reporter gets the chance to take over. We still call exit()
-// afterward in order to tell the compiler that this macro doesn't return.
-#define JSON_FAIL_MESSAGE(message) \
+// release builds we abort, for a core-dump or debugger.
+# define JSON_FAIL_MESSAGE(message) \
{ \
- assert(false&& message); \
- strcpy(reinterpret_cast<char*>(666), message); \
- exit(123); \
+ std::ostringstream oss; oss << message; \
+ assert(false && oss.str().c_str()); \
+ abort(); \
}
+
#endif
#define JSON_ASSERT_MESSAGE(condition, message) \
if (!(condition)) { \
- JSON_FAIL_MESSAGE(message) \
+ JSON_FAIL_MESSAGE(message); \
}
#endif // CPPTL_JSON_ASSERTIONS_H_INCLUDED
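
A minimal sketch (not part of the patch) of how the exception path above surfaces to callers, assuming JSON_USE_EXCEPTION is left at its default of 1 and that a type mismatch such as asCString() on an intValue trips JSON_ASSERT_MESSAGE:

\code
#include <json/json.h>
#include <exception>
#include <iostream>

int main() {
  Json::Value v(5);                    // intValue, not a string
  try {
    v.asCString();                     // precondition violation
  } catch (std::exception const& e) {
    // Json::throwLogicError() raised here instead of the old
    // assert()/std::runtime_error behavior.
    std::cerr << "caught: " << e.what() << std::endl;
  }
  return 0;
}
\endcode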
diff --git a/3rdparty/jsoncpp/include/json/config.h b/3rdparty/jsoncpp/include/json/config.h
index afd3a45607c..0dcd8ae600f 100644
--- a/3rdparty/jsoncpp/include/json/config.h
+++ b/3rdparty/jsoncpp/include/json/config.h
@@ -15,17 +15,6 @@
/// std::map
/// as Value container.
//# define JSON_USE_CPPTL_SMALLMAP 1
-/// If defined, indicates that Json specific container should be used
-/// (hash table & simple deque container with customizable allocator).
-/// THIS FEATURE IS STILL EXPERIMENTAL! There is know bugs: See #3177332
-//# define JSON_VALUE_USE_INTERNAL_MAP 1
-/// Force usage of standard new/malloc based allocator instead of memory pool
-/// based allocator.
-/// The memory pools allocator used optimization (initializing Value and
-/// ValueInternalLink
-/// as if it was a POD) that may cause some validation tool to report errors.
-/// Only has effects if JSON_VALUE_USE_INTERNAL_MAP is defined.
-//# define JSON_USE_SIMPLE_INTERNAL_ALLOCATOR 1
// If non-zero, the library uses exceptions to report bad input instead of C
// assertion macros. The default is to use exceptions.
@@ -81,6 +70,14 @@
#if defined(_MSC_VER) && _MSC_VER >= 1500 // MSVC 2008
/// Indicates that the following function is deprecated.
#define JSONCPP_DEPRECATED(message) __declspec(deprecated(message))
+#elif defined(__clang__) && defined(__has_feature)
+#if __has_feature(attribute_deprecated_with_message)
+#define JSONCPP_DEPRECATED(message) __attribute__ ((deprecated(message)))
+#endif
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+#define JSONCPP_DEPRECATED(message) __attribute__ ((deprecated(message)))
+#elif defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+#define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__))
#endif
#if !defined(JSONCPP_DEPRECATED)
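
Illustrative only (the function names here are hypothetical): the macro defined by the ladder above is applied like this, expanding to __declspec(deprecated(msg)) on MSVC >= 2008, __attribute__((deprecated(msg))) on recent Clang/GCC, __attribute__((__deprecated__)) on older GCC, and nothing elsewhere:

\code
JSONCPP_DEPRECATED("Use newApi() instead.")
void oldApi();    // callers get a compile-time deprecation warning

void newApi();
\endcode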
diff --git a/3rdparty/jsoncpp/include/json/forwards.h b/3rdparty/jsoncpp/include/json/forwards.h
index 84a26cd2f72..ccfe09abf4b 100644
--- a/3rdparty/jsoncpp/include/json/forwards.h
+++ b/3rdparty/jsoncpp/include/json/forwards.h
@@ -31,12 +31,6 @@ class Value;
class ValueIteratorBase;
class ValueIterator;
class ValueConstIterator;
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-class ValueMapAllocator;
-class ValueInternalLink;
-class ValueInternalArray;
-class ValueInternalMap;
-#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP
} // namespace Json
diff --git a/3rdparty/jsoncpp/include/json/reader.h b/3rdparty/jsoncpp/include/json/reader.h
index 98814d50e29..c8ff747e4b1 100644
--- a/3rdparty/jsoncpp/include/json/reader.h
+++ b/3rdparty/jsoncpp/include/json/reader.h
@@ -14,6 +14,7 @@
#include <iosfwd>
#include <stack>
#include <string>
+#include <istream>
// Disable warning C4251: <data member>: <type> needs to have dll-interface to
// be used by...
@@ -27,6 +28,7 @@ namespace Json {
/** \brief Unserialize a <a HREF="http://www.json.org">JSON</a> document into a
*Value.
*
+ * \deprecated Use CharReader and CharReaderBuilder.
*/
class JSON_API Reader {
public:
@@ -78,7 +80,7 @@ public:
document to read.
* \param endDoc Pointer on the end of the UTF-8 encoded string of the
document to read.
- \ Must be >= beginDoc.
+ * Must be >= beginDoc.
* \param root [out] Contains the root value of the document if it was
* successfully parsed.
* \param collectComments \c true to collect comment and allow writing them
@@ -108,7 +110,7 @@ public:
* during parsing.
* \deprecated Use getFormattedErrorMessages() instead (typo fix).
*/
- JSONCPP_DEPRECATED("Use getFormattedErrorMessages instead")
+ JSONCPP_DEPRECATED("Use getFormattedErrorMessages() instead.")
std::string getFormatedErrorMessages() const;
  /** \brief Returns a user-friendly string that lists errors in the parsed
@@ -187,7 +189,6 @@ private:
typedef std::deque<ErrorInfo> Errors;
- bool expectToken(TokenType type, Token& token, const char* message);
bool readToken(Token& token);
void skipSpaces();
bool match(Location pattern, int patternLength);
@@ -239,8 +240,132 @@ private:
std::string commentsBefore_;
Features features_;
bool collectComments_;
+}; // Reader
+
+/** Interface for reading JSON from a char array.
+ */
+class JSON_API CharReader {
+public:
+ virtual ~CharReader() {}
+ /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+ document.
+ * The document must be a UTF-8 encoded string containing the document to read.
+ *
+ * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
+ document to read.
+ * \param endDoc Pointer on the end of the UTF-8 encoded string of the
+ document to read.
+ * Must be >= beginDoc.
+ * \param root [out] Contains the root value of the document if it was
+ * successfully parsed.
+   * \param errs [out] If not NULL, receives a user-friendly string that
+   *        lists the errors found in the parsed document.
+ * \return \c true if the document was successfully parsed, \c false if an
+ error occurred.
+ */
+ virtual bool parse(
+ char const* beginDoc, char const* endDoc,
+ Value* root, std::string* errs) = 0;
+
+ class Factory {
+ public:
+ virtual ~Factory() {}
+ /** \brief Allocate a CharReader via operator new().
+ * \throw std::exception if something goes wrong (e.g. invalid settings)
+ */
+ virtual CharReader* newCharReader() const = 0;
+ }; // Factory
+}; // CharReader
+
+/** \brief Build a CharReader implementation.
+
+Usage:
+\code
+ using namespace Json;
+ CharReaderBuilder builder;
+ builder["collectComments"] = false;
+ Value value;
+ std::string errs;
+ bool ok = parseFromStream(builder, std::cin, &value, &errs);
+\endcode
+*/
+class JSON_API CharReaderBuilder : public CharReader::Factory {
+public:
+ // Note: We use a Json::Value so that we can add data-members to this class
+ // without a major version bump.
+ /** Configuration of this builder.
+     Available settings (case-sensitive):
+ - `"collectComments": false or true`
+      - true to collect comments and allow writing them
+ back during serialization, false to discard comments.
+ This parameter is ignored if allowComments is false.
+ - `"allowComments": false or true`
+ - true if comments are allowed.
+ - `"strictRoot": false or true`
+ - true if root must be either an array or an object value
+ - `"allowDroppedNullPlaceholders": false or true`
+ - true if dropped null placeholders are allowed. (See StreamWriterBuilder.)
+ - `"allowNumericKeys": false or true`
+ - true if numeric object keys are allowed.
+ - `"allowSingleQuotes": false or true`
+    - true if single-quoted ('') strings are allowed (both keys and values)
+ - `"stackLimit": integer`
+ - Exceeding stackLimit (recursive depth of `readValue()`) will
+ cause an exception.
+ - This is a security issue (seg-faults caused by deeply nested JSON),
+ so the default is low.
+ - `"failIfExtra": false or true`
+ - If true, `parse()` returns false when extra non-whitespace trails
+ the JSON value in the input string.
+ - `"rejectDupKeys": false or true`
+ - If true, `parse()` returns false when a key is duplicated within an object.
+
+  You can examine `settings_` yourself
+ to see the defaults. You can also write and read them just like any
+ JSON Value.
+ \sa setDefaults()
+ */
+ Json::Value settings_;
+
+ CharReaderBuilder();
+ virtual ~CharReaderBuilder();
+
+ virtual CharReader* newCharReader() const;
+
+ /** \return true if 'settings' are legal and consistent;
+ * otherwise, indicate bad settings via 'invalid'.
+ */
+ bool validate(Json::Value* invalid) const;
+
+ /** A simple way to update a specific setting.
+ */
+ Value& operator[](std::string key);
+
+ /** Called by ctor, but you can use this to reset settings_.
+ * \pre 'settings' != NULL (but Json::null is fine)
+ * \remark Defaults:
+ * \snippet src/lib_json/json_reader.cpp CharReaderBuilderDefaults
+ */
+ static void setDefaults(Json::Value* settings);
+ /** Same as old Features::strictMode().
+ * \pre 'settings' != NULL (but Json::null is fine)
+ * \remark Defaults:
+ * \snippet src/lib_json/json_reader.cpp CharReaderBuilderStrictMode
+ */
+ static void strictMode(Json::Value* settings);
};
+/** Consume entire stream and use its begin/end.
+ * Someday we might have a real StreamReader, but for now this
+ * is convenient.
+ */
+bool JSON_API parseFromStream(
+ CharReader::Factory const&,
+ std::istream&,
+ Value* root, std::string* errs);
+
/** \brief Read from 'sin' into 'root'.
Always keep comments from the input JSON.
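
A hedged usage sketch for the new CharReader path when the input is an in-memory buffer rather than a stream (buffer contents and variable names are illustrative; std::unique_ptr follows the writer.h examples later in this patch):

\code
using namespace Json;

std::string const doc = "{\"age\": 42}";
CharReaderBuilder builder;
CharReaderBuilder::strictMode(&builder.settings_); // reject comments etc.
std::unique_ptr<CharReader> const reader(builder.newCharReader());
Value root;
std::string errs;
bool ok = reader->parse(doc.data(), doc.data() + doc.size(), &root, &errs);
if (!ok)
  std::cerr << errs << std::endl;
\endcode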
diff --git a/3rdparty/jsoncpp/include/json/value.h b/3rdparty/jsoncpp/include/json/value.h
index 197a85614df..2ac94fa8129 100644
--- a/3rdparty/jsoncpp/include/json/value.h
+++ b/3rdparty/jsoncpp/include/json/value.h
@@ -11,6 +11,7 @@
#endif // if !defined(JSON_IS_AMALGAMATION)
#include <string>
#include <vector>
+#include <exception>
#ifndef JSON_USE_CPPTL_SMALLMAP
#include <map>
@@ -32,6 +33,31 @@
*/
namespace Json {
+/** Base class for all exceptions we throw.
+ *
+ * We use nothing but these internally. Of course, STL can throw others.
+ */
+class JSON_API Exception;
+/** Exceptions which the user cannot easily avoid.
+ *
+ * E.g. out-of-memory (when we use malloc), stack-overflow, malicious input
+ *
+ * \remark derived from Json::Exception
+ */
+class JSON_API RuntimeError;
+/** Exceptions thrown by JSON_ASSERT/JSON_FAIL macros.
+ *
+ * These are precondition-violations (user bugs) and internal errors (our bugs).
+ *
+ * \remark derived from Json::Exception
+ */
+class JSON_API LogicError;
+
+/// used internally
+void throwRuntimeError(std::string const& msg);
+/// used internally
+void throwLogicError(std::string const& msg);
+
/** \brief Type of the value held by a Value object.
*/
enum ValueType {
@@ -74,14 +100,14 @@ enum CommentPlacement {
*/
class JSON_API StaticString {
public:
- explicit StaticString(const char* czstring) : str_(czstring) {}
+ explicit StaticString(const char* czstring) : c_str_(czstring) {}
- operator const char*() const { return str_; }
+ operator const char*() const { return c_str_; }
- const char* c_str() const { return str_; }
+ const char* c_str() const { return c_str_; }
private:
- const char* str_;
+ const char* c_str_;
};
/** \brief Represents a <a HREF="http://www.json.org">JSON</a> value.
@@ -99,26 +125,27 @@ private:
* The type of the held value is represented by a #ValueType and
* can be obtained using type().
*
- * values of an #objectValue or #arrayValue can be accessed using operator[]()
- *methods.
- * Non const methods will automatically create the a #nullValue element
+ * Values of an #objectValue or #arrayValue can be accessed using operator[]()
+ * methods.
+ * Non-const methods will automatically create a #nullValue element
* if it does not exist.
- * The sequence of an #arrayValue will be automatically resize and initialized
+ * The sequence of an #arrayValue will be automatically resized and initialized
* with #nullValue. resize() can be used to enlarge or truncate an #arrayValue.
*
- * The get() methods can be used to obtanis default value in the case the
- *required element
- * does not exist.
+ * The get() methods can be used to obtain a default value when the
+ * required element does not exist.
*
 * It is possible to iterate over the members of an #objectValue using
* the getMemberNames() method.
+ *
+ * \note #Value string-lengths fit in size_t, but keys must be < 2^30.
+ * (The reason is an implementation detail.) A #CharReader will raise an
+ * exception if a bound is exceeded to avoid security holes in your app,
+ * but the Value API does *not* check bounds. That is the responsibility
+ * of the caller.
*/
class JSON_API Value {
friend class ValueIteratorBase;
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- friend class ValueInternalLink;
- friend class ValueInternalMap;
-#endif
public:
typedef std::vector<std::string> Members;
typedef ValueIterator iterator;
@@ -133,7 +160,8 @@ public:
typedef Json::LargestUInt LargestUInt;
typedef Json::ArrayIndex ArrayIndex;
- static const Value& null;
+ static const Value& null; ///< We regret this reference to a global instance; prefer the simpler Value().
+ static const Value& nullRef; ///< just a kludge for binary-compatibility; same as null
/// Minimum signed integer value that can be stored in a Json::Value.
static const LargestInt minLargestInt;
/// Maximum signed integer value that can be stored in a Json::Value.
@@ -159,7 +187,6 @@ public:
private:
#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
class CZString {
public:
enum DuplicationPolicy {
@@ -168,20 +195,31 @@ private:
duplicateOnCopy
};
CZString(ArrayIndex index);
- CZString(const char* cstr, DuplicationPolicy allocate);
- CZString(const CZString& other);
+ CZString(char const* str, unsigned length, DuplicationPolicy allocate);
+ CZString(CZString const& other);
~CZString();
CZString& operator=(CZString other);
- bool operator<(const CZString& other) const;
- bool operator==(const CZString& other) const;
+ bool operator<(CZString const& other) const;
+ bool operator==(CZString const& other) const;
ArrayIndex index() const;
- const char* c_str() const;
+ //const char* c_str() const; ///< \deprecated
+ char const* data() const;
+ unsigned length() const;
bool isStaticString() const;
private:
void swap(CZString& other);
- const char* cstr_;
- ArrayIndex index_;
+
+ struct StringStorage {
+ unsigned policy_: 2;
+ unsigned length_: 30; // 1GB max
+ };
+
+ char const* cstr_; // actually, a prefixed string, unless policy is noDup
+ union {
+ ArrayIndex index_;
+ StringStorage storage_;
+ };
};
public:
@@ -190,7 +228,6 @@ public:
#else
typedef CppTL::SmallMap<CZString, Value> ObjectValues;
#endif // ifndef JSON_USE_CPPTL_SMALLMAP
-#endif // ifndef JSON_VALUE_USE_INTERNAL_MAP
#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
public:
@@ -217,47 +254,59 @@ Json::Value obj_value(Json::objectValue); // {}
Value(UInt64 value);
#endif // if defined(JSON_HAS_INT64)
Value(double value);
- Value(const char* value);
- Value(const char* beginValue, const char* endValue);
+  Value(const char* value); ///< Copy until first 0. (NULL causes a seg-fault.)
+ Value(const char* beginValue, const char* endValue); ///< Copy all, incl zeroes.
/** \brief Constructs a value from a static string.
 * Like other value string constructors, but does not duplicate the string for
* internal storage. The given string must remain alive after the call to this
* constructor.
+ * \note This works only for null-terminated strings. (We cannot change the
+ * size of this class, so we have nowhere to store the length,
+ * which might be computed later for various operations.)
+ *
* Example of usage:
* \code
- * Json::Value aValue( StaticString("some text") );
+ * static StaticString foo("some text");
+ * Json::Value aValue(foo);
* \endcode
*/
Value(const StaticString& value);
- Value(const std::string& value);
+ Value(const std::string& value); ///< Copy data() til size(). Embedded zeroes too.
#ifdef JSON_USE_CPPTL
Value(const CppTL::ConstString& value);
#endif
Value(bool value);
+ /// Deep copy.
Value(const Value& other);
~Value();
+ /// Deep copy, then swap(other).
+  /// \note Overwrites existing comments. To preserve comments, use #swapPayload().
Value& operator=(Value other);
- /// Swap values.
- /// \note Currently, comments are intentionally not swapped, for
- /// both logic and efficiency.
+ /// Swap everything.
void swap(Value& other);
+ /// Swap values but leave comments and source offsets in place.
+ void swapPayload(Value& other);
ValueType type() const;
+ /// Compare payload only, not comments etc.
bool operator<(const Value& other) const;
bool operator<=(const Value& other) const;
bool operator>=(const Value& other) const;
bool operator>(const Value& other) const;
-
bool operator==(const Value& other) const;
bool operator!=(const Value& other) const;
-
int compare(const Value& other) const;
- const char* asCString() const;
- std::string asString() const;
+ const char* asCString() const; ///< Embedded zeroes could cause you trouble!
+ std::string asString() const; ///< Embedded zeroes are possible.
+ /** Get raw char* of string-value.
+   * \return false if this is not a stringValue. (Seg-fault if str or end is NULL.)
+ */
+ bool getString(
+ char const** str, char const** end) const;
#ifdef JSON_USE_CPPTL
CppTL::ConstString asConstString() const;
#endif
@@ -348,19 +397,23 @@ Json::Value obj_value(Json::objectValue); // {}
Value& append(const Value& value);
/// Access an object value by name, create a null member if it does not exist.
+  /// \note Because of our implementation, keys are limited to 2^30 - 1 chars.
+ /// Exceeding that will cause an exception.
Value& operator[](const char* key);
/// Access an object value by name, returns null if there is no member with
/// that name.
const Value& operator[](const char* key) const;
/// Access an object value by name, create a null member if it does not exist.
+ /// \param key may contain embedded nulls.
Value& operator[](const std::string& key);
/// Access an object value by name, returns null if there is no member with
/// that name.
+ /// \param key may contain embedded nulls.
const Value& operator[](const std::string& key) const;
/** \brief Access an object value by name, create a null member if it does not
exist.
- * If the object as no entry for that name, then the member name used to store
+ * If the object has no entry for that name, then the member name used to store
* the new entry is not duplicated.
* Example of use:
* \code
@@ -378,27 +431,69 @@ Json::Value obj_value(Json::objectValue); // {}
const Value& operator[](const CppTL::ConstString& key) const;
#endif
  /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
Value get(const char* key, const Value& defaultValue) const;
  /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
+ /// \param key may contain embedded nulls.
+ Value get(const char* key, const char* end, const Value& defaultValue) const;
+  /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
+ /// \param key may contain embedded nulls.
Value get(const std::string& key, const Value& defaultValue) const;
#ifdef JSON_USE_CPPTL
  /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
Value get(const CppTL::ConstString& key, const Value& defaultValue) const;
#endif
+  /// Most general and efficient version of isMember() const, get() const,
+  /// and operator[]() const.
+ /// \note As stated elsewhere, behavior is undefined if (end-key) >= 2^30
+ Value const* find(char const* key, char const* end) const;
+ /// Most general and efficient version of object-mutators.
+ /// \note As stated elsewhere, behavior is undefined if (end-key) >= 2^30
+ /// \return non-zero, but JSON_ASSERT if this is neither object nor nullValue.
+ Value const* demand(char const* key, char const* end);
/// \brief Remove and return the named member.
///
/// Do nothing if it did not exist.
/// \return the removed Value, or null.
/// \pre type() is objectValue or nullValue
/// \post type() is unchanged
+ /// \deprecated
Value removeMember(const char* key);
/// Same as removeMember(const char*)
+ /// \param key may contain embedded nulls.
+ /// \deprecated
Value removeMember(const std::string& key);
+ /// Same as removeMember(const char* key, const char* end, Value* removed),
+ /// but 'key' is null-terminated.
+ bool removeMember(const char* key, Value* removed);
+ /** \brief Remove the named map member.
+
+ Update 'removed' iff removed.
+ \param key may contain embedded nulls.
+ \return true iff removed (no exceptions)
+ */
+ bool removeMember(std::string const& key, Value* removed);
+ /// Same as removeMember(std::string const& key, Value* removed)
+ bool removeMember(const char* key, const char* end, Value* removed);
+ /** \brief Remove the indexed array element.
+
+      This is an expensive O(n) operation.
+ Update 'removed' iff removed.
+ \return true iff removed (no exceptions)
+ */
+ bool removeIndex(ArrayIndex i, Value* removed);
/// Return true if the object has a member named key.
+ /// \note 'key' must be null-terminated.
bool isMember(const char* key) const;
/// Return true if the object has a member named key.
+ /// \param key may contain embedded nulls.
bool isMember(const std::string& key) const;
+ /// Same as isMember(std::string const& key)const
+ bool isMember(const char* key, const char* end) const;
#ifdef JSON_USE_CPPTL
/// Return true if the object has a member named key.
bool isMember(const CppTL::ConstString& key) const;
@@ -416,9 +511,12 @@ Json::Value obj_value(Json::objectValue); // {}
// EnumValues enumValues() const;
//# endif
- /// Comments must be //... or /* ... */
+ /// \deprecated Always pass len.
+ JSONCPP_DEPRECATED("Use setComment(std::string const&) instead.")
void setComment(const char* comment, CommentPlacement placement);
/// Comments must be //... or /* ... */
+ void setComment(const char* comment, size_t len, CommentPlacement placement);
+ /// Comments must be //... or /* ... */
void setComment(const std::string& comment, CommentPlacement placement);
bool hasComment(CommentPlacement placement) const;
/// Include delimiters and embedded newlines.
@@ -442,26 +540,14 @@ Json::Value obj_value(Json::objectValue); // {}
private:
void initBasic(ValueType type, bool allocated = false);
- Value& resolveReference(const char* key, bool isStatic);
-
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- inline bool isItemAvailable() const { return itemIsUsed_ == 0; }
-
- inline void setItemUsed(bool isUsed = true) { itemIsUsed_ = isUsed ? 1 : 0; }
-
- inline bool isMemberNameStatic() const { return memberNameIsStatic_ == 0; }
-
- inline void setMemberNameIsStatic(bool isStatic) {
- memberNameIsStatic_ = isStatic ? 1 : 0;
- }
-#endif // # ifdef JSON_VALUE_USE_INTERNAL_MAP
+ Value& resolveReference(const char* key);
+ Value& resolveReference(const char* key, const char* end);
-private:
struct CommentInfo {
CommentInfo();
~CommentInfo();
- void setComment(const char* text);
+ void setComment(const char* text, size_t len);
char* comment_;
};
@@ -480,20 +566,12 @@ private:
LargestUInt uint_;
double real_;
bool bool_;
- char* string_;
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- ValueInternalArray* array_;
- ValueInternalMap* map_;
-#else
+ char* string_; // actually ptr to unsigned, followed by str, unless !allocated_
ObjectValues* map_;
-#endif
} value_;
ValueType type_ : 8;
- int allocated_ : 1; // Notes: if declared as bool, bitfield is useless.
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- unsigned int itemIsUsed_ : 1; // used by the ValueInternalMap container.
- int memberNameIsStatic_ : 1; // used by the ValueInternalMap container.
-#endif
+  unsigned int allocated_ : 1; // Note: if declared as bool, the bitfield is useless.
+ // If not allocated_, string_ must be null-terminated.
CommentInfo* comments_;
// [start, limit) byte offsets in the source JSON text from which this Value
@@ -565,345 +643,6 @@ private:
Args args_;
};
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-/** \brief Allocator to customize Value internal map.
- * Below is an example of a simple implementation (default implementation
- actually
- * use memory pool for speed).
- * \code
- class DefaultValueMapAllocator : public ValueMapAllocator
- {
- public: // overridden from ValueMapAllocator
- virtual ValueInternalMap *newMap()
- {
- return new ValueInternalMap();
- }
-
- virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other )
- {
- return new ValueInternalMap( other );
- }
-
- virtual void destructMap( ValueInternalMap *map )
- {
- delete map;
- }
-
- virtual ValueInternalLink *allocateMapBuckets( unsigned int size )
- {
- return new ValueInternalLink[size];
- }
-
- virtual void releaseMapBuckets( ValueInternalLink *links )
- {
- delete [] links;
- }
-
- virtual ValueInternalLink *allocateMapLink()
- {
- return new ValueInternalLink();
- }
-
- virtual void releaseMapLink( ValueInternalLink *link )
- {
- delete link;
- }
- };
- * \endcode
- */
-class JSON_API ValueMapAllocator {
-public:
- virtual ~ValueMapAllocator();
- virtual ValueInternalMap* newMap() = 0;
- virtual ValueInternalMap* newMapCopy(const ValueInternalMap& other) = 0;
- virtual void destructMap(ValueInternalMap* map) = 0;
- virtual ValueInternalLink* allocateMapBuckets(unsigned int size) = 0;
- virtual void releaseMapBuckets(ValueInternalLink* links) = 0;
- virtual ValueInternalLink* allocateMapLink() = 0;
- virtual void releaseMapLink(ValueInternalLink* link) = 0;
-};
-
-/** \brief ValueInternalMap hash-map bucket chain link (for internal use only).
- * \internal previous_ & next_ allows for bidirectional traversal.
- */
-class JSON_API ValueInternalLink {
-public:
- enum {
- itemPerLink = 6
- }; // sizeof(ValueInternalLink) = 128 on 32 bits architecture.
- enum InternalFlags {
- flagAvailable = 0,
- flagUsed = 1
- };
-
- ValueInternalLink();
-
- ~ValueInternalLink();
-
- Value items_[itemPerLink];
- char* keys_[itemPerLink];
- ValueInternalLink* previous_;
- ValueInternalLink* next_;
-};
-
-/** \brief A linked page based hash-table implementation used internally by
- *Value.
- * \internal ValueInternalMap is a tradional bucket based hash-table, with a
- *linked
- * list in each bucket to handle collision. There is an addional twist in that
- * each node of the collision linked list is a page containing a fixed amount of
- * value. This provides a better compromise between memory usage and speed.
- *
- * Each bucket is made up of a chained list of ValueInternalLink. The last
- * link of a given bucket can be found in the 'previous_' field of the following
- *bucket.
- * The last link of the last bucket is stored in tailLink_ as it has no
- *following bucket.
- * Only the last link of a bucket may contains 'available' item. The last link
- *always
- * contains at least one element unless is it the bucket one very first link.
- */
-class JSON_API ValueInternalMap {
- friend class ValueIteratorBase;
- friend class Value;
-
-public:
- typedef unsigned int HashKey;
- typedef unsigned int BucketIndex;
-
-#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
- struct IteratorState {
- IteratorState() : map_(0), link_(0), itemIndex_(0), bucketIndex_(0) {}
- ValueInternalMap* map_;
- ValueInternalLink* link_;
- BucketIndex itemIndex_;
- BucketIndex bucketIndex_;
- };
-#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
- ValueInternalMap();
- ValueInternalMap(const ValueInternalMap& other);
- ValueInternalMap& operator=(ValueInternalMap other);
- ~ValueInternalMap();
-
- void swap(ValueInternalMap& other);
-
- BucketIndex size() const;
-
- void clear();
-
- bool reserveDelta(BucketIndex growth);
-
- bool reserve(BucketIndex newItemCount);
-
- const Value* find(const char* key) const;
-
- Value* find(const char* key);
-
- Value& resolveReference(const char* key, bool isStatic);
-
- void remove(const char* key);
-
- void doActualRemove(ValueInternalLink* link,
- BucketIndex index,
- BucketIndex bucketIndex);
-
- ValueInternalLink*& getLastLinkInBucket(BucketIndex bucketIndex);
-
- Value& setNewItem(const char* key,
- bool isStatic,
- ValueInternalLink* link,
- BucketIndex index);
-
- Value& unsafeAdd(const char* key, bool isStatic, HashKey hashedKey);
-
- HashKey hash(const char* key) const;
-
- int compare(const ValueInternalMap& other) const;
-
-private:
- void makeBeginIterator(IteratorState& it) const;
- void makeEndIterator(IteratorState& it) const;
- static bool equals(const IteratorState& x, const IteratorState& other);
- static void increment(IteratorState& iterator);
- static void incrementBucket(IteratorState& iterator);
- static void decrement(IteratorState& iterator);
- static const char* key(const IteratorState& iterator);
- static const char* key(const IteratorState& iterator, bool& isStatic);
- static Value& value(const IteratorState& iterator);
- static int distance(const IteratorState& x, const IteratorState& y);
-
-private:
- ValueInternalLink* buckets_;
- ValueInternalLink* tailLink_;
- BucketIndex bucketsSize_;
- BucketIndex itemCount_;
-};
-
-/** \brief A simplified deque implementation used internally by Value.
-* \internal
-* It is based on a list of fixed "page", each page contains a fixed number of
-*items.
-* Instead of using a linked-list, a array of pointer is used for fast item
-*look-up.
-* Look-up for an element is as follow:
-* - compute page index: pageIndex = itemIndex / itemsPerPage
-* - look-up item in page: pages_[pageIndex][itemIndex % itemsPerPage]
-*
-* Insertion is amortized constant time (only the array containing the index of
-*pointers
-* need to be reallocated when items are appended).
-*/
-class JSON_API ValueInternalArray {
- friend class Value;
- friend class ValueIteratorBase;
-
-public:
- enum {
- itemsPerPage = 8
- }; // should be a power of 2 for fast divide and modulo.
- typedef Value::ArrayIndex ArrayIndex;
- typedef unsigned int PageIndex;
-
-#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
- struct IteratorState // Must be a POD
- {
- IteratorState() : array_(0), currentPageIndex_(0), currentItemIndex_(0) {}
- ValueInternalArray* array_;
- Value** currentPageIndex_;
- unsigned int currentItemIndex_;
- };
-#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
- ValueInternalArray();
- ValueInternalArray(const ValueInternalArray& other);
- ValueInternalArray& operator=(ValueInternalArray other);
- ~ValueInternalArray();
- void swap(ValueInternalArray& other);
-
- void clear();
- void resize(ArrayIndex newSize);
-
- Value& resolveReference(ArrayIndex index);
-
- Value* find(ArrayIndex index) const;
-
- ArrayIndex size() const;
-
- int compare(const ValueInternalArray& other) const;
-
-private:
- static bool equals(const IteratorState& x, const IteratorState& other);
- static void increment(IteratorState& iterator);
- static void decrement(IteratorState& iterator);
- static Value& dereference(const IteratorState& iterator);
- static Value& unsafeDereference(const IteratorState& iterator);
- static int distance(const IteratorState& x, const IteratorState& y);
- static ArrayIndex indexOf(const IteratorState& iterator);
- void makeBeginIterator(IteratorState& it) const;
- void makeEndIterator(IteratorState& it) const;
- void makeIterator(IteratorState& it, ArrayIndex index) const;
-
- void makeIndexValid(ArrayIndex index);
-
- Value** pages_;
- ArrayIndex size_;
- PageIndex pageCount_;
-};
-
-/** \brief Experimental: do not use. Allocator to customize Value internal
-array.
- * Below is an example of a simple implementation (actual implementation use
- * memory pool).
- \code
-class DefaultValueArrayAllocator : public ValueArrayAllocator
-{
-public: // overridden from ValueArrayAllocator
-virtual ~DefaultValueArrayAllocator()
-{
-}
-
-virtual ValueInternalArray *newArray()
-{
- return new ValueInternalArray();
-}
-
-virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other )
-{
- return new ValueInternalArray( other );
-}
-
-virtual void destruct( ValueInternalArray *array )
-{
- delete array;
-}
-
-virtual void reallocateArrayPageIndex( Value **&indexes,
- ValueInternalArray::PageIndex
-&indexCount,
- ValueInternalArray::PageIndex
-minNewIndexCount )
-{
- ValueInternalArray::PageIndex newIndexCount = (indexCount*3)/2 + 1;
- if ( minNewIndexCount > newIndexCount )
- newIndexCount = minNewIndexCount;
- void *newIndexes = realloc( indexes, sizeof(Value*) * newIndexCount );
- if ( !newIndexes )
- throw std::bad_alloc();
- indexCount = newIndexCount;
- indexes = static_cast<Value **>( newIndexes );
-}
-virtual void releaseArrayPageIndex( Value **indexes,
- ValueInternalArray::PageIndex indexCount )
-{
- if ( indexes )
- free( indexes );
-}
-
-virtual Value *allocateArrayPage()
-{
- return static_cast<Value *>( malloc( sizeof(Value) *
-ValueInternalArray::itemsPerPage ) );
-}
-
-virtual void releaseArrayPage( Value *value )
-{
- if ( value )
- free( value );
-}
-};
- \endcode
- */
-class JSON_API ValueArrayAllocator {
-public:
- virtual ~ValueArrayAllocator();
- virtual ValueInternalArray* newArray() = 0;
- virtual ValueInternalArray* newArrayCopy(const ValueInternalArray& other) = 0;
- virtual void destructArray(ValueInternalArray* array) = 0;
- /** \brief Reallocate array page index.
- * Reallocates an array of pointer on each page.
- * \param indexes [input] pointer on the current index. May be \c NULL.
- * [output] pointer on the new index of at least
- * \a minNewIndexCount pages.
- * \param indexCount [input] current number of pages in the index.
- * [output] number of page the reallocated index can handle.
- * \b MUST be >= \a minNewIndexCount.
- * \param minNewIndexCount Minimum number of page the new index must be able
- * to
- * handle.
- */
- virtual void
- reallocateArrayPageIndex(Value**& indexes,
- ValueInternalArray::PageIndex& indexCount,
- ValueInternalArray::PageIndex minNewIndexCount) = 0;
- virtual void
- releaseArrayPageIndex(Value** indexes,
- ValueInternalArray::PageIndex indexCount) = 0;
- virtual Value* allocateArrayPage() = 0;
- virtual void releaseArrayPage(Value* value) = 0;
-};
-#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP
-
/** \brief base class for Value iterators.
*
*/
@@ -914,32 +653,35 @@ public:
typedef int difference_type;
typedef ValueIteratorBase SelfType;
- ValueIteratorBase();
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
- explicit ValueIteratorBase(const Value::ObjectValues::iterator& current);
-#else
- ValueIteratorBase(const ValueInternalArray::IteratorState& state);
- ValueIteratorBase(const ValueInternalMap::IteratorState& state);
-#endif
-
bool operator==(const SelfType& other) const { return isEqual(other); }
bool operator!=(const SelfType& other) const { return !isEqual(other); }
difference_type operator-(const SelfType& other) const {
- return computeDistance(other);
+ return other.computeDistance(*this);
}
/// Return either the index or the member name of the referenced value as a
/// Value.
Value key() const;
- /// Return the index of the referenced Value. -1 if it is not an arrayValue.
+ /// Return the index of the referenced Value, or -1 if it is not an arrayValue.
UInt index() const;
+ /// Return the member name of the referenced Value, or "" if it is not an
+ /// objectValue.
+ /// \note Avoid `c_str()` on result, as embedded zeroes are possible.
+ std::string name() const;
+
/// Return the member name of the referenced Value. "" if it is not an
/// objectValue.
- const char* memberName() const;
+ /// \deprecated This cannot be used for UTF-8 strings, since there can be embedded nulls.
+ JSONCPP_DEPRECATED("Use `key = name();` instead.")
+ char const* memberName() const;
+ /// Return the member name of the referenced Value, or NULL if it is not an
+ /// objectValue.
+ /// \note Better version than memberName(). Allows embedded nulls.
+ char const* memberName(char const** end) const;
protected:
Value& deref() const;
@@ -955,17 +697,15 @@ protected:
void copy(const SelfType& other);
private:
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
Value::ObjectValues::iterator current_;
// Indicates that iterator is for a null value.
bool isNull_;
-#else
- union {
- ValueInternalArray::IteratorState array_;
- ValueInternalMap::IteratorState map_;
- } iterator_;
- bool isArray_;
-#endif
+
+public:
+ // For some reason, BORLAND needs these at the end, rather
+ // than earlier. No idea why.
+ ValueIteratorBase();
+ explicit ValueIteratorBase(const Value::ObjectValues::iterator& current);
};
/** \brief const iterator for object and array value.
@@ -976,8 +716,8 @@ class JSON_API ValueConstIterator : public ValueIteratorBase {
public:
typedef const Value value_type;
- typedef unsigned int size_t;
- typedef int difference_type;
+ //typedef unsigned int size_t;
+ //typedef int difference_type;
typedef const Value& reference;
typedef const Value* pointer;
typedef ValueConstIterator SelfType;
@@ -987,12 +727,7 @@ public:
private:
/*! \internal Use by Value to create an iterator.
*/
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
explicit ValueConstIterator(const Value::ObjectValues::iterator& current);
-#else
- ValueConstIterator(const ValueInternalArray::IteratorState& state);
- ValueConstIterator(const ValueInternalMap::IteratorState& state);
-#endif
public:
SelfType& operator=(const ValueIteratorBase& other);
@@ -1043,12 +778,7 @@ public:
private:
/*! \internal Use by Value to create an iterator.
*/
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
explicit ValueIterator(const Value::ObjectValues::iterator& current);
-#else
- ValueIterator(const ValueInternalArray::IteratorState& state);
- ValueIterator(const ValueInternalMap::IteratorState& state);
-#endif
public:
SelfType& operator=(const SelfType& other);
@@ -1081,6 +811,14 @@ public:
} // namespace Json
+
+namespace std {
+/// Specialize std::swap() for Json::Value.
+template<>
+inline void swap(Json::Value& a, Json::Value& b) { a.swap(b); }
+}
+
+
#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#pragma warning(pop)
#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
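
A short sketch (names and values illustrative, not from the library docs) exercising the Value APIs added or changed above:

\code
using namespace Json;

Value obj(objectValue);
obj["name"] = "json";                       // operator[] creates the member
Value port = obj.get("port", 8080);         // deep-copied default; no "port" key

Value removed;
if (obj.removeMember("name", &removed)) {   // new overload: no exceptions
  // 'removed' now holds "json"; obj no longer has the member
}

char const* key = "name";
Value const* found = obj.find(key, key + 4); // most general lookup
// found == NULL here, since "name" was just removed

Value other(arrayValue);
std::swap(obj, other);                      // new std::swap specialization
\endcode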
diff --git a/3rdparty/jsoncpp/include/json/version.h b/3rdparty/jsoncpp/include/json/version.h
index 58fca8a9820..a4db8b0ae7f 100644
--- a/3rdparty/jsoncpp/include/json/version.h
+++ b/3rdparty/jsoncpp/include/json/version.h
@@ -4,10 +4,10 @@
#ifndef JSON_VERSION_H_INCLUDED
# define JSON_VERSION_H_INCLUDED
-# define JSONCPP_VERSION_STRING "1.1.0"
+# define JSONCPP_VERSION_STRING "1.6.2"
# define JSONCPP_VERSION_MAJOR 1
-# define JSONCPP_VERSION_MINOR 1
-# define JSONCPP_VERSION_PATCH 0
+# define JSONCPP_VERSION_MINOR 6
+# define JSONCPP_VERSION_PATCH 2
# define JSONCPP_VERSION_QUALIFIER
# define JSONCPP_VERSION_HEXA ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | (JSONCPP_VERSION_PATCH << 8))
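
Worked arithmetic for the bumped version (plain bit-shifts, nothing library-specific): JSONCPP_VERSION_HEXA == (1 << 24) | (6 << 16) | (2 << 8) == 0x01060200, so client code can gate on the packed value:

\code
#if JSONCPP_VERSION_HEXA < 0x01060200
# error "jsoncpp >= 1.6.2 required"
#endif
\endcode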
diff --git a/3rdparty/jsoncpp/include/json/writer.h b/3rdparty/jsoncpp/include/json/writer.h
index dc9e46f4bd6..f5f0a389ee8 100644
--- a/3rdparty/jsoncpp/include/json/writer.h
+++ b/3rdparty/jsoncpp/include/json/writer.h
@@ -11,6 +11,7 @@
#endif // if !defined(JSON_IS_AMALGAMATION)
#include <vector>
#include <string>
+#include <ostream>
// Disable warning C4251: <data member>: <type> needs to have dll-interface to
// be used by...
@@ -23,7 +24,115 @@ namespace Json {
class Value;
+/**
+
+Usage:
+\code
+ using namespace Json;
+ void writeToStdout(StreamWriter::Factory const& factory, Value const& value) {
+ std::unique_ptr<StreamWriter> const writer(
+ factory.newStreamWriter());
+ writer->write(value, &std::cout);
+ std::cout << std::endl; // add lf and flush
+ }
+\endcode
+*/
+class JSON_API StreamWriter {
+protected:
+ std::ostream* sout_; // not owned; will not delete
+public:
+ StreamWriter();
+ virtual ~StreamWriter();
+ /** Write Value into document as configured in sub-class.
+     Do not take ownership of sout, but maintain a reference for the duration of the call.
+ \pre sout != NULL
+ \return zero on success (For now, we always return zero, so check the stream instead.)
+ \throw std::exception possibly, depending on configuration
+ */
+ virtual int write(Value const& root, std::ostream* sout) = 0;
+
+ /** \brief A simple abstract factory.
+ */
+ class JSON_API Factory {
+ public:
+ virtual ~Factory();
+    /** \brief Allocate a StreamWriter via operator new().
+ * \throw std::exception if something goes wrong (e.g. invalid settings)
+ */
+ virtual StreamWriter* newStreamWriter() const = 0;
+ }; // Factory
+}; // StreamWriter
+
+/** \brief Write into stringstream, then return string, for convenience.
+ * A StreamWriter will be created from the factory, used, and then deleted.
+ */
+std::string JSON_API writeString(StreamWriter::Factory const& factory, Value const& root);
+
+
+/** \brief Build a StreamWriter implementation.
+
+Usage:
+\code
+ using namespace Json;
+ Value value = ...;
+ StreamWriterBuilder builder;
+ builder["commentStyle"] = "None";
+ builder["indentation"] = " "; // or whatever you like
+ std::unique_ptr<Json::StreamWriter> writer(
+ builder.newStreamWriter());
+ writer->write(value, &std::cout);
+ std::cout << std::endl; // add lf and flush
+\endcode
+*/
+class JSON_API StreamWriterBuilder : public StreamWriter::Factory {
+public:
+ // Note: We use a Json::Value so that we can add data-members to this class
+ // without a major version bump.
+ /** Configuration of this builder.
+ Available settings (case-sensitive):
+ - "commentStyle": "None" or "All"
+ - "indentation": "<anything>"
+ - "enableYAMLCompatibility": false or true
+ - slightly change the whitespace around colons
+ - "dropNullPlaceholders": false or true
+ - Drop the "null" string from the writer's output for nullValues.
+ Strictly speaking, this is not valid JSON. But when the output is being
+      fed to a browser's JavaScript, it makes for smaller output and the
+ browser can handle the output just fine.
+
+  You can examine `settings_` yourself
+ to see the defaults. You can also write and read them just like any
+ JSON Value.
+ \sa setDefaults()
+ */
+ Json::Value settings_;
+
+ StreamWriterBuilder();
+ virtual ~StreamWriterBuilder();
+
+ /**
+ * \throw std::exception if something goes wrong (e.g. invalid settings)
+ */
+ virtual StreamWriter* newStreamWriter() const;
+
+ /** \return true if 'settings' are legal and consistent;
+ * otherwise, indicate bad settings via 'invalid'.
+ */
+ bool validate(Json::Value* invalid) const;
+ /** A simple way to update a specific setting.
+ */
+ Value& operator[](std::string key);
+
+ /** Called by ctor, but you can use this to reset settings_.
+ * \pre 'settings' != NULL (but Json::null is fine)
+ * \remark Defaults:
+ * \snippet src/lib_json/json_writer.cpp StreamWriterBuilderDefaults
+ */
+ static void setDefaults(Json::Value* settings);
+};
+
/** \brief Abstract class for writers.
+ * \deprecated Use StreamWriter. (And really, this is an implementation detail.)
*/
class JSON_API Writer {
public:
@@ -39,8 +148,10 @@ public:
*consumption,
 * but may be useful to support features such as RPC where bandwidth is limited.
* \sa Reader, Value
+ * \deprecated Use StreamWriterBuilder.
*/
class JSON_API FastWriter : public Writer {
+
public:
FastWriter();
virtual ~FastWriter() {}
@@ -90,6 +201,7 @@ private:
*#CommentPlacement.
*
* \sa Reader, Value, Value::setComment()
+ * \deprecated Use StreamWriterBuilder.
*/
class JSON_API StyledWriter : public Writer {
public:
@@ -151,6 +263,7 @@ private:
*
* \param indentation Each level will be indented by this amount extra.
* \sa Reader, Value, Value::setComment()
+ * \deprecated Use StreamWriterBuilder.
*/
class JSON_API StyledStreamWriter {
public:
@@ -187,7 +300,8 @@ private:
std::string indentString_;
int rightMargin_;
std::string indentation_;
- bool addChildValues_;
+ bool addChildValues_ : 1;
+ bool indented_ : 1;
};
#if defined(JSON_HAS_INT64)
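
A minimal sketch of the writeString() convenience declared above ("indentation" is one of the documented settings; exact output spacing depends on the remaining defaults):

\code
using namespace Json;

Value root;
root["status"] = "ok";
StreamWriterBuilder builder;
builder["indentation"] = "";                  // compact, single-line output
std::string out = writeString(builder, root); // roughly {"status":"ok"}
\endcode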
diff --git a/3rdparty/jsoncpp/makefiles/vs71/lib_json.vcproj b/3rdparty/jsoncpp/makefiles/vs71/lib_json.vcproj
index 1aa5978a1fe..24c5dd411f6 100644
--- a/3rdparty/jsoncpp/makefiles/vs71/lib_json.vcproj
+++ b/3rdparty/jsoncpp/makefiles/vs71/lib_json.vcproj
@@ -179,15 +179,6 @@
RelativePath="..\..\include\json\json.h">
</File>
<File
- RelativePath="..\..\src\lib_json\json_batchallocator.h">
- </File>
- <File
- RelativePath="..\..\src\lib_json\json_internalarray.inl">
- </File>
- <File
- RelativePath="..\..\src\lib_json\json_internalmap.inl">
- </File>
- <File
RelativePath="..\..\src\lib_json\json_reader.cpp">
</File>
<File
diff --git a/3rdparty/jsoncpp/makerelease.py b/3rdparty/jsoncpp/makerelease.py
index 90276d120bb..ea3722de449 100644
--- a/3rdparty/jsoncpp/makerelease.py
+++ b/3rdparty/jsoncpp/makerelease.py
@@ -1,3 +1,8 @@
+# Copyright 2010 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
@@ -14,6 +19,7 @@ python makerelease.py 0.5.0 0.6.0-dev
Note: This was for Subversion. Now that we are in GitHub, we do not
need to build versioned tarballs anymore, so makerelease.py is defunct.
"""
+
from __future__ import print_function
import os.path
import subprocess
@@ -34,57 +40,57 @@ SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
-def set_version( version ):
+def set_version(version):
with open('version','wb') as f:
- f.write( version.strip() )
+ f.write(version.strip())
-def rmdir_if_exist( dir_path ):
- if os.path.isdir( dir_path ):
- shutil.rmtree( dir_path )
+def rmdir_if_exist(dir_path):
+ if os.path.isdir(dir_path):
+ shutil.rmtree(dir_path)
class SVNError(Exception):
pass
-def svn_command( command, *args ):
+def svn_command(command, *args):
cmd = ['svn', '--non-interactive', command] + list(args)
- print('Running:', ' '.join( cmd ))
- process = subprocess.Popen( cmd,
+ print('Running:', ' '.join(cmd))
+ process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT )
+ stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode:
- error = SVNError( 'SVN command failed:\n' + stdout )
+ error = SVNError('SVN command failed:\n' + stdout)
error.returncode = process.returncode
raise error
return stdout
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
- stdout = svn_command( 'status', '--xml' )
- etree = ElementTree.fromstring( stdout )
+ stdout = svn_command('status', '--xml')
+ etree = ElementTree.fromstring(stdout)
msg = []
- for entry in etree.getiterator( 'entry' ):
+ for entry in etree.getiterator('entry'):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
- msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
+ msg.append('File "%s" has pending change (status="%s")' % (path, status))
if msg:
- msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
- return '\n'.join( msg )
+ msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!')
+ return '\n'.join(msg)
-def svn_join_url( base_url, suffix ):
+def svn_join_url(base_url, suffix):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
-def svn_check_if_tag_exist( tag_url ):
+def svn_check_if_tag_exist(tag_url):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
- list_stdout = svn_command( 'list', tag_url )
+ list_stdout = svn_command('list', tag_url)
except SVNError as e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
@@ -92,82 +98,82 @@ def svn_check_if_tag_exist( tag_url ):
return False
return True
-def svn_commit( message ):
+def svn_commit(message):
"""Commit the sandbox, providing the specified comment.
"""
- svn_command( 'ci', '-m', message )
+ svn_command('ci', '-m', message)
-def svn_tag_sandbox( tag_url, message ):
+def svn_tag_sandbox(tag_url, message):
"""Makes a tag based on the sandbox revisions.
"""
- svn_command( 'copy', '-m', message, '.', tag_url )
+ svn_command('copy', '-m', message, '.', tag_url)
-def svn_remove_tag( tag_url, message ):
+def svn_remove_tag(tag_url, message):
"""Removes an existing tag.
"""
- svn_command( 'delete', '-m', message, tag_url )
+ svn_command('delete', '-m', message, tag_url)
-def svn_export( tag_url, export_dir ):
+def svn_export(tag_url, export_dir):
"""Exports the tag_url revision to export_dir.
    Target directory, including its parent, is created if it does not exist.
    If the directory export_dir exists, it is deleted before the export proceeds.
"""
- rmdir_if_exist( export_dir )
- svn_command( 'export', tag_url, export_dir )
+ rmdir_if_exist(export_dir)
+ svn_command('export', tag_url, export_dir)
-def fix_sources_eol( dist_dir ):
+def fix_sources_eol(dist_dir):
"""Set file EOL for tarball distribution.
"""
print('Preparing exported source file EOL for distribution...')
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
- win_sources = antglob.glob( dist_dir,
+ win_sources = antglob.glob(dist_dir,
includes = '**/*.sln **/*.vcproj',
- prune_dirs = prune_dirs )
- unix_sources = antglob.glob( dist_dir,
+ prune_dirs = prune_dirs)
+ unix_sources = antglob.glob(dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
- prune_dirs = prune_dirs )
+ prune_dirs = prune_dirs)
for path in win_sources:
- fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
+ fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\r\n')
for path in unix_sources:
- fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
+ fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\n')
-def download( url, target_path ):
+def download(url, target_path):
"""Download file represented by url to target_path.
"""
- f = urllib2.urlopen( url )
+ f = urllib2.urlopen(url)
try:
data = f.read()
finally:
f.close()
- fout = open( target_path, 'wb' )
+ fout = open(target_path, 'wb')
try:
- fout.write( data )
+ fout.write(data)
finally:
fout.close()
-def check_compile( distcheck_top_dir, platform ):
+def check_compile(distcheck_top_dir, platform):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
- print('Running:', ' '.join( cmd ))
- log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
- flog = open( log_path, 'wb' )
+ print('Running:', ' '.join(cmd))
+ log_path = os.path.join(distcheck_top_dir, 'build-%s.log' % platform)
+ flog = open(log_path, 'wb')
try:
- process = subprocess.Popen( cmd,
+ process = subprocess.Popen(cmd,
stdout=flog,
stderr=subprocess.STDOUT,
- cwd=distcheck_top_dir )
+ cwd=distcheck_top_dir)
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
-def write_tempfile( content, **kwargs ):
- fd, path = tempfile.mkstemp( **kwargs )
- f = os.fdopen( fd, 'wt' )
+def write_tempfile(content, **kwargs):
+ fd, path = tempfile.mkstemp(**kwargs)
+ f = os.fdopen(fd, 'wt')
try:
- f.write( content )
+ f.write(content)
finally:
f.close()
return path
@@ -175,34 +181,34 @@ def write_tempfile( content, **kwargs ):
class SFTPError(Exception):
pass
-def run_sftp_batch( userhost, sftp, batch, retry=0 ):
- path = write_tempfile( batch, suffix='.sftp', text=True )
+def run_sftp_batch(userhost, sftp, batch, retry=0):
+ path = write_tempfile(batch, suffix='.sftp', text=True)
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in range(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
- print(heading, ' '.join( cmd ))
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+ print(heading, ' '.join(cmd))
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode != 0:
- error = SFTPError( 'SFTP batch failed:\n' + stdout )
+ error = SFTPError('SFTP batch failed:\n' + stdout)
else:
break
if error:
raise error
return stdout
-def sourceforge_web_synchro( sourceforge_project, doc_dir,
- user=None, sftp='sftp' ):
+def sourceforge_web_synchro(sourceforge_project, doc_dir,
+ user=None, sftp='sftp'):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
- stdout = run_sftp_batch( userhost, sftp, """
+ stdout = run_sftp_batch(userhost, sftp, """
cd htdocs
dir
exit
-""" )
+""")
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
@@ -216,15 +222,15 @@ exit
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
- existing_paths.add( path[0] )
- upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
+ existing_paths.add(path[0])
+ upload_paths = set([os.path.basename(p) for p in antglob.glob(doc_dir)])
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
        print('Removing the following files from the web:')
- print('\n'.join( paths_to_remove ))
- stdout = run_sftp_batch( userhost, sftp, """cd htdocs
+ print('\n'.join(paths_to_remove))
+ stdout = run_sftp_batch(userhost, sftp, """cd htdocs
rm %s
-exit""" % ' '.join(paths_to_remove) )
+exit""" % ' '.join(paths_to_remove))
print('Uploading %d files:' % len(upload_paths))
batch_size = 10
upload_paths = list(upload_paths)
@@ -235,17 +241,17 @@ exit""" % ' '.join(paths_to_remove) )
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print('%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec))
- run_sftp_batch( userhost, sftp, """cd htdocs
+ run_sftp_batch(userhost, sftp, """cd htdocs
lcd %s
mput %s
-exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
+exit""" % (doc_dir, ' '.join(paths)), retry=3)
-def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
+def sourceforge_release_tarball(sourceforge_project, paths, user=None, sftp='sftp'):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
- run_sftp_batch( userhost, sftp, """
+ run_sftp_batch(userhost, sftp, """
mput %s
exit
-""" % (' '.join(paths),) )
+""" % (' '.join(paths),))
def main():
@@ -286,12 +292,12 @@ Warning: --force should only be used when developing/testing the release script
options, args = parser.parse_args()
if len(args) != 2:
- parser.error( 'release_version missing on command-line.' )
+ parser.error('release_version missing on command-line.')
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
- parser.error( 'You must specify either --platform or --no-test option.' )
+ parser.error('You must specify either --platform or --no-test option.')
if options.ignore_pending_commit:
msg = ''
@@ -299,86 +305,86 @@ Warning: --force should only be used when developing/testing the release script
msg = check_no_pending_commit()
if not msg:
print('Setting version to', release_version)
- set_version( release_version )
- svn_commit( 'Release ' + release_version )
- tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
- if svn_check_if_tag_exist( tag_url ):
+ set_version(release_version)
+ svn_commit('Release ' + release_version)
+ tag_url = svn_join_url(SVN_TAG_ROOT, release_version)
+ if svn_check_if_tag_exist(tag_url):
if options.retag_release:
- svn_remove_tag( tag_url, 'Overwriting previous tag' )
+ svn_remove_tag(tag_url, 'Overwriting previous tag')
else:
            print('Aborting, tag %s already exists. Use --retag to overwrite it!' % tag_url)
- sys.exit( 1 )
- svn_tag_sandbox( tag_url, 'Release ' + release_version )
+ sys.exit(1)
+ svn_tag_sandbox(tag_url, 'Release ' + release_version)
print('Generated doxygen document...')
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
- doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
+ doc_tarball_path, doc_dirname = doxybuild.build_doc(options, make_release=True)
doc_distcheck_dir = 'dist/doccheck'
- tarball.decompress( doc_tarball_path, doc_distcheck_dir )
- doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
+ tarball.decompress(doc_tarball_path, doc_distcheck_dir)
+ doc_distcheck_top_dir = os.path.join(doc_distcheck_dir, doc_dirname)
export_dir = 'dist/export'
- svn_export( tag_url, export_dir )
- fix_sources_eol( export_dir )
+ svn_export(tag_url, export_dir)
+ fix_sources_eol(export_dir)
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print('Generating source tarball to', source_tarball_path)
- tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
+ tarball.make_tarball(source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir)
amalgamation_tarball_path = 'dist/%s-amalgamation.tar.gz' % source_dir
print('Generating amalgamation source tarball to', amalgamation_tarball_path)
amalgamation_dir = 'dist/amalgamation'
- amalgamate.amalgamate_source( export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h' )
+ amalgamate.amalgamate_source(export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h')
amalgamation_source_dir = 'jsoncpp-src-amalgamation' + release_version
- tarball.make_tarball( amalgamation_tarball_path, [amalgamation_dir],
- amalgamation_dir, prefix_dir=amalgamation_source_dir )
+ tarball.make_tarball(amalgamation_tarball_path, [amalgamation_dir],
+ amalgamation_dir, prefix_dir=amalgamation_source_dir)
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print('Decompressing source tarball to', distcheck_dir)
- rmdir_if_exist( distcheck_dir )
- tarball.decompress( source_tarball_path, distcheck_dir )
+ rmdir_if_exist(distcheck_dir)
+ tarball.decompress(source_tarball_path, distcheck_dir)
scons_local_path = 'dist/scons-local.tar.gz'
print('Downloading scons-local to', scons_local_path)
- download( SCONS_LOCAL_URL, scons_local_path )
+ download(SCONS_LOCAL_URL, scons_local_path)
print('Decompressing scons-local to', distcheck_top_dir)
- tarball.decompress( scons_local_path, distcheck_top_dir )
+ tarball.decompress(scons_local_path, distcheck_top_dir)
# Run compilation
print('Compiling decompressed tarball')
all_build_status = True
for platform in options.platforms.split(','):
print('Testing platform:', platform)
- build_status, log_path = check_compile( distcheck_top_dir, platform )
+ build_status, log_path = check_compile(distcheck_top_dir, platform)
print('see build log:', log_path)
print(build_status and '=> ok' or '=> FAILED')
all_build_status = all_build_status and build_status
if not build_status:
print('Testing failed on at least one platform, aborting...')
- svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
+ svn_remove_tag(tag_url, 'Removing tag due to failed testing')
sys.exit(1)
if options.user:
if not options.no_web:
print('Uploading documentation using user', options.user)
- sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
+ sourceforge_web_synchro(SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp)
print('Completed documentation upload')
print('Uploading source and documentation tarballs for release using user', options.user)
- sourceforge_release_tarball( SOURCEFORGE_PROJECT,
+ sourceforge_release_tarball(SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
- user=options.user, sftp=options.sftp )
+ user=options.user, sftp=options.sftp)
print('Source and doc release tarballs uploaded')
else:
        print('No upload user specified. Web site and download tarball were not uploaded.')
print('Tarball can be found at:', doc_tarball_path)
# Set next version number and commit
- set_version( next_version )
- svn_commit( 'Released ' + release_version )
+ set_version(next_version)
+ svn_commit('Released ' + release_version)
else:
- sys.stderr.write( msg + '\n' )
+ sys.stderr.write(msg + '\n')
if __name__ == '__main__':
main()
diff --git a/3rdparty/jsoncpp/pkg-config/jsoncpp.pc.in b/3rdparty/jsoncpp/pkg-config/jsoncpp.pc.in
index 9613181b2d1..3ca4a84d027 100644
--- a/3rdparty/jsoncpp/pkg-config/jsoncpp.pc.in
+++ b/3rdparty/jsoncpp/pkg-config/jsoncpp.pc.in
@@ -1,7 +1,7 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
-libdir=${exec_prefix}/@LIBRARY_INSTALL_DIR@
-includedir=${prefix}/@INCLUDE_INSTALL_DIR@
+libdir=@LIBRARY_INSTALL_DIR@
+includedir=@INCLUDE_INSTALL_DIR@
Name: jsoncpp
Description: A C++ library for interacting with JSON
diff --git a/3rdparty/jsoncpp/scons-tools/globtool.py b/3rdparty/jsoncpp/scons-tools/globtool.py
index 811140e8aab..890f1b7b1f4 100644
--- a/3rdparty/jsoncpp/scons-tools/globtool.py
+++ b/3rdparty/jsoncpp/scons-tools/globtool.py
@@ -1,9 +1,14 @@
+# Copyright 2009 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
import fnmatch
import os
-def generate( env ):
- def Glob( env, includes = None, excludes = None, dir = '.' ):
- """Adds Glob( includes = Split( '*' ), excludes = None, dir = '.')
+def generate(env):
+ def Glob(env, includes = None, excludes = None, dir = '.'):
+ """Adds Glob(includes = Split('*'), excludes = None, dir = '.')
helper function to environment.
    Globs files on the file system.
@@ -12,36 +17,36 @@ def generate( env ):
    excludes: list of file name patterns excluded from the return list.
Example:
- sources = env.Glob( ("*.cpp", '*.h'), "~*.cpp", "#src" )
+ sources = env.Glob(("*.cpp", '*.h'), "~*.cpp", "#src")
"""
def filterFilename(path):
- abs_path = os.path.join( dir, path )
+ abs_path = os.path.join(dir, path)
if not os.path.isfile(abs_path):
return 0
fn = os.path.basename(path)
match = 0
for include in includes:
- if fnmatch.fnmatchcase( fn, include ):
+ if fnmatch.fnmatchcase(fn, include):
match = 1
break
if match == 1 and not excludes is None:
for exclude in excludes:
- if fnmatch.fnmatchcase( fn, exclude ):
+ if fnmatch.fnmatchcase(fn, exclude):
match = 0
break
return match
if includes is None:
includes = ('*',)
- elif type(includes) in ( type(''), type(u'') ):
+ elif type(includes) in (type(''), type(u'')):
includes = (includes,)
- if type(excludes) in ( type(''), type(u'') ):
+ if type(excludes) in (type(''), type(u'')):
excludes = (excludes,)
dir = env.Dir(dir).abspath
- paths = os.listdir( dir )
- def makeAbsFileNode( path ):
- return env.File( os.path.join( dir, path ) )
- nodes = filter( filterFilename, paths )
- return map( makeAbsFileNode, nodes )
+ paths = os.listdir(dir)
+ def makeAbsFileNode(path):
+ return env.File(os.path.join(dir, path))
+ nodes = filter(filterFilename, paths)
+ return map(makeAbsFileNode, nodes)
from SCons.Script import Environment
Environment.Glob = Glob
diff --git a/3rdparty/jsoncpp/scons-tools/srcdist.py b/3rdparty/jsoncpp/scons-tools/srcdist.py
index 864ff408158..fe1d74695a2 100644
--- a/3rdparty/jsoncpp/scons-tools/srcdist.py
+++ b/3rdparty/jsoncpp/scons-tools/srcdist.py
@@ -1,3 +1,8 @@
+# Copyright 2007 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
import os
import os.path
from fnmatch import fnmatch
@@ -47,7 +52,7 @@ import targz
## elif token == "=":
## data[key] = list()
## else:
-## append_data( data, key, new_data, token )
+## append_data(data, key, new_data, token)
## new_data = True
##
## last_token = token
@@ -55,7 +60,7 @@ import targz
##
## if last_token == '\\' and token != '\n':
## new_data = False
-## append_data( data, key, new_data, '\\' )
+## append_data(data, key, new_data, '\\')
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
@@ -116,7 +121,7 @@ import targz
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
-## sources = map( lambda path: env.File(path), sources )
+## sources = map(lambda path: env.File(path), sources)
## return sources
##
##
@@ -143,7 +148,7 @@ def srcDistEmitter(source, target, env):
## # add our output locations
## for (k, v) in output_formats.items():
## if data.get("GENERATE_" + k, v[0]) == "YES":
-## targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
+## targets.append(env.Dir(os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))))
##
## # don't clobber targets
## for node in targets:
@@ -161,14 +166,13 @@ def generate(env):
Add builders and construction variables for the
SrcDist tool.
"""
-## doxyfile_scanner = env.Scanner(
-## DoxySourceScan,
+## doxyfile_scanner = env.Scanner(## DoxySourceScan,
## "DoxySourceScan",
## scan_check = DoxySourceScanCheck,
-## )
+##)
if targz.exists(env):
- srcdist_builder = targz.makeBuilder( srcDistEmitter )
+ srcdist_builder = targz.makeBuilder(srcDistEmitter)
env['BUILDERS']['SrcDist'] = srcdist_builder
diff --git a/3rdparty/jsoncpp/scons-tools/substinfile.py b/3rdparty/jsoncpp/scons-tools/substinfile.py
index ef18b4edbcb..c620442d572 100644
--- a/3rdparty/jsoncpp/scons-tools/substinfile.py
+++ b/3rdparty/jsoncpp/scons-tools/substinfile.py
@@ -1,3 +1,8 @@
+# Copyright 2010 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
import re
from SCons.Script import * # the usual scons stuff you get in a SConscript
import collections
@@ -70,7 +75,7 @@ def generate(env):
return target, source
## env.Append(TOOLS = 'substinfile') # this should be automaticaly done by Scons ?!?
- subst_action = SCons.Action.Action( subst_in_file, subst_in_file_string )
+ subst_action = SCons.Action.Action(subst_in_file, subst_in_file_string)
env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
diff --git a/3rdparty/jsoncpp/scons-tools/targz.py b/3rdparty/jsoncpp/scons-tools/targz.py
index f5432003df7..8e5ba83003b 100644
--- a/3rdparty/jsoncpp/scons-tools/targz.py
+++ b/3rdparty/jsoncpp/scons-tools/targz.py
@@ -1,3 +1,8 @@
+# Copyright 2007 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
"""tarball
Tool-specific initialization for tarball.
@@ -27,9 +32,9 @@ TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
if internal_targz:
def targz(target, source, env):
- def archive_name( path ):
- path = os.path.normpath( os.path.abspath( path ) )
- common_path = os.path.commonprefix( (base_dir, path) )
+ def archive_name(path):
+ path = os.path.normpath(os.path.abspath(path))
+ common_path = os.path.commonprefix((base_dir, path))
archive_name = path[len(common_path):]
return archive_name
@@ -37,23 +42,23 @@ if internal_targz:
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
- tar.add(path, archive_name(path) )
+ tar.add(path, archive_name(path))
compression = env.get('TARGZ_COMPRESSION_LEVEL',TARGZ_DEFAULT_COMPRESSION_LEVEL)
- base_dir = os.path.normpath( env.get('TARGZ_BASEDIR', env.Dir('.')).abspath )
+ base_dir = os.path.normpath(env.get('TARGZ_BASEDIR', env.Dir('.')).abspath)
target_path = str(target[0])
- fileobj = gzip.GzipFile( target_path, 'wb', compression )
+ fileobj = gzip.GzipFile(target_path, 'wb', compression)
tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
for source in source:
source_path = str(source)
if source.isdir():
os.path.walk(source_path, visit, tar)
else:
- tar.add(source_path, archive_name(source_path) ) # filename, arcname
+ tar.add(source_path, archive_name(source_path)) # filename, arcname
tar.close()
targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])
- def makeBuilder( emitter = None ):
+ def makeBuilder(emitter = None):
return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
diff --git a/3rdparty/jsoncpp/src/jsontestrunner/CMakeLists.txt b/3rdparty/jsoncpp/src/jsontestrunner/CMakeLists.txt
index dd8e2175f0f..820a7cd3524 100644
--- a/3rdparty/jsoncpp/src/jsontestrunner/CMakeLists.txt
+++ b/3rdparty/jsoncpp/src/jsontestrunner/CMakeLists.txt
@@ -1,20 +1,23 @@
-FIND_PACKAGE(PythonInterp 2.6 REQUIRED)
-
-IF(JSONCPP_LIB_BUILD_SHARED)
- ADD_DEFINITIONS( -DJSON_DLL )
-ENDIF(JSONCPP_LIB_BUILD_SHARED)
+FIND_PACKAGE(PythonInterp 2.6)
ADD_EXECUTABLE(jsontestrunner_exe
main.cpp
)
-TARGET_LINK_LIBRARIES(jsontestrunner_exe jsoncpp_lib)
+
+IF(BUILD_SHARED_LIBS)
+ ADD_DEFINITIONS( -DJSON_DLL )
+ TARGET_LINK_LIBRARIES(jsontestrunner_exe jsoncpp_lib)
+ELSE(BUILD_SHARED_LIBS)
+ TARGET_LINK_LIBRARIES(jsontestrunner_exe jsoncpp_lib_static)
+ENDIF(BUILD_SHARED_LIBS)
+
SET_TARGET_PROPERTIES(jsontestrunner_exe PROPERTIES OUTPUT_NAME jsontestrunner_exe)
IF(PYTHONINTERP_FOUND)
# Run end to end parser/writer tests
SET(TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../test)
SET(RUNJSONTESTS_PATH ${TEST_DIR}/runjsontests.py)
- ADD_CUSTOM_TARGET(jsoncpp_readerwriter_tests ALL
+ ADD_CUSTOM_TARGET(jsoncpp_readerwriter_tests
"${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" $<TARGET_FILE:jsontestrunner_exe> "${TEST_DIR}/data"
DEPENDS jsontestrunner_exe jsoncpp_test
)
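
The JSON_DLL definition added above feeds jsoncpp's import/export macro. A simplified sketch of how include/json/config.h consumes these defines on declspec-capable toolchains (paraphrased for illustration, not the verbatim header):

#if defined(JSON_DLL_BUILD)
#define JSON_API __declspec(dllexport)  // building the shared library itself
#elif defined(JSON_DLL)
#define JSON_API __declspec(dllimport)  // consuming the shared library
#else
#define JSON_API                        // static linking: no decoration
#endif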
diff --git a/3rdparty/jsoncpp/src/jsontestrunner/main.cpp b/3rdparty/jsoncpp/src/jsontestrunner/main.cpp
index ba985877db4..1ec1fb6fbcd 100644
--- a/3rdparty/jsoncpp/src/jsontestrunner/main.cpp
+++ b/3rdparty/jsoncpp/src/jsontestrunner/main.cpp
@@ -8,12 +8,22 @@
#include <json/json.h>
#include <algorithm> // sort
+#include <sstream>
#include <stdio.h>
#if defined(_MSC_VER) && _MSC_VER >= 1310
#pragma warning(disable : 4996) // disable fopen deprecation warning
#endif
+struct Options
+{
+ std::string path;
+ Json::Features features;
+ bool parseOnly;
+ typedef std::string (*writeFuncType)(Json::Value const&);
+ writeFuncType write;
+};
+
static std::string normalizeFloatingPointStr(double value) {
char buffer[32];
#if defined(_MSC_VER) && defined(__STDC_SECURE_LIB__)
@@ -129,43 +139,67 @@ printValueTree(FILE* fout, Json::Value& value, const std::string& path = ".") {
static int parseAndSaveValueTree(const std::string& input,
const std::string& actual,
const std::string& kind,
- Json::Value& root,
const Json::Features& features,
- bool parseOnly) {
+ bool parseOnly,
+ Json::Value* root)
+{
Json::Reader reader(features);
- bool parsingSuccessful = reader.parse(input, root);
+ bool parsingSuccessful = reader.parse(input, *root);
if (!parsingSuccessful) {
printf("Failed to parse %s file: \n%s\n",
kind.c_str(),
reader.getFormattedErrorMessages().c_str());
return 1;
}
-
if (!parseOnly) {
FILE* factual = fopen(actual.c_str(), "wt");
if (!factual) {
printf("Failed to create %s actual file.\n", kind.c_str());
return 2;
}
- printValueTree(factual, root);
+ printValueTree(factual, *root);
fclose(factual);
}
return 0;
}
-
-static int rewriteValueTree(const std::string& rewritePath,
- const Json::Value& root,
- std::string& rewrite) {
- // Json::FastWriter writer;
- // writer.enableYAMLCompatibility();
+// static std::string useFastWriter(Json::Value const& root) {
+// Json::FastWriter writer;
+// writer.enableYAMLCompatibility();
+// return writer.write(root);
+// }
+static std::string useStyledWriter(
+ Json::Value const& root)
+{
Json::StyledWriter writer;
- rewrite = writer.write(root);
+ return writer.write(root);
+}
+static std::string useStyledStreamWriter(
+ Json::Value const& root)
+{
+ Json::StyledStreamWriter writer;
+ std::ostringstream sout;
+ writer.write(sout, root);
+ return sout.str();
+}
+static std::string useBuiltStyledStreamWriter(
+ Json::Value const& root)
+{
+ Json::StreamWriterBuilder builder;
+ return Json::writeString(builder, root);
+}
+static int rewriteValueTree(
+ const std::string& rewritePath,
+ const Json::Value& root,
+ Options::writeFuncType write,
+ std::string* rewrite)
+{
+ *rewrite = write(root);
FILE* fout = fopen(rewritePath.c_str(), "wt");
if (!fout) {
printf("Failed to create rewrite file: %s\n", rewritePath.c_str());
return 2;
}
- fprintf(fout, "%s\n", rewrite.c_str());
+ fprintf(fout, "%s\n", rewrite->c_str());
fclose(fout);
return 0;
}
@@ -194,84 +228,98 @@ static int printUsage(const char* argv[]) {
return 3;
}
-int parseCommandLine(int argc,
- const char* argv[],
- Json::Features& features,
- std::string& path,
- bool& parseOnly) {
- parseOnly = false;
+static int parseCommandLine(
+ int argc, const char* argv[], Options* opts)
+{
+ opts->parseOnly = false;
+ opts->write = &useStyledWriter;
if (argc < 2) {
return printUsage(argv);
}
-
int index = 1;
- if (std::string(argv[1]) == "--json-checker") {
- features = Json::Features::strictMode();
- parseOnly = true;
+ if (std::string(argv[index]) == "--json-checker") {
+ opts->features = Json::Features::strictMode();
+ opts->parseOnly = true;
++index;
}
-
- if (std::string(argv[1]) == "--json-config") {
+ if (std::string(argv[index]) == "--json-config") {
printConfig();
return 3;
}
-
+ if (std::string(argv[index]) == "--json-writer") {
+ ++index;
+ std::string const writerName(argv[index++]);
+ if (writerName == "StyledWriter") {
+ opts->write = &useStyledWriter;
+ } else if (writerName == "StyledStreamWriter") {
+ opts->write = &useStyledStreamWriter;
+ } else if (writerName == "BuiltStyledStreamWriter") {
+ opts->write = &useBuiltStyledStreamWriter;
+ } else {
+ printf("Unknown '--json-writer %s'\n", writerName.c_str());
+ return 4;
+ }
+ }
if (index == argc || index + 1 < argc) {
return printUsage(argv);
}
-
- path = argv[index];
+ opts->path = argv[index];
return 0;
}
+static int runTest(Options const& opts)
+{
+ int exitCode = 0;
+
+ std::string input = readInputTestFile(opts.path.c_str());
+ if (input.empty()) {
+ printf("Failed to read input or empty input: %s\n", opts.path.c_str());
+ return 3;
+ }
+
+ std::string basePath = removeSuffix(opts.path, ".json");
+ if (!opts.parseOnly && basePath.empty()) {
+ printf("Bad input path. Path does not end with '.expected':\n%s\n",
+ opts.path.c_str());
+ return 3;
+ }
+ std::string const actualPath = basePath + ".actual";
+ std::string const rewritePath = basePath + ".rewrite";
+ std::string const rewriteActualPath = basePath + ".actual-rewrite";
+
+ Json::Value root;
+ exitCode = parseAndSaveValueTree(
+ input, actualPath, "input",
+ opts.features, opts.parseOnly, &root);
+ if (exitCode || opts.parseOnly) {
+ return exitCode;
+ }
+ std::string rewrite;
+ exitCode = rewriteValueTree(rewritePath, root, opts.write, &rewrite);
+ if (exitCode) {
+ return exitCode;
+ }
+ Json::Value rewriteRoot;
+ exitCode = parseAndSaveValueTree(
+ rewrite, rewriteActualPath, "rewrite",
+ opts.features, opts.parseOnly, &rewriteRoot);
+ if (exitCode) {
+ return exitCode;
+ }
+ return 0;
+}
int main(int argc, const char* argv[]) {
- std::string path;
- Json::Features features;
- bool parseOnly;
- int exitCode = parseCommandLine(argc, argv, features, path, parseOnly);
+ Options opts;
+ int exitCode = parseCommandLine(argc, argv, &opts);
if (exitCode != 0) {
+ printf("Failed to parse command-line.");
return exitCode;
}
-
try {
- std::string input = readInputTestFile(path.c_str());
- if (input.empty()) {
- printf("Failed to read input or empty input: %s\n", path.c_str());
- return 3;
- }
-
- std::string basePath = removeSuffix(argv[1], ".json");
- if (!parseOnly && basePath.empty()) {
- printf("Bad input path. Path does not end with '.expected':\n%s\n",
- path.c_str());
- return 3;
- }
-
- std::string actualPath = basePath + ".actual";
- std::string rewritePath = basePath + ".rewrite";
- std::string rewriteActualPath = basePath + ".actual-rewrite";
-
- Json::Value root;
- exitCode = parseAndSaveValueTree(
- input, actualPath, "input", root, features, parseOnly);
- if (exitCode == 0 && !parseOnly) {
- std::string rewrite;
- exitCode = rewriteValueTree(rewritePath, root, rewrite);
- if (exitCode == 0) {
- Json::Value rewriteRoot;
- exitCode = parseAndSaveValueTree(rewrite,
- rewriteActualPath,
- "rewrite",
- rewriteRoot,
- features,
- parseOnly);
- }
- }
+ return runTest(opts);
}
catch (const std::exception& e) {
printf("Unhandled exception:\n%s\n", e.what());
- exitCode = 1;
+ return 1;
}
-
- return exitCode;
}
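
For reference, the three back-ends selectable with --json-writer map onto these public jsoncpp calls. A minimal, self-contained sketch (assuming the jsoncpp 1.x headers this commit targets; the JSON content is illustrative):

#include <json/json.h>
#include <iostream>
#include <sstream>

int main() {
  Json::Value root;
  root["name"] = "jsoncpp";

  // StyledWriter: returns the indented document as a std::string.
  Json::StyledWriter styled;
  std::cout << styled.write(root);

  // StyledStreamWriter: writes to any std::ostream instead.
  Json::StyledStreamWriter streamWriter;
  std::ostringstream sout;
  streamWriter.write(sout, root);
  std::cout << sout.str();

  // StreamWriterBuilder: the newer factory interface used by
  // useBuiltStyledStreamWriter() above.
  Json::StreamWriterBuilder builder;
  std::cout << Json::writeString(builder, root) << std::endl;
  return 0;
}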
diff --git a/3rdparty/jsoncpp/src/lib_json/CMakeLists.txt b/3rdparty/jsoncpp/src/lib_json/CMakeLists.txt
index d0f6a5ea5ab..79ffa2ec5fe 100644
--- a/3rdparty/jsoncpp/src/lib_json/CMakeLists.txt
+++ b/3rdparty/jsoncpp/src/lib_json/CMakeLists.txt
@@ -1,20 +1,8 @@
-OPTION(JSONCPP_LIB_BUILD_SHARED "Build jsoncpp_lib as a shared library." OFF)
-IF(BUILD_SHARED_LIBS)
- SET(JSONCPP_LIB_BUILD_SHARED ON)
-ENDIF(BUILD_SHARED_LIBS)
-
-IF(JSONCPP_LIB_BUILD_SHARED)
- SET(JSONCPP_LIB_TYPE SHARED)
- ADD_DEFINITIONS( -DJSON_DLL_BUILD )
-ELSE(JSONCPP_LIB_BUILD_SHARED)
- SET(JSONCPP_LIB_TYPE STATIC)
-ENDIF(JSONCPP_LIB_BUILD_SHARED)
-
if( CMAKE_COMPILER_IS_GNUCXX )
#Get compiler version.
execute_process( COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
OUTPUT_VARIABLE GNUCXX_VERSION )
-
+
#-Werror=* was introduced -after- GCC 4.1.2
if( GNUCXX_VERSION VERSION_GREATER 4.1.2 )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=strict-aliasing")
@@ -36,25 +24,13 @@ SET( PUBLIC_HEADERS
SOURCE_GROUP( "Public API" FILES ${PUBLIC_HEADERS} )
-ADD_LIBRARY( jsoncpp_lib ${JSONCPP_LIB_TYPE}
- ${PUBLIC_HEADERS}
- json_tool.h
- json_reader.cpp
- json_batchallocator.h
- json_valueiterator.inl
- json_value.cpp
- json_writer.cpp
- version.h.in
- )
-SET_TARGET_PROPERTIES( jsoncpp_lib PROPERTIES OUTPUT_NAME jsoncpp )
-SET_TARGET_PROPERTIES( jsoncpp_lib PROPERTIES VERSION ${JSONCPP_VERSION} SOVERSION ${JSONCPP_VERSION_MAJOR} )
-
-IF(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
- TARGET_INCLUDE_DIRECTORIES( jsoncpp_lib PUBLIC
- $<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
- $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/${JSONCPP_INCLUDE_DIR}>
- )
-ENDIF(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
+SET(jsoncpp_sources
+ json_tool.h
+ json_reader.cpp
+ json_valueiterator.inl
+ json_value.cpp
+ json_writer.cpp
+ version.h.in)
# Install instructions for this target
IF(JSONCPP_WITH_CMAKE_PACKAGE)
@@ -63,8 +39,42 @@ ELSE(JSONCPP_WITH_CMAKE_PACKAGE)
SET(INSTALL_EXPORT)
ENDIF(JSONCPP_WITH_CMAKE_PACKAGE)
-INSTALL( TARGETS jsoncpp_lib ${INSTALL_EXPORT}
+IF(BUILD_SHARED_LIBS)
+ ADD_DEFINITIONS( -DJSON_DLL_BUILD )
+ ADD_LIBRARY(jsoncpp_lib SHARED ${PUBLIC_HEADERS} ${jsoncpp_sources})
+ SET_TARGET_PROPERTIES( jsoncpp_lib PROPERTIES VERSION ${JSONCPP_VERSION} SOVERSION ${JSONCPP_VERSION_MAJOR})
+ SET_TARGET_PROPERTIES( jsoncpp_lib PROPERTIES OUTPUT_NAME jsoncpp
+ DEBUG_OUTPUT_NAME jsoncpp${DEBUG_LIBNAME_SUFFIX} )
+
+ INSTALL( TARGETS jsoncpp_lib ${INSTALL_EXPORT}
RUNTIME DESTINATION ${RUNTIME_INSTALL_DIR}
LIBRARY DESTINATION ${LIBRARY_INSTALL_DIR}
- ARCHIVE DESTINATION ${ARCHIVE_INSTALL_DIR}
-)
+ ARCHIVE DESTINATION ${ARCHIVE_INSTALL_DIR})
+
+ IF(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
+ TARGET_INCLUDE_DIRECTORIES( jsoncpp_lib PUBLIC
+ $<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/${JSONCPP_INCLUDE_DIR}>)
+ ENDIF(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
+
+ENDIF()
+
+IF(BUILD_STATIC_LIBS)
+ ADD_LIBRARY(jsoncpp_lib_static STATIC ${PUBLIC_HEADERS} ${jsoncpp_sources})
+ SET_TARGET_PROPERTIES( jsoncpp_lib_static PROPERTIES VERSION ${JSONCPP_VERSION} SOVERSION ${JSONCPP_VERSION_MAJOR})
+ SET_TARGET_PROPERTIES( jsoncpp_lib_static PROPERTIES OUTPUT_NAME jsoncpp
+ DEBUG_OUTPUT_NAME jsoncpp${DEBUG_LIBNAME_SUFFIX} )
+
+ INSTALL( TARGETS jsoncpp_lib_static ${INSTALL_EXPORT}
+ RUNTIME DESTINATION ${RUNTIME_INSTALL_DIR}
+ LIBRARY DESTINATION ${LIBRARY_INSTALL_DIR}
+ ARCHIVE DESTINATION ${ARCHIVE_INSTALL_DIR})
+
+ IF(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
+ TARGET_INCLUDE_DIRECTORIES( jsoncpp_lib_static PUBLIC
+ $<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/${JSONCPP_INCLUDE_DIR}>
+ )
+ ENDIF(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
+
+ENDIF()
diff --git a/3rdparty/jsoncpp/src/lib_json/json_batchallocator.h b/3rdparty/jsoncpp/src/lib_json/json_batchallocator.h
deleted file mode 100644
index 2fbef7a8601..00000000000
--- a/3rdparty/jsoncpp/src/lib_json/json_batchallocator.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-#ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED
-#define JSONCPP_BATCHALLOCATOR_H_INCLUDED
-
-#include <stdlib.h>
-#include <assert.h>
-
-#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
-namespace Json {
-
-/* Fast memory allocator.
- *
- * This memory allocator allocates memory for a batch of object (specified by
- * the page size, the number of object in each page).
- *
- * It does not allow the destruction of a single object. All the allocated
- * objects can be destroyed at once. The memory can be either released or reused
- * for future allocation.
- *
- * The in-place new operator must be used to construct the object using the
- * pointer returned by allocate.
- */
-template <typename AllocatedType, const unsigned int objectPerAllocation>
-class BatchAllocator {
-public:
- BatchAllocator(unsigned int objectsPerPage = 255)
- : freeHead_(0), objectsPerPage_(objectsPerPage) {
- // printf( "Size: %d => %s\n", sizeof(AllocatedType),
- // typeid(AllocatedType).name() );
- assert(sizeof(AllocatedType) * objectPerAllocation >=
- sizeof(AllocatedType*)); // We must be able to store a slist in the
- // object free space.
- assert(objectsPerPage >= 16);
- batches_ = allocateBatch(0); // allocated a dummy page
- currentBatch_ = batches_;
- }
-
- ~BatchAllocator() {
- for (BatchInfo* batch = batches_; batch;) {
- BatchInfo* nextBatch = batch->next_;
- free(batch);
- batch = nextBatch;
- }
- }
-
- /// allocate space for an array of objectPerAllocation object.
- /// @warning it is the responsability of the caller to call objects
- /// constructors.
- AllocatedType* allocate() {
- if (freeHead_) // returns node from free list.
- {
- AllocatedType* object = freeHead_;
- freeHead_ = *(AllocatedType**)object;
- return object;
- }
- if (currentBatch_->used_ == currentBatch_->end_) {
- currentBatch_ = currentBatch_->next_;
- while (currentBatch_ && currentBatch_->used_ == currentBatch_->end_)
- currentBatch_ = currentBatch_->next_;
-
- if (!currentBatch_) // no free batch found, allocate a new one
- {
- currentBatch_ = allocateBatch(objectsPerPage_);
- currentBatch_->next_ = batches_; // insert at the head of the list
- batches_ = currentBatch_;
- }
- }
- AllocatedType* allocated = currentBatch_->used_;
- currentBatch_->used_ += objectPerAllocation;
- return allocated;
- }
-
- /// Release the object.
- /// @warning it is the responsability of the caller to actually destruct the
- /// object.
- void release(AllocatedType* object) {
- assert(object != 0);
- *(AllocatedType**)object = freeHead_;
- freeHead_ = object;
- }
-
-private:
- struct BatchInfo {
- BatchInfo* next_;
- AllocatedType* used_;
- AllocatedType* end_;
- AllocatedType buffer_[objectPerAllocation];
- };
-
- // disabled copy constructor and assignement operator.
- BatchAllocator(const BatchAllocator&);
- void operator=(const BatchAllocator&);
-
- static BatchInfo* allocateBatch(unsigned int objectsPerPage) {
- const unsigned int mallocSize =
- sizeof(BatchInfo) - sizeof(AllocatedType) * objectPerAllocation +
- sizeof(AllocatedType) * objectPerAllocation * objectsPerPage;
- BatchInfo* batch = static_cast<BatchInfo*>(malloc(mallocSize));
- batch->next_ = 0;
- batch->used_ = batch->buffer_;
- batch->end_ = batch->buffer_ + objectsPerPage;
- return batch;
- }
-
- BatchInfo* batches_;
- BatchInfo* currentBatch_;
- /// Head of a single linked list within the allocated space of freeed object
- AllocatedType* freeHead_;
- unsigned int objectsPerPage_;
-};
-
-} // namespace Json
-
-#endif // ifndef JSONCPP_DOC_INCLUDE_IMPLEMENTATION
-
-#endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED
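
As its header comment explains, the deleted allocator handed out raw storage: callers paired allocate() with placement new, and called the destructor by hand before release(). A self-contained sketch of that contract, with plain malloc standing in for the batch pages and a hypothetical Node type (C++98-compatible):

#include <new>      // placement new
#include <cstdio>
#include <cstdlib>

struct Node {
  int value;
  explicit Node(int v) : value(v) {}
  ~Node() { std::printf("destroying %d\n", value); }
};

int main() {
  // Stand-in for BatchAllocator::allocate(): raw, uninitialized storage.
  void* storage = std::malloc(sizeof(Node));
  Node* n = new (storage) Node(42);  // construct in place, as required
  std::printf("value = %d\n", n->value);
  n->~Node();                        // destroy manually before releasing
  std::free(storage);                // stand-in for release()
  return 0;
}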
diff --git a/3rdparty/jsoncpp/src/lib_json/json_internalarray.inl b/3rdparty/jsoncpp/src/lib_json/json_internalarray.inl
deleted file mode 100644
index 9ee15e9dbf2..00000000000
--- a/3rdparty/jsoncpp/src/lib_json/json_internalarray.inl
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-// included by json_value.cpp
-
-namespace Json {
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class ValueInternalArray
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-ValueArrayAllocator::~ValueArrayAllocator() {}
-
-// //////////////////////////////////////////////////////////////////
-// class DefaultValueArrayAllocator
-// //////////////////////////////////////////////////////////////////
-#ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-class DefaultValueArrayAllocator : public ValueArrayAllocator {
-public: // overridden from ValueArrayAllocator
- virtual ~DefaultValueArrayAllocator() {}
-
- virtual ValueInternalArray* newArray() { return new ValueInternalArray(); }
-
- virtual ValueInternalArray* newArrayCopy(const ValueInternalArray& other) {
- return new ValueInternalArray(other);
- }
-
- virtual void destructArray(ValueInternalArray* array) { delete array; }
-
- virtual void
- reallocateArrayPageIndex(Value**& indexes,
- ValueInternalArray::PageIndex& indexCount,
- ValueInternalArray::PageIndex minNewIndexCount) {
- ValueInternalArray::PageIndex newIndexCount = (indexCount * 3) / 2 + 1;
- if (minNewIndexCount > newIndexCount)
- newIndexCount = minNewIndexCount;
- void* newIndexes = realloc(indexes, sizeof(Value*) * newIndexCount);
- JSON_ASSERT_MESSAGE(newIndexes, "Couldn't realloc.");
- indexCount = newIndexCount;
- indexes = static_cast<Value**>(newIndexes);
- }
- virtual void releaseArrayPageIndex(Value** indexes,
- ValueInternalArray::PageIndex indexCount) {
- if (indexes)
- free(indexes);
- }
-
- virtual Value* allocateArrayPage() {
- return static_cast<Value*>(
- malloc(sizeof(Value) * ValueInternalArray::itemsPerPage));
- }
-
- virtual void releaseArrayPage(Value* value) {
- if (value)
- free(value);
- }
-};
-
-#else // #ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-/// @todo make this thread-safe (lock when accessign batch allocator)
-class DefaultValueArrayAllocator : public ValueArrayAllocator {
-public: // overridden from ValueArrayAllocator
- virtual ~DefaultValueArrayAllocator() {}
-
- virtual ValueInternalArray* newArray() {
- ValueInternalArray* array = arraysAllocator_.allocate();
- new (array) ValueInternalArray(); // placement new
- return array;
- }
-
- virtual ValueInternalArray* newArrayCopy(const ValueInternalArray& other) {
- ValueInternalArray* array = arraysAllocator_.allocate();
- new (array) ValueInternalArray(other); // placement new
- return array;
- }
-
- virtual void destructArray(ValueInternalArray* array) {
- if (array) {
- array->~ValueInternalArray();
- arraysAllocator_.release(array);
- }
- }
-
- virtual void
- reallocateArrayPageIndex(Value**& indexes,
- ValueInternalArray::PageIndex& indexCount,
- ValueInternalArray::PageIndex minNewIndexCount) {
- ValueInternalArray::PageIndex newIndexCount = (indexCount * 3) / 2 + 1;
- if (minNewIndexCount > newIndexCount)
- newIndexCount = minNewIndexCount;
- void* newIndexes = realloc(indexes, sizeof(Value*) * newIndexCount);
- JSON_ASSERT_MESSAGE(newIndexes, "Couldn't realloc.");
- indexCount = newIndexCount;
- indexes = static_cast<Value**>(newIndexes);
- }
- virtual void releaseArrayPageIndex(Value** indexes,
- ValueInternalArray::PageIndex indexCount) {
- if (indexes)
- free(indexes);
- }
-
- virtual Value* allocateArrayPage() {
- return static_cast<Value*>(pagesAllocator_.allocate());
- }
-
- virtual void releaseArrayPage(Value* value) {
- if (value)
- pagesAllocator_.release(value);
- }
-
-private:
- BatchAllocator<ValueInternalArray, 1> arraysAllocator_;
- BatchAllocator<Value, ValueInternalArray::itemsPerPage> pagesAllocator_;
-};
-#endif // #ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-
-static ValueArrayAllocator*& arrayAllocator() {
- static DefaultValueArrayAllocator defaultAllocator;
- static ValueArrayAllocator* arrayAllocator = &defaultAllocator;
- return arrayAllocator;
-}
-
-static struct DummyArrayAllocatorInitializer {
- DummyArrayAllocatorInitializer() {
- arrayAllocator(); // ensure arrayAllocator() statics are initialized before
- // main().
- }
-} dummyArrayAllocatorInitializer;
-
-// //////////////////////////////////////////////////////////////////
-// class ValueInternalArray
-// //////////////////////////////////////////////////////////////////
-bool ValueInternalArray::equals(const IteratorState& x,
- const IteratorState& other) {
- return x.array_ == other.array_ &&
- x.currentItemIndex_ == other.currentItemIndex_ &&
- x.currentPageIndex_ == other.currentPageIndex_;
-}
-
-void ValueInternalArray::increment(IteratorState& it) {
- JSON_ASSERT_MESSAGE(
- it.array_ && (it.currentPageIndex_ - it.array_->pages_) * itemsPerPage +
- it.currentItemIndex_ !=
- it.array_->size_,
- "ValueInternalArray::increment(): moving iterator beyond end");
- ++(it.currentItemIndex_);
- if (it.currentItemIndex_ == itemsPerPage) {
- it.currentItemIndex_ = 0;
- ++(it.currentPageIndex_);
- }
-}
-
-void ValueInternalArray::decrement(IteratorState& it) {
- JSON_ASSERT_MESSAGE(
- it.array_ && it.currentPageIndex_ == it.array_->pages_ &&
- it.currentItemIndex_ == 0,
- "ValueInternalArray::decrement(): moving iterator beyond end");
- if (it.currentItemIndex_ == 0) {
- it.currentItemIndex_ = itemsPerPage - 1;
- --(it.currentPageIndex_);
- } else {
- --(it.currentItemIndex_);
- }
-}
-
-Value& ValueInternalArray::unsafeDereference(const IteratorState& it) {
- return (*(it.currentPageIndex_))[it.currentItemIndex_];
-}
-
-Value& ValueInternalArray::dereference(const IteratorState& it) {
- JSON_ASSERT_MESSAGE(
- it.array_ && (it.currentPageIndex_ - it.array_->pages_) * itemsPerPage +
- it.currentItemIndex_ <
- it.array_->size_,
- "ValueInternalArray::dereference(): dereferencing invalid iterator");
- return unsafeDereference(it);
-}
-
-void ValueInternalArray::makeBeginIterator(IteratorState& it) const {
- it.array_ = const_cast<ValueInternalArray*>(this);
- it.currentItemIndex_ = 0;
- it.currentPageIndex_ = pages_;
-}
-
-void ValueInternalArray::makeIterator(IteratorState& it,
- ArrayIndex index) const {
- it.array_ = const_cast<ValueInternalArray*>(this);
- it.currentItemIndex_ = index % itemsPerPage;
- it.currentPageIndex_ = pages_ + index / itemsPerPage;
-}
-
-void ValueInternalArray::makeEndIterator(IteratorState& it) const {
- makeIterator(it, size_);
-}
-
-ValueInternalArray::ValueInternalArray() : pages_(0), size_(0), pageCount_(0) {}
-
-ValueInternalArray::ValueInternalArray(const ValueInternalArray& other)
- : pages_(0), size_(other.size_), pageCount_(0) {
- PageIndex minNewPages = other.size_ / itemsPerPage;
- arrayAllocator()->reallocateArrayPageIndex(pages_, pageCount_, minNewPages);
- JSON_ASSERT_MESSAGE(pageCount_ >= minNewPages,
- "ValueInternalArray::reserve(): bad reallocation");
- IteratorState itOther;
- other.makeBeginIterator(itOther);
- Value* value;
- for (ArrayIndex index = 0; index < size_; ++index, increment(itOther)) {
- if (index % itemsPerPage == 0) {
- PageIndex pageIndex = index / itemsPerPage;
- value = arrayAllocator()->allocateArrayPage();
- pages_[pageIndex] = value;
- }
- new (value) Value(dereference(itOther));
- }
-}
-
-ValueInternalArray& ValueInternalArray::operator=(ValueInternalArray other) {
- swap(other);
- return *this;
-}
-
-ValueInternalArray::~ValueInternalArray() {
- // destroy all constructed items
- IteratorState it;
- IteratorState itEnd;
- makeBeginIterator(it);
- makeEndIterator(itEnd);
- for (; !equals(it, itEnd); increment(it)) {
- Value* value = &dereference(it);
- value->~Value();
- }
- // release all pages
- PageIndex lastPageIndex = size_ / itemsPerPage;
- for (PageIndex pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex)
- arrayAllocator()->releaseArrayPage(pages_[pageIndex]);
- // release pages index
- arrayAllocator()->releaseArrayPageIndex(pages_, pageCount_);
-}
-
-void ValueInternalArray::swap(ValueInternalArray& other) {
- Value** tempPages = pages_;
- pages_ = other.pages_;
- other.pages_ = tempPages;
- ArrayIndex tempSize = size_;
- size_ = other.size_;
- other.size_ = tempSize;
- PageIndex tempPageCount = pageCount_;
- pageCount_ = other.pageCount_;
- other.pageCount_ = tempPageCount;
-}
-
-void ValueInternalArray::clear() {
- ValueInternalArray dummy;
- swap(dummy);
-}
-
-void ValueInternalArray::resize(ArrayIndex newSize) {
- if (newSize == 0)
- clear();
- else if (newSize < size_) {
- IteratorState it;
- IteratorState itEnd;
- makeIterator(it, newSize);
- makeIterator(itEnd, size_);
- for (; !equals(it, itEnd); increment(it)) {
- Value* value = &dereference(it);
- value->~Value();
- }
- PageIndex pageIndex = (newSize + itemsPerPage - 1) / itemsPerPage;
- PageIndex lastPageIndex = size_ / itemsPerPage;
- for (; pageIndex < lastPageIndex; ++pageIndex)
- arrayAllocator()->releaseArrayPage(pages_[pageIndex]);
- size_ = newSize;
- } else if (newSize > size_)
- resolveReference(newSize);
-}
-
-void ValueInternalArray::makeIndexValid(ArrayIndex index) {
- // Need to enlarge page index ?
- if (index >= pageCount_ * itemsPerPage) {
- PageIndex minNewPages = (index + 1) / itemsPerPage;
- arrayAllocator()->reallocateArrayPageIndex(pages_, pageCount_, minNewPages);
- JSON_ASSERT_MESSAGE(pageCount_ >= minNewPages,
- "ValueInternalArray::reserve(): bad reallocation");
- }
-
- // Need to allocate new pages ?
- ArrayIndex nextPageIndex = (size_ % itemsPerPage) != 0
- ? size_ - (size_ % itemsPerPage) + itemsPerPage
- : size_;
- if (nextPageIndex <= index) {
- PageIndex pageIndex = nextPageIndex / itemsPerPage;
- PageIndex pageToAllocate = (index - nextPageIndex) / itemsPerPage + 1;
- for (; pageToAllocate-- > 0; ++pageIndex)
- pages_[pageIndex] = arrayAllocator()->allocateArrayPage();
- }
-
- // Initialize all new entries
- IteratorState it;
- IteratorState itEnd;
- makeIterator(it, size_);
- size_ = index + 1;
- makeIterator(itEnd, size_);
- for (; !equals(it, itEnd); increment(it)) {
- Value* value = &dereference(it);
- new (value) Value(); // Construct a default value using placement new
- }
-}
-
-Value& ValueInternalArray::resolveReference(ArrayIndex index) {
- if (index >= size_)
- makeIndexValid(index);
- return pages_[index / itemsPerPage][index % itemsPerPage];
-}
-
-Value* ValueInternalArray::find(ArrayIndex index) const {
- if (index >= size_)
- return 0;
- return &(pages_[index / itemsPerPage][index % itemsPerPage]);
-}
-
-ValueInternalArray::ArrayIndex ValueInternalArray::size() const {
- return size_;
-}
-
-int ValueInternalArray::distance(const IteratorState& x,
- const IteratorState& y) {
- return indexOf(y) - indexOf(x);
-}
-
-ValueInternalArray::ArrayIndex
-ValueInternalArray::indexOf(const IteratorState& iterator) {
- if (!iterator.array_)
- return ArrayIndex(-1);
- return ArrayIndex((iterator.currentPageIndex_ - iterator.array_->pages_) *
- itemsPerPage +
- iterator.currentItemIndex_);
-}
-
-int ValueInternalArray::compare(const ValueInternalArray& other) const {
- int sizeDiff(size_ - other.size_);
- if (sizeDiff != 0)
- return sizeDiff;
-
- for (ArrayIndex index = 0; index < size_; ++index) {
- int diff = pages_[index / itemsPerPage][index % itemsPerPage].compare(
- other.pages_[index / itemsPerPage][index % itemsPerPage]);
- if (diff != 0)
- return diff;
- }
- return 0;
-}
-
-} // namespace Json
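
ValueInternalArray backed the optional internal-map build mode; with it deleted, the default Json::Value representation is the single remaining code path. A minimal sketch of the public array interface that covers the same operations (append, indexed access, resize):

#include <json/json.h>
#include <cassert>

int main() {
  Json::Value arr(Json::arrayValue);
  arr.append(1);            // grow by one element
  arr.append("two");
  arr[2] = 3.0;             // operator[] creates missing indexes
  assert(arr.size() == 3u);
  arr.resize(2);            // shrink, destroying trailing values
  assert(arr.size() == 2u);
  return 0;
}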
diff --git a/3rdparty/jsoncpp/src/lib_json/json_internalmap.inl b/3rdparty/jsoncpp/src/lib_json/json_internalmap.inl
deleted file mode 100644
index ef3f3302dc9..00000000000
--- a/3rdparty/jsoncpp/src/lib_json/json_internalmap.inl
+++ /dev/null
@@ -1,473 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-// included by json_value.cpp
-
-namespace Json {
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class ValueInternalMap
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-/** \internal MUST be safely initialized using memset( this, 0,
- * sizeof(ValueInternalLink) );
- * This optimization is used by the fast allocator.
- */
-ValueInternalLink::ValueInternalLink() : previous_(0), next_(0) {}
-
-ValueInternalLink::~ValueInternalLink() {
- for (int index = 0; index < itemPerLink; ++index) {
- if (!items_[index].isItemAvailable()) {
- if (!items_[index].isMemberNameStatic())
- free(keys_[index]);
- } else
- break;
- }
-}
-
-ValueMapAllocator::~ValueMapAllocator() {}
-
-#ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-class DefaultValueMapAllocator : public ValueMapAllocator {
-public: // overridden from ValueMapAllocator
- virtual ValueInternalMap* newMap() { return new ValueInternalMap(); }
-
- virtual ValueInternalMap* newMapCopy(const ValueInternalMap& other) {
- return new ValueInternalMap(other);
- }
-
- virtual void destructMap(ValueInternalMap* map) { delete map; }
-
- virtual ValueInternalLink* allocateMapBuckets(unsigned int size) {
- return new ValueInternalLink[size];
- }
-
- virtual void releaseMapBuckets(ValueInternalLink* links) { delete[] links; }
-
- virtual ValueInternalLink* allocateMapLink() {
- return new ValueInternalLink();
- }
-
- virtual void releaseMapLink(ValueInternalLink* link) { delete link; }
-};
-#else
-/// @todo make this thread-safe (lock when accessign batch allocator)
-class DefaultValueMapAllocator : public ValueMapAllocator {
-public: // overridden from ValueMapAllocator
- virtual ValueInternalMap* newMap() {
- ValueInternalMap* map = mapsAllocator_.allocate();
- new (map) ValueInternalMap(); // placement new
- return map;
- }
-
- virtual ValueInternalMap* newMapCopy(const ValueInternalMap& other) {
- ValueInternalMap* map = mapsAllocator_.allocate();
- new (map) ValueInternalMap(other); // placement new
- return map;
- }
-
- virtual void destructMap(ValueInternalMap* map) {
- if (map) {
- map->~ValueInternalMap();
- mapsAllocator_.release(map);
- }
- }
-
- virtual ValueInternalLink* allocateMapBuckets(unsigned int size) {
- return new ValueInternalLink[size];
- }
-
- virtual void releaseMapBuckets(ValueInternalLink* links) { delete[] links; }
-
- virtual ValueInternalLink* allocateMapLink() {
- ValueInternalLink* link = linksAllocator_.allocate();
- memset(link, 0, sizeof(ValueInternalLink));
- return link;
- }
-
- virtual void releaseMapLink(ValueInternalLink* link) {
- link->~ValueInternalLink();
- linksAllocator_.release(link);
- }
-
-private:
- BatchAllocator<ValueInternalMap, 1> mapsAllocator_;
- BatchAllocator<ValueInternalLink, 1> linksAllocator_;
-};
-#endif
-
-static ValueMapAllocator*& mapAllocator() {
- static DefaultValueMapAllocator defaultAllocator;
- static ValueMapAllocator* mapAllocator = &defaultAllocator;
- return mapAllocator;
-}
-
-static struct DummyMapAllocatorInitializer {
- DummyMapAllocatorInitializer() {
- mapAllocator(); // ensure mapAllocator() statics are initialized before
- // main().
- }
-} dummyMapAllocatorInitializer;
-
-// h(K) = value * K >> w ; with w = 32 & K prime w.r.t. 2^32.
-
-/*
-use linked list hash map.
-buckets array is a container.
-linked list element contains 6 key/values. (memory = (16+4) * 6 + 4 = 124)
-value have extra state: valid, available, deleted
-*/
-
-ValueInternalMap::ValueInternalMap()
- : buckets_(0), tailLink_(0), bucketsSize_(0), itemCount_(0) {}
-
-ValueInternalMap::ValueInternalMap(const ValueInternalMap& other)
- : buckets_(0), tailLink_(0), bucketsSize_(0), itemCount_(0) {
- reserve(other.itemCount_);
- IteratorState it;
- IteratorState itEnd;
- other.makeBeginIterator(it);
- other.makeEndIterator(itEnd);
- for (; !equals(it, itEnd); increment(it)) {
- bool isStatic;
- const char* memberName = key(it, isStatic);
- const Value& aValue = value(it);
- resolveReference(memberName, isStatic) = aValue;
- }
-}
-
-ValueInternalMap& ValueInternalMap::operator=(ValueInternalMap other) {
- swap(other);
- return *this;
-}
-
-ValueInternalMap::~ValueInternalMap() {
- if (buckets_) {
- for (BucketIndex bucketIndex = 0; bucketIndex < bucketsSize_;
- ++bucketIndex) {
- ValueInternalLink* link = buckets_[bucketIndex].next_;
- while (link) {
- ValueInternalLink* linkToRelease = link;
- link = link->next_;
- mapAllocator()->releaseMapLink(linkToRelease);
- }
- }
- mapAllocator()->releaseMapBuckets(buckets_);
- }
-}
-
-void ValueInternalMap::swap(ValueInternalMap& other) {
- ValueInternalLink* tempBuckets = buckets_;
- buckets_ = other.buckets_;
- other.buckets_ = tempBuckets;
- ValueInternalLink* tempTailLink = tailLink_;
- tailLink_ = other.tailLink_;
- other.tailLink_ = tempTailLink;
- BucketIndex tempBucketsSize = bucketsSize_;
- bucketsSize_ = other.bucketsSize_;
- other.bucketsSize_ = tempBucketsSize;
- BucketIndex tempItemCount = itemCount_;
- itemCount_ = other.itemCount_;
- other.itemCount_ = tempItemCount;
-}
-
-void ValueInternalMap::clear() {
- ValueInternalMap dummy;
- swap(dummy);
-}
-
-ValueInternalMap::BucketIndex ValueInternalMap::size() const {
- return itemCount_;
-}
-
-bool ValueInternalMap::reserveDelta(BucketIndex growth) {
- return reserve(itemCount_ + growth);
-}
-
-bool ValueInternalMap::reserve(BucketIndex newItemCount) {
- if (!buckets_ && newItemCount > 0) {
- buckets_ = mapAllocator()->allocateMapBuckets(1);
- bucketsSize_ = 1;
- tailLink_ = &buckets_[0];
- }
- // BucketIndex idealBucketCount = (newItemCount +
- // ValueInternalLink::itemPerLink) / ValueInternalLink::itemPerLink;
- return true;
-}
-
-const Value* ValueInternalMap::find(const char* key) const {
- if (!bucketsSize_)
- return 0;
- HashKey hashedKey = hash(key);
- BucketIndex bucketIndex = hashedKey % bucketsSize_;
- for (const ValueInternalLink* current = &buckets_[bucketIndex]; current != 0;
- current = current->next_) {
- for (BucketIndex index = 0; index < ValueInternalLink::itemPerLink;
- ++index) {
- if (current->items_[index].isItemAvailable())
- return 0;
- if (strcmp(key, current->keys_[index]) == 0)
- return &current->items_[index];
- }
- }
- return 0;
-}
-
-Value* ValueInternalMap::find(const char* key) {
- const ValueInternalMap* constThis = this;
- return const_cast<Value*>(constThis->find(key));
-}
-
-Value& ValueInternalMap::resolveReference(const char* key, bool isStatic) {
- HashKey hashedKey = hash(key);
- if (bucketsSize_) {
- BucketIndex bucketIndex = hashedKey % bucketsSize_;
- ValueInternalLink** previous = 0;
- BucketIndex index;
- for (ValueInternalLink* current = &buckets_[bucketIndex]; current != 0;
- previous = &current->next_, current = current->next_) {
- for (index = 0; index < ValueInternalLink::itemPerLink; ++index) {
- if (current->items_[index].isItemAvailable())
- return setNewItem(key, isStatic, current, index);
- if (strcmp(key, current->keys_[index]) == 0)
- return current->items_[index];
- }
- }
- }
-
- reserveDelta(1);
- return unsafeAdd(key, isStatic, hashedKey);
-}
-
-void ValueInternalMap::remove(const char* key) {
- HashKey hashedKey = hash(key);
- if (!bucketsSize_)
- return;
- BucketIndex bucketIndex = hashedKey % bucketsSize_;
- for (ValueInternalLink* link = &buckets_[bucketIndex]; link != 0;
- link = link->next_) {
- BucketIndex index;
- for (index = 0; index < ValueInternalLink::itemPerLink; ++index) {
- if (link->items_[index].isItemAvailable())
- return;
- if (strcmp(key, link->keys_[index]) == 0) {
- doActualRemove(link, index, bucketIndex);
- return;
- }
- }
- }
-}
-
-void ValueInternalMap::doActualRemove(ValueInternalLink* link,
- BucketIndex index,
- BucketIndex bucketIndex) {
- // find last item of the bucket and swap it with the 'removed' one.
- // set removed items flags to 'available'.
- // if last page only contains 'available' items, then desallocate it (it's
- // empty)
- ValueInternalLink*& lastLink = getLastLinkInBucket(index);
- BucketIndex lastItemIndex = 1; // a link can never be empty, so start at 1
- for (; lastItemIndex < ValueInternalLink::itemPerLink;
- ++lastItemIndex) // may be optimized with dicotomic search
- {
- if (lastLink->items_[lastItemIndex].isItemAvailable())
- break;
- }
-
- BucketIndex lastUsedIndex = lastItemIndex - 1;
- Value* valueToDelete = &link->items_[index];
- Value* valueToPreserve = &lastLink->items_[lastUsedIndex];
- if (valueToDelete != valueToPreserve)
- valueToDelete->swap(*valueToPreserve);
- if (lastUsedIndex == 0) // page is now empty
- { // remove it from bucket linked list and delete it.
- ValueInternalLink* linkPreviousToLast = lastLink->previous_;
-    if (linkPreviousToLast != 0) // cannot delete the bucket's head link.
- {
- mapAllocator()->releaseMapLink(lastLink);
- linkPreviousToLast->next_ = 0;
- lastLink = linkPreviousToLast;
- }
- } else {
- Value dummy;
- valueToPreserve->swap(dummy); // restore deleted to default Value.
- valueToPreserve->setItemUsed(false);
- }
- --itemCount_;
-}
-
-ValueInternalLink*&
-ValueInternalMap::getLastLinkInBucket(BucketIndex bucketIndex) {
- if (bucketIndex == bucketsSize_ - 1)
- return tailLink_;
- ValueInternalLink*& previous = buckets_[bucketIndex + 1].previous_;
- if (!previous)
- previous = &buckets_[bucketIndex];
- return previous;
-}
-
-Value& ValueInternalMap::setNewItem(const char* key,
- bool isStatic,
- ValueInternalLink* link,
- BucketIndex index) {
- char* duplicatedKey = makeMemberName(key);
- ++itemCount_;
- link->keys_[index] = duplicatedKey;
- link->items_[index].setItemUsed();
- link->items_[index].setMemberNameIsStatic(isStatic);
- return link->items_[index]; // items already default constructed.
-}
-
-Value&
-ValueInternalMap::unsafeAdd(const char* key, bool isStatic, HashKey hashedKey) {
- JSON_ASSERT_MESSAGE(bucketsSize_ > 0,
- "ValueInternalMap::unsafeAdd(): internal logic error.");
- BucketIndex bucketIndex = hashedKey % bucketsSize_;
- ValueInternalLink*& previousLink = getLastLinkInBucket(bucketIndex);
- ValueInternalLink* link = previousLink;
- BucketIndex index;
- for (index = 0; index < ValueInternalLink::itemPerLink; ++index) {
- if (link->items_[index].isItemAvailable())
- break;
- }
- if (index == ValueInternalLink::itemPerLink) // need to add a new page
- {
- ValueInternalLink* newLink = mapAllocator()->allocateMapLink();
- index = 0;
- link->next_ = newLink;
- previousLink = newLink;
- link = newLink;
- }
- return setNewItem(key, isStatic, link, index);
-}
-
-ValueInternalMap::HashKey ValueInternalMap::hash(const char* key) const {
- HashKey hash = 0;
- while (*key)
- hash += *key++ * 37;
- return hash;
-}
-
-int ValueInternalMap::compare(const ValueInternalMap& other) const {
- int sizeDiff(itemCount_ - other.itemCount_);
- if (sizeDiff != 0)
- return sizeDiff;
-  // A strict order guarantee is required. Compare all keys FIRST, then
-  // compare values.
- IteratorState it;
- IteratorState itEnd;
- makeBeginIterator(it);
- makeEndIterator(itEnd);
- for (; !equals(it, itEnd); increment(it)) {
- if (!other.find(key(it)))
- return 1;
- }
-
-  // All keys are equal; now compare values.
- makeBeginIterator(it);
- for (; !equals(it, itEnd); increment(it)) {
- const Value* otherValue = other.find(key(it));
- int valueDiff = value(it).compare(*otherValue);
- if (valueDiff != 0)
- return valueDiff;
- }
- return 0;
-}
-
-void ValueInternalMap::makeBeginIterator(IteratorState& it) const {
- it.map_ = const_cast<ValueInternalMap*>(this);
- it.bucketIndex_ = 0;
- it.itemIndex_ = 0;
- it.link_ = buckets_;
-}
-
-void ValueInternalMap::makeEndIterator(IteratorState& it) const {
- it.map_ = const_cast<ValueInternalMap*>(this);
- it.bucketIndex_ = bucketsSize_;
- it.itemIndex_ = 0;
- it.link_ = 0;
-}
-
-bool ValueInternalMap::equals(const IteratorState& x,
- const IteratorState& other) {
- return x.map_ == other.map_ && x.bucketIndex_ == other.bucketIndex_ &&
- x.link_ == other.link_ && x.itemIndex_ == other.itemIndex_;
-}
-
-void ValueInternalMap::incrementBucket(IteratorState& iterator) {
- ++iterator.bucketIndex_;
- JSON_ASSERT_MESSAGE(
- iterator.bucketIndex_ <= iterator.map_->bucketsSize_,
- "ValueInternalMap::increment(): attempting to iterate beyond end.");
- if (iterator.bucketIndex_ == iterator.map_->bucketsSize_)
- iterator.link_ = 0;
- else
- iterator.link_ = &(iterator.map_->buckets_[iterator.bucketIndex_]);
- iterator.itemIndex_ = 0;
-}
-
-void ValueInternalMap::increment(IteratorState& iterator) {
- JSON_ASSERT_MESSAGE(iterator.map_,
- "Attempting to iterator using invalid iterator.");
- ++iterator.itemIndex_;
- if (iterator.itemIndex_ == ValueInternalLink::itemPerLink) {
- JSON_ASSERT_MESSAGE(
- iterator.link_ != 0,
- "ValueInternalMap::increment(): attempting to iterate beyond end.");
- iterator.link_ = iterator.link_->next_;
- if (iterator.link_ == 0)
- incrementBucket(iterator);
- } else if (iterator.link_->items_[iterator.itemIndex_].isItemAvailable()) {
- incrementBucket(iterator);
- }
-}
-
-void ValueInternalMap::decrement(IteratorState& iterator) {
- if (iterator.itemIndex_ == 0) {
- JSON_ASSERT_MESSAGE(iterator.map_,
- "Attempting to iterate using invalid iterator.");
- if (iterator.link_ == &iterator.map_->buckets_[iterator.bucketIndex_]) {
- JSON_ASSERT_MESSAGE(iterator.bucketIndex_ > 0,
- "Attempting to iterate beyond beginning.");
- --(iterator.bucketIndex_);
- }
- iterator.link_ = iterator.link_->previous_;
- iterator.itemIndex_ = ValueInternalLink::itemPerLink - 1;
- }
-}
-
-const char* ValueInternalMap::key(const IteratorState& iterator) {
- JSON_ASSERT_MESSAGE(iterator.link_,
- "Attempting to iterate using invalid iterator.");
- return iterator.link_->keys_[iterator.itemIndex_];
-}
-
-const char* ValueInternalMap::key(const IteratorState& iterator,
- bool& isStatic) {
- JSON_ASSERT_MESSAGE(iterator.link_,
- "Attempting to iterate using invalid iterator.");
- isStatic = iterator.link_->items_[iterator.itemIndex_].isMemberNameStatic();
- return iterator.link_->keys_[iterator.itemIndex_];
-}
-
-Value& ValueInternalMap::value(const IteratorState& iterator) {
- JSON_ASSERT_MESSAGE(iterator.link_,
- "Attempting to iterate using invalid iterator.");
- return iterator.link_->items_[iterator.itemIndex_];
-}
-
-int ValueInternalMap::distance(const IteratorState& x, const IteratorState& y) {
-  int offset = 0;
-  IteratorState it = x;
-  while (!equals(it, y)) {
-    increment(it);
-    ++offset;
-  }
-  return offset;
-}
-
-} // namespace Json
diff --git a/3rdparty/jsoncpp/src/lib_json/json_reader.cpp b/3rdparty/jsoncpp/src/lib_json/json_reader.cpp
index c5111f8d708..ceff66c3134 100644
--- a/3rdparty/jsoncpp/src/lib_json/json_reader.cpp
+++ b/3rdparty/jsoncpp/src/lib_json/json_reader.cpp
@@ -14,6 +14,9 @@
#include <cassert>
#include <cstring>
#include <istream>
+#include <sstream>
+#include <memory>
+#include <set>
#if defined(_MSC_VER) && _MSC_VER < 1500 // VC++ 8.0 and below
#define snprintf _snprintf
@@ -24,8 +27,17 @@
#pragma warning(disable : 4996)
#endif
+static int const stackLimit_g = 1000;
+static int stackDepth_g = 0; // see readValue()
+
namespace Json {
+#if __cplusplus >= 201103L
+typedef std::unique_ptr<CharReader> CharReaderPtr;
+#else
+typedef std::auto_ptr<CharReader> CharReaderPtr;
+#endif
+
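The CharReaderPtr typedef above selects std::unique_ptr under C++11 and falls back to std::auto_ptr for C++98, matching the commit's goal of compiling pedantically as C++98. A minimal standalone sketch of the same idiom, with Widget as a hypothetical payload type:

    #include <memory>
    #include <cstdio>

    struct Widget {
      void hello() { std::printf("hello\n"); }
    };

    #if __cplusplus >= 201103L
    typedef std::unique_ptr<Widget> WidgetPtr; // modern owning pointer
    #else
    typedef std::auto_ptr<Widget> WidgetPtr;   // only std owner available in C++98
    #endif

    int main() {
      WidgetPtr w(new Widget); // freed automatically when w leaves scope
      w->hello();
      return 0;
    }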
// Implementation of class Features
// ////////////////////////////////
@@ -47,23 +59,6 @@ Features Features::strictMode() {
// Implementation of class Reader
// ////////////////////////////////
-static inline bool in(Reader::Char c,
- Reader::Char c1,
- Reader::Char c2,
- Reader::Char c3,
- Reader::Char c4) {
- return c == c1 || c == c2 || c == c3 || c == c4;
-}
-
-static inline bool in(Reader::Char c,
- Reader::Char c1,
- Reader::Char c2,
- Reader::Char c3,
- Reader::Char c4,
- Reader::Char c5) {
- return c == c1 || c == c2 || c == c3 || c == c4 || c == c5;
-}
-
static bool containsNewLine(Reader::Location begin, Reader::Location end) {
for (; begin < end; ++begin)
if (*begin == '\n' || *begin == '\r')
@@ -125,6 +120,7 @@ bool Reader::parse(const char* beginDoc,
nodes_.pop();
nodes_.push(&root);
+ stackDepth_g = 0; // Yes, this is bad coding, but options are limited.
bool successful = readValue();
Token token;
skipCommentTokens(token);
@@ -147,19 +143,18 @@ bool Reader::parse(const char* beginDoc,
}
bool Reader::readValue() {
+ // This is a non-reentrant way to support a stackLimit. Terrible!
+ // But this deprecated class has a security problem: Bad input can
+ // cause a seg-fault. This seems like a fair, binary-compatible way
+ // to prevent the problem.
+ if (stackDepth_g >= stackLimit_g) throwRuntimeError("Exceeded stackLimit in readValue().");
+ ++stackDepth_g;
+
Token token;
skipCommentTokens(token);
bool successful = true;
if (collectComments_ && !commentsBefore_.empty()) {
- // Remove newline characters at the end of the comments
- size_t lastNonNewline = commentsBefore_.find_last_not_of("\r\n");
- if (lastNonNewline != std::string::npos) {
- commentsBefore_.erase(lastNonNewline + 1);
- } else {
- commentsBefore_.clear();
- }
-
currentValue().setComment(commentsBefore_, commentBefore);
commentsBefore_ = "";
}
@@ -180,31 +175,42 @@ bool Reader::readValue() {
successful = decodeString(token);
break;
case tokenTrue:
- currentValue() = true;
+ {
+ Value v(true);
+ currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
+ }
break;
case tokenFalse:
- currentValue() = false;
+ {
+ Value v(false);
+ currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
+ }
break;
case tokenNull:
- currentValue() = Value();
+ {
+ Value v;
+ currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
+ }
break;
case tokenArraySeparator:
+ case tokenObjectEnd:
+ case tokenArrayEnd:
if (features_.allowDroppedNullPlaceholders_) {
// "Un-read" the current token and mark the current value as a null
// token.
current_--;
- currentValue() = Value();
+ Value v;
+ currentValue().swapPayload(v);
currentValue().setOffsetStart(current_ - begin_ - 1);
currentValue().setOffsetLimit(current_ - begin_);
break;
- }
- // Else, fall through...
+ } // Else, fall through...
default:
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
@@ -216,6 +222,7 @@ bool Reader::readValue() {
lastValue_ = &currentValue();
}
+ --stackDepth_g;
return successful;
}
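stackDepth_g is incremented on entry to readValue() and decremented on exit, so maliciously nested input such as thousands of '[' fails with an exception instead of overflowing the native call stack; the file-scope global keeps the fix binary-compatible with the existing Reader layout, at the cost of reentrancy. A minimal sketch of the same depth guard on a toy recursive parser (names are illustrative, not jsoncpp's):

    #include <stdexcept>
    #include <string>

    static const int kStackLimit = 1000;

    // Recursively consumes '[' ... ']' nesting; throws before the call
    // stack can overflow on pathological input.
    static void parseNested(const char*& p, const char* end, int depth) {
      if (depth >= kStackLimit)
        throw std::runtime_error("Exceeded stack limit in parseNested().");
      while (p != end && *p == '[') {
        ++p;
        parseNested(p, end, depth + 1); // one native stack frame per level
        if (p != end && *p == ']')
          ++p;
      }
    }

    int main() {
      std::string doc(100000, '[');    // deeply nested, unterminated input
      const char* p = doc.data();
      try {
        parseNested(p, p + doc.size(), 0);
        return 1;                      // should not get here
      } catch (const std::runtime_error&) {
        return 0;                      // rejected safely
      }
    }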
@@ -229,13 +236,6 @@ void Reader::skipCommentTokens(Token& token) {
}
}
-bool Reader::expectToken(TokenType type, Token& token, const char* message) {
- readToken(token);
- if (token.type_ != type)
- return addError(message, token);
- return true;
-}
-
bool Reader::readToken(Token& token) {
skipSpaces();
token.start_ = current_;
@@ -351,14 +351,34 @@ bool Reader::readComment() {
return true;
}
+static std::string normalizeEOL(Reader::Location begin, Reader::Location end) {
+ std::string normalized;
+ normalized.reserve(end - begin);
+ Reader::Location current = begin;
+ while (current != end) {
+ char c = *current++;
+ if (c == '\r') {
+ if (current != end && *current == '\n')
+ // convert dos EOL
+ ++current;
+ // convert Mac EOL
+ normalized += '\n';
+ } else {
+ normalized += c;
+ }
+ }
+ return normalized;
+}
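normalizeEOL() rewrites both DOS ("\r\n") and old Mac ("\r") line endings to a single '\n', so a comment captured on any platform is stored in one canonical form. A hypothetical check, assuming the function above is in scope:

    #include <cassert>
    #include <string>

    void normalizeEOLExample() {
      const std::string mixed = "line1\r\nline2\rline3\n";
      const std::string out =
          normalizeEOL(mixed.data(), mixed.data() + mixed.size());
      assert(out == "line1\nline2\nline3\n"); // every EOL flavor becomes '\n'
    }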
+
void
Reader::addComment(Location begin, Location end, CommentPlacement placement) {
assert(collectComments_);
+ const std::string& normalized = normalizeEOL(begin, end);
if (placement == commentAfterOnSameLine) {
assert(lastValue_ != 0);
- lastValue_->setComment(std::string(begin, end), placement);
+ lastValue_->setComment(normalized, placement);
} else {
- commentsBefore_ += std::string(begin, end);
+ commentsBefore_ += normalized;
}
}
@@ -374,18 +394,38 @@ bool Reader::readCStyleComment() {
bool Reader::readCppStyleComment() {
while (current_ != end_) {
Char c = getNextChar();
- if (c == '\r' || c == '\n')
+ if (c == '\n')
+ break;
+ if (c == '\r') {
+ // Consume DOS EOL. It will be normalized in addComment.
+ if (current_ != end_ && *current_ == '\n')
+ getNextChar();
+      // Break on Mac OS 9 EOL.
break;
+ }
}
return true;
}
void Reader::readNumber() {
- while (current_ != end_) {
- if (!(*current_ >= '0' && *current_ <= '9') &&
- !in(*current_, '.', 'e', 'E', '+', '-'))
- break;
- ++current_;
+ const char *p = current_;
+  char c = '0'; // stand-in for the character readToken() already consumed
+ // integral part
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ // fractional part
+ if (c == '.') {
+ c = (current_ = p) < end_ ? *p++ : 0;
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ }
+ // exponential part
+ if (c == 'e' || c == 'E') {
+ c = (current_ = p) < end_ ? *p++ : 0;
+ if (c == '+' || c == '-')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : 0;
}
}
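The rewritten readNumber() walks the grammar digits [ '.' digits ] [ ('e'|'E') [sign] digits ] with a lookahead pointer p: the expression c = (current_ = p) < end_ ? *p++ : 0 commits the cursor, then fetches the next character, or 0 at end of input, all without the removed in() helpers. The same scanner as a standalone sketch (scanNumber is an illustrative name):

    #include <cassert>
    #include <cstring>

    // Returns a pointer just past the longest JSON-style number at 'cur'.
    static const char* scanNumber(const char* cur, const char* end) {
      const char* p = cur;
      char c = '0'; // pretend a digit was already consumed, as readNumber() does
      while (c >= '0' && c <= '9')              // integral part
        c = (cur = p) < end ? *p++ : 0;
      if (c == '.') {                           // fractional part
        c = (cur = p) < end ? *p++ : 0;
        while (c >= '0' && c <= '9')
          c = (cur = p) < end ? *p++ : 0;
      }
      if (c == 'e' || c == 'E') {               // exponent part
        c = (cur = p) < end ? *p++ : 0;
        if (c == '+' || c == '-')
          c = (cur = p) < end ? *p++ : 0;
        while (c >= '0' && c <= '9')
          c = (cur = p) < end ? *p++ : 0;
      }
      return cur; // first character that is not part of the number
    }

    int main() {
      const char* s = "12.5e-3,";
      assert(*scanNumber(s, s + std::strlen(s)) == ',');
      return 0;
    }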
@@ -404,7 +444,8 @@ bool Reader::readString() {
bool Reader::readObject(Token& tokenStart) {
Token tokenName;
std::string name;
- currentValue() = Value(objectValue);
+ Value init(objectValue);
+ currentValue().swapPayload(init);
currentValue().setOffsetStart(tokenStart.start_ - begin_);
while (readToken(tokenName)) {
bool initialTokenOk = true;
@@ -457,7 +498,8 @@ bool Reader::readObject(Token& tokenStart) {
}
bool Reader::readArray(Token& tokenStart) {
- currentValue() = Value(arrayValue);
+ Value init(arrayValue);
+ currentValue().swapPayload(init);
currentValue().setOffsetStart(tokenStart.start_ - begin_);
skipSpaces();
if (*current_ == ']') // empty array
@@ -497,20 +539,13 @@ bool Reader::decodeNumber(Token& token) {
Value decoded;
if (!decodeNumber(token, decoded))
return false;
- currentValue() = decoded;
+ currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool Reader::decodeNumber(Token& token, Value& decoded) {
- bool isDouble = false;
- for (Location inspect = token.start_; inspect != token.end_; ++inspect) {
- isDouble = isDouble || in(*inspect, '.', 'e', 'E', '+') ||
- (*inspect == '-' && inspect != token.start_);
- }
- if (isDouble)
- return decodeDouble(token, decoded);
// Attempts to parse the number as an integer. If the number is
// larger than the maximum supported value of an integer then
// we decode the number as a double.
@@ -518,6 +553,7 @@ bool Reader::decodeNumber(Token& token, Value& decoded) {
bool isNegative = *current == '-';
if (isNegative)
++current;
+ // TODO: Help the compiler do the div and mod at compile time or get rid of them.
Value::LargestUInt maxIntegerValue =
isNegative ? Value::LargestUInt(-Value::minLargestInt)
: Value::maxLargestUInt;
@@ -526,9 +562,7 @@ bool Reader::decodeNumber(Token& token, Value& decoded) {
while (current < token.end_) {
Char c = *current++;
if (c < '0' || c > '9')
- return addError("'" + std::string(token.start_, token.end_) +
- "' is not a number.",
- token);
+ return decodeDouble(token, decoded);
Value::UInt digit(c - '0');
if (value >= threshold) {
// We've hit or exceeded the max value divided by 10 (rounded down). If
@@ -555,7 +589,7 @@ bool Reader::decodeDouble(Token& token) {
Value decoded;
if (!decodeDouble(token, decoded))
return false;
- currentValue() = decoded;
+ currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
@@ -563,6 +597,945 @@ bool Reader::decodeDouble(Token& token) {
bool Reader::decodeDouble(Token& token, Value& decoded) {
double value = 0;
+ std::string buffer(token.start_, token.end_);
+ std::istringstream is(buffer);
+ if (!(is >> value))
+ return addError("'" + std::string(token.start_, token.end_) +
+ "' is not a number.",
+ token);
+ decoded = value;
+ return true;
+}
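decodeDouble() now funnels anything that fails integer decoding through std::istringstream, detecting malformed input via the stream state instead of hand-rolled character checks. The failure path in isolation (a minimal sketch; it does not reject trailing garbage, which the tokenizer has already bounded):

    #include <cassert>
    #include <sstream>
    #include <string>

    // Returns true and sets 'out' when 'text' begins with a parseable double.
    static bool parseDouble(const std::string& text, double& out) {
      std::istringstream is(text);
      if (!(is >> out))
        return false; // stream failure => not a number
      return true;
    }

    int main() {
      double d = 0;
      assert(parseDouble("1.5e2", d) && d == 150.0);
      assert(!parseDouble("bogus", d));
      return 0;
    }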
+
+bool Reader::decodeString(Token& token) {
+ std::string decoded_string;
+ if (!decodeString(token, decoded_string))
+ return false;
+ Value decoded(decoded_string);
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool Reader::decodeString(Token& token, std::string& decoded) {
+ decoded.reserve(token.end_ - token.start_ - 2);
+ Location current = token.start_ + 1; // skip '"'
+ Location end = token.end_ - 1; // do not include '"'
+ while (current != end) {
+ Char c = *current++;
+ if (c == '"')
+ break;
+ else if (c == '\\') {
+ if (current == end)
+ return addError("Empty escape sequence in string", token, current);
+ Char escape = *current++;
+ switch (escape) {
+ case '"':
+ decoded += '"';
+ break;
+ case '/':
+ decoded += '/';
+ break;
+ case '\\':
+ decoded += '\\';
+ break;
+ case 'b':
+ decoded += '\b';
+ break;
+ case 'f':
+ decoded += '\f';
+ break;
+ case 'n':
+ decoded += '\n';
+ break;
+ case 'r':
+ decoded += '\r';
+ break;
+ case 't':
+ decoded += '\t';
+ break;
+ case 'u': {
+ unsigned int unicode;
+ if (!decodeUnicodeCodePoint(token, current, end, unicode))
+ return false;
+ decoded += codePointToUTF8(unicode);
+ } break;
+ default:
+ return addError("Bad escape sequence in string", token, current);
+ }
+ } else {
+ decoded += c;
+ }
+ }
+ return true;
+}
+
+bool Reader::decodeUnicodeCodePoint(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode) {
+
+ if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
+ return false;
+ if (unicode >= 0xD800 && unicode <= 0xDBFF) {
+ // surrogate pairs
+ if (end - current < 6)
+ return addError(
+ "additional six characters expected to parse unicode surrogate pair.",
+ token,
+ current);
+ unsigned int surrogatePair;
+ if (*(current++) == '\\' && *(current++) == 'u') {
+ if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
+ unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
+ } else
+ return false;
+ } else
+ return addError("expecting another \\u token to begin the second half of "
+ "a unicode surrogate pair",
+ token,
+ current);
+ }
+ return true;
+}
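Code points beyond U+FFFF arrive as two \u escapes forming a UTF-16 surrogate pair, which decodeUnicodeCodePoint() recombines as 0x10000 + ((hi & 0x3FF) << 10) + (lo & 0x3FF). Worked example for "\uD834\uDD1E" (U+1D11E, the musical G clef):

    #include <cassert>

    int main() {
      unsigned hi = 0xD834, lo = 0xDD1E;  // the two halves of "\uD834\uDD1E"
      // (hi & 0x3FF) = 0x034, (lo & 0x3FF) = 0x11E
      // 0x10000 + (0x034 << 10) + 0x11E = 0x10000 + 0xD000 + 0x11E = 0x1D11E
      unsigned cp = 0x10000 + ((hi & 0x3FF) << 10) + (lo & 0x3FF);
      assert(cp == 0x1D11E); // U+1D11E MUSICAL SYMBOL G CLEF
      return 0;
    }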
+
+bool Reader::decodeUnicodeEscapeSequence(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode) {
+ if (end - current < 4)
+ return addError(
+ "Bad unicode escape sequence in string: four digits expected.",
+ token,
+ current);
+ unicode = 0;
+ for (int index = 0; index < 4; ++index) {
+ Char c = *current++;
+ unicode *= 16;
+ if (c >= '0' && c <= '9')
+ unicode += c - '0';
+ else if (c >= 'a' && c <= 'f')
+ unicode += c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ unicode += c - 'A' + 10;
+ else
+ return addError(
+ "Bad unicode escape sequence in string: hexadecimal digit expected.",
+ token,
+ current);
+ }
+ return true;
+}
+
+bool
+Reader::addError(const std::string& message, Token& token, Location extra) {
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = extra;
+ errors_.push_back(info);
+ return false;
+}
+
+bool Reader::recoverFromError(TokenType skipUntilToken) {
+ int errorCount = int(errors_.size());
+ Token skip;
+ for (;;) {
+ if (!readToken(skip))
+ errors_.resize(errorCount); // discard errors caused by recovery
+ if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
+ break;
+ }
+ errors_.resize(errorCount);
+ return false;
+}
+
+bool Reader::addErrorAndRecover(const std::string& message,
+ Token& token,
+ TokenType skipUntilToken) {
+ addError(message, token);
+ return recoverFromError(skipUntilToken);
+}
+
+Value& Reader::currentValue() { return *(nodes_.top()); }
+
+Reader::Char Reader::getNextChar() {
+ if (current_ == end_)
+ return 0;
+ return *current_++;
+}
+
+void Reader::getLocationLineAndColumn(Location location,
+ int& line,
+ int& column) const {
+ Location current = begin_;
+ Location lastLineStart = current;
+ line = 0;
+ while (current < location && current != end_) {
+ Char c = *current++;
+ if (c == '\r') {
+ if (*current == '\n')
+ ++current;
+ lastLineStart = current;
+ ++line;
+ } else if (c == '\n') {
+ lastLineStart = current;
+ ++line;
+ }
+ }
+ // column & line start at 1
+ column = int(location - lastLineStart) + 1;
+ ++line;
+}
+
+std::string Reader::getLocationLineAndColumn(Location location) const {
+ int line, column;
+ getLocationLineAndColumn(location, line, column);
+ char buffer[18 + 16 + 16 + 1];
+#if defined(_MSC_VER) && defined(__STDC_SECURE_LIB__)
+#if defined(WINCE)
+ _snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+#else
+ sprintf_s(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+#endif
+#else
+ snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+#endif
+ return buffer;
+}
+
+// Deprecated. Preserved for backward compatibility
+std::string Reader::getFormatedErrorMessages() const {
+ return getFormattedErrorMessages();
+}
+
+std::string Reader::getFormattedErrorMessages() const {
+ std::string formattedMessage;
+ for (Errors::const_iterator itError = errors_.begin();
+ itError != errors_.end();
+ ++itError) {
+ const ErrorInfo& error = *itError;
+ formattedMessage +=
+ "* " + getLocationLineAndColumn(error.token_.start_) + "\n";
+ formattedMessage += " " + error.message_ + "\n";
+ if (error.extra_)
+ formattedMessage +=
+ "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
+ }
+ return formattedMessage;
+}
+
+std::vector<Reader::StructuredError> Reader::getStructuredErrors() const {
+ std::vector<Reader::StructuredError> allErrors;
+ for (Errors::const_iterator itError = errors_.begin();
+ itError != errors_.end();
+ ++itError) {
+ const ErrorInfo& error = *itError;
+ Reader::StructuredError structured;
+ structured.offset_start = error.token_.start_ - begin_;
+ structured.offset_limit = error.token_.end_ - begin_;
+ structured.message = error.message_;
+ allErrors.push_back(structured);
+ }
+ return allErrors;
+}
+
+bool Reader::pushError(const Value& value, const std::string& message) {
+ size_t length = end_ - begin_;
+ if(value.getOffsetStart() > length
+ || value.getOffsetLimit() > length)
+ return false;
+ Token token;
+ token.type_ = tokenError;
+ token.start_ = begin_ + value.getOffsetStart();
+  token.end_ = begin_ + value.getOffsetLimit();
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = 0;
+ errors_.push_back(info);
+ return true;
+}
+
+bool Reader::pushError(const Value& value, const std::string& message, const Value& extra) {
+ size_t length = end_ - begin_;
+ if(value.getOffsetStart() > length
+ || value.getOffsetLimit() > length
+ || extra.getOffsetLimit() > length)
+ return false;
+ Token token;
+ token.type_ = tokenError;
+ token.start_ = begin_ + value.getOffsetStart();
+ token.end_ = begin_ + value.getOffsetLimit();
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = begin_ + extra.getOffsetStart();
+ errors_.push_back(info);
+ return true;
+}
+
+bool Reader::good() const {
+ return !errors_.size();
+}
+
+// exact copy of Features
+class OurFeatures {
+public:
+ static OurFeatures all();
+ OurFeatures();
+ bool allowComments_;
+ bool strictRoot_;
+ bool allowDroppedNullPlaceholders_;
+ bool allowNumericKeys_;
+ bool allowSingleQuotes_;
+ bool failIfExtra_;
+ bool rejectDupKeys_;
+ int stackLimit_;
+}; // OurFeatures
+
+// exact copy of Implementation of class Features
+// ////////////////////////////////
+
+OurFeatures::OurFeatures()
+    : allowComments_(true), strictRoot_(false)
+    , allowDroppedNullPlaceholders_(false), allowNumericKeys_(false)
+    , allowSingleQuotes_(false)
+    , failIfExtra_(false), rejectDupKeys_(false)
+    , stackLimit_(1000)
+{
+}
+
+OurFeatures OurFeatures::all() { return OurFeatures(); }
+
+// Implementation of class Reader
+// ////////////////////////////////
+
+// exact copy of Reader, renamed to OurReader
+class OurReader {
+public:
+ typedef char Char;
+ typedef const Char* Location;
+ struct StructuredError {
+ size_t offset_start;
+ size_t offset_limit;
+ std::string message;
+ };
+
+ OurReader(OurFeatures const& features);
+ bool parse(const char* beginDoc,
+ const char* endDoc,
+ Value& root,
+ bool collectComments = true);
+ std::string getFormattedErrorMessages() const;
+ std::vector<StructuredError> getStructuredErrors() const;
+ bool pushError(const Value& value, const std::string& message);
+ bool pushError(const Value& value, const std::string& message, const Value& extra);
+ bool good() const;
+
+private:
+ OurReader(OurReader const&); // no impl
+ void operator=(OurReader const&); // no impl
+
+ enum TokenType {
+ tokenEndOfStream = 0,
+ tokenObjectBegin,
+ tokenObjectEnd,
+ tokenArrayBegin,
+ tokenArrayEnd,
+ tokenString,
+ tokenNumber,
+ tokenTrue,
+ tokenFalse,
+ tokenNull,
+ tokenArraySeparator,
+ tokenMemberSeparator,
+ tokenComment,
+ tokenError
+ };
+
+ class Token {
+ public:
+ TokenType type_;
+ Location start_;
+ Location end_;
+ };
+
+ class ErrorInfo {
+ public:
+ Token token_;
+ std::string message_;
+ Location extra_;
+ };
+
+ typedef std::deque<ErrorInfo> Errors;
+
+ bool readToken(Token& token);
+ void skipSpaces();
+ bool match(Location pattern, int patternLength);
+ bool readComment();
+ bool readCStyleComment();
+ bool readCppStyleComment();
+ bool readString();
+ bool readStringSingleQuote();
+ void readNumber();
+ bool readValue();
+ bool readObject(Token& token);
+ bool readArray(Token& token);
+ bool decodeNumber(Token& token);
+ bool decodeNumber(Token& token, Value& decoded);
+ bool decodeString(Token& token);
+ bool decodeString(Token& token, std::string& decoded);
+ bool decodeDouble(Token& token);
+ bool decodeDouble(Token& token, Value& decoded);
+ bool decodeUnicodeCodePoint(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode);
+ bool decodeUnicodeEscapeSequence(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode);
+ bool addError(const std::string& message, Token& token, Location extra = 0);
+ bool recoverFromError(TokenType skipUntilToken);
+ bool addErrorAndRecover(const std::string& message,
+ Token& token,
+ TokenType skipUntilToken);
+ void skipUntilSpace();
+ Value& currentValue();
+ Char getNextChar();
+ void
+ getLocationLineAndColumn(Location location, int& line, int& column) const;
+ std::string getLocationLineAndColumn(Location location) const;
+ void addComment(Location begin, Location end, CommentPlacement placement);
+ void skipCommentTokens(Token& token);
+
+ typedef std::stack<Value*> Nodes;
+ Nodes nodes_;
+ Errors errors_;
+ std::string document_;
+ Location begin_;
+ Location end_;
+ Location current_;
+ Location lastValueEnd_;
+ Value* lastValue_;
+ std::string commentsBefore_;
+ int stackDepth_;
+
+ OurFeatures const features_;
+ bool collectComments_;
+}; // OurReader
+
+// complete copy of Reader impl, for OurReader
+
+OurReader::OurReader(OurFeatures const& features)
+ : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
+ lastValue_(), commentsBefore_(), features_(features), collectComments_() {
+}
+
+bool OurReader::parse(const char* beginDoc,
+ const char* endDoc,
+ Value& root,
+ bool collectComments) {
+ if (!features_.allowComments_) {
+ collectComments = false;
+ }
+
+ begin_ = beginDoc;
+ end_ = endDoc;
+ collectComments_ = collectComments;
+ current_ = begin_;
+ lastValueEnd_ = 0;
+ lastValue_ = 0;
+ commentsBefore_ = "";
+ errors_.clear();
+ while (!nodes_.empty())
+ nodes_.pop();
+ nodes_.push(&root);
+
+ stackDepth_ = 0;
+ bool successful = readValue();
+ Token token;
+ skipCommentTokens(token);
+ if (features_.failIfExtra_) {
+ if (token.type_ != tokenError && token.type_ != tokenEndOfStream) {
+ addError("Extra non-whitespace after JSON value.", token);
+ return false;
+ }
+ }
+ if (collectComments_ && !commentsBefore_.empty())
+ root.setComment(commentsBefore_, commentAfter);
+ if (features_.strictRoot_) {
+ if (!root.isArray() && !root.isObject()) {
+      // Set the error location to the start of the doc; ideally it should
+      // be the first token found in the doc.
+ token.type_ = tokenError;
+ token.start_ = beginDoc;
+ token.end_ = endDoc;
+ addError(
+ "A valid JSON document must be either an array or an object value.",
+ token);
+ return false;
+ }
+ }
+ return successful;
+}
+
+bool OurReader::readValue() {
+ if (stackDepth_ >= features_.stackLimit_) throwRuntimeError("Exceeded stackLimit in readValue().");
+ ++stackDepth_;
+ Token token;
+ skipCommentTokens(token);
+ bool successful = true;
+
+ if (collectComments_ && !commentsBefore_.empty()) {
+ currentValue().setComment(commentsBefore_, commentBefore);
+ commentsBefore_ = "";
+ }
+
+ switch (token.type_) {
+ case tokenObjectBegin:
+ successful = readObject(token);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ case tokenArrayBegin:
+ successful = readArray(token);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ case tokenNumber:
+ successful = decodeNumber(token);
+ break;
+ case tokenString:
+ successful = decodeString(token);
+ break;
+ case tokenTrue:
+ {
+ Value v(true);
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenFalse:
+ {
+ Value v(false);
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenNull:
+ {
+ Value v;
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenArraySeparator:
+ case tokenObjectEnd:
+ case tokenArrayEnd:
+ if (features_.allowDroppedNullPlaceholders_) {
+ // "Un-read" the current token and mark the current value as a null
+ // token.
+ current_--;
+ Value v;
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(current_ - begin_ - 1);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ } // else, fall through ...
+ default:
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return addError("Syntax error: value, object or array expected.", token);
+ }
+
+ if (collectComments_) {
+ lastValueEnd_ = current_;
+ lastValue_ = &currentValue();
+ }
+
+ --stackDepth_;
+ return successful;
+}
+
+void OurReader::skipCommentTokens(Token& token) {
+ if (features_.allowComments_) {
+ do {
+ readToken(token);
+ } while (token.type_ == tokenComment);
+ } else {
+ readToken(token);
+ }
+}
+
+bool OurReader::readToken(Token& token) {
+ skipSpaces();
+ token.start_ = current_;
+ Char c = getNextChar();
+ bool ok = true;
+ switch (c) {
+ case '{':
+ token.type_ = tokenObjectBegin;
+ break;
+ case '}':
+ token.type_ = tokenObjectEnd;
+ break;
+ case '[':
+ token.type_ = tokenArrayBegin;
+ break;
+ case ']':
+ token.type_ = tokenArrayEnd;
+ break;
+ case '"':
+ token.type_ = tokenString;
+ ok = readString();
+ break;
+ case '\'':
+ if (features_.allowSingleQuotes_) {
+ token.type_ = tokenString;
+ ok = readStringSingleQuote();
+ break;
+ } // else continue
+ case '/':
+ token.type_ = tokenComment;
+ ok = readComment();
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ token.type_ = tokenNumber;
+ readNumber();
+ break;
+ case 't':
+ token.type_ = tokenTrue;
+ ok = match("rue", 3);
+ break;
+ case 'f':
+ token.type_ = tokenFalse;
+ ok = match("alse", 4);
+ break;
+ case 'n':
+ token.type_ = tokenNull;
+ ok = match("ull", 3);
+ break;
+ case ',':
+ token.type_ = tokenArraySeparator;
+ break;
+ case ':':
+ token.type_ = tokenMemberSeparator;
+ break;
+ case 0:
+ token.type_ = tokenEndOfStream;
+ break;
+ default:
+ ok = false;
+ break;
+ }
+ if (!ok)
+ token.type_ = tokenError;
+ token.end_ = current_;
+ return true;
+}
+
+void OurReader::skipSpaces() {
+ while (current_ != end_) {
+ Char c = *current_;
+ if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+ ++current_;
+ else
+ break;
+ }
+}
+
+bool OurReader::match(Location pattern, int patternLength) {
+ if (end_ - current_ < patternLength)
+ return false;
+ int index = patternLength;
+ while (index--)
+ if (current_[index] != pattern[index])
+ return false;
+ current_ += patternLength;
+ return true;
+}
+
+bool OurReader::readComment() {
+ Location commentBegin = current_ - 1;
+ Char c = getNextChar();
+ bool successful = false;
+ if (c == '*')
+ successful = readCStyleComment();
+ else if (c == '/')
+ successful = readCppStyleComment();
+ if (!successful)
+ return false;
+
+ if (collectComments_) {
+ CommentPlacement placement = commentBefore;
+ if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
+ if (c != '*' || !containsNewLine(commentBegin, current_))
+ placement = commentAfterOnSameLine;
+ }
+
+ addComment(commentBegin, current_, placement);
+ }
+ return true;
+}
+
+void
+OurReader::addComment(Location begin, Location end, CommentPlacement placement) {
+ assert(collectComments_);
+ const std::string& normalized = normalizeEOL(begin, end);
+ if (placement == commentAfterOnSameLine) {
+ assert(lastValue_ != 0);
+ lastValue_->setComment(normalized, placement);
+ } else {
+ commentsBefore_ += normalized;
+ }
+}
+
+bool OurReader::readCStyleComment() {
+ while (current_ != end_) {
+ Char c = getNextChar();
+ if (c == '*' && *current_ == '/')
+ break;
+ }
+ return getNextChar() == '/';
+}
+
+bool OurReader::readCppStyleComment() {
+ while (current_ != end_) {
+ Char c = getNextChar();
+ if (c == '\n')
+ break;
+ if (c == '\r') {
+ // Consume DOS EOL. It will be normalized in addComment.
+ if (current_ != end_ && *current_ == '\n')
+ getNextChar();
+      // Break on Mac OS 9 EOL.
+ break;
+ }
+ }
+ return true;
+}
+
+void OurReader::readNumber() {
+ const char *p = current_;
+  char c = '0'; // stand-in for the character readToken() already consumed
+ // integral part
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ // fractional part
+ if (c == '.') {
+ c = (current_ = p) < end_ ? *p++ : 0;
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ }
+ // exponential part
+ if (c == 'e' || c == 'E') {
+ c = (current_ = p) < end_ ? *p++ : 0;
+ if (c == '+' || c == '-')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : 0;
+ }
+}
+bool OurReader::readString() {
+ Char c = 0;
+ while (current_ != end_) {
+ c = getNextChar();
+ if (c == '\\')
+ getNextChar();
+ else if (c == '"')
+ break;
+ }
+ return c == '"';
+}
+
+
+bool OurReader::readStringSingleQuote() {
+ Char c = 0;
+ while (current_ != end_) {
+ c = getNextChar();
+ if (c == '\\')
+ getNextChar();
+ else if (c == '\'')
+ break;
+ }
+ return c == '\'';
+}
+
+bool OurReader::readObject(Token& tokenStart) {
+ Token tokenName;
+ std::string name;
+ Value init(objectValue);
+ currentValue().swapPayload(init);
+ currentValue().setOffsetStart(tokenStart.start_ - begin_);
+ while (readToken(tokenName)) {
+ bool initialTokenOk = true;
+ while (tokenName.type_ == tokenComment && initialTokenOk)
+ initialTokenOk = readToken(tokenName);
+ if (!initialTokenOk)
+ break;
+ if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
+ return true;
+ name = "";
+ if (tokenName.type_ == tokenString) {
+ if (!decodeString(tokenName, name))
+ return recoverFromError(tokenObjectEnd);
+ } else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
+ Value numberName;
+ if (!decodeNumber(tokenName, numberName))
+ return recoverFromError(tokenObjectEnd);
+ name = numberName.asString();
+ } else {
+ break;
+ }
+
+ Token colon;
+ if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
+ return addErrorAndRecover(
+ "Missing ':' after object member name", colon, tokenObjectEnd);
+ }
+ if (name.length() >= (1U<<30)) throwRuntimeError("keylength >= 2^30");
+ if (features_.rejectDupKeys_ && currentValue().isMember(name)) {
+ std::string msg = "Duplicate key: '" + name + "'";
+ return addErrorAndRecover(
+ msg, tokenName, tokenObjectEnd);
+ }
+ Value& value = currentValue()[name];
+ nodes_.push(&value);
+ bool ok = readValue();
+ nodes_.pop();
+ if (!ok) // error already set
+ return recoverFromError(tokenObjectEnd);
+
+ Token comma;
+ if (!readToken(comma) ||
+ (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
+ comma.type_ != tokenComment)) {
+ return addErrorAndRecover(
+ "Missing ',' or '}' in object declaration", comma, tokenObjectEnd);
+ }
+ bool finalizeTokenOk = true;
+ while (comma.type_ == tokenComment && finalizeTokenOk)
+ finalizeTokenOk = readToken(comma);
+ if (comma.type_ == tokenObjectEnd)
+ return true;
+ }
+ return addErrorAndRecover(
+ "Missing '}' or object member name", tokenName, tokenObjectEnd);
+}
+
+bool OurReader::readArray(Token& tokenStart) {
+ Value init(arrayValue);
+ currentValue().swapPayload(init);
+ currentValue().setOffsetStart(tokenStart.start_ - begin_);
+ skipSpaces();
+ if (*current_ == ']') // empty array
+ {
+ Token endArray;
+ readToken(endArray);
+ return true;
+ }
+ int index = 0;
+ for (;;) {
+ Value& value = currentValue()[index++];
+ nodes_.push(&value);
+ bool ok = readValue();
+ nodes_.pop();
+ if (!ok) // error already set
+ return recoverFromError(tokenArrayEnd);
+
+ Token token;
+ // Accept Comment after last item in the array.
+ ok = readToken(token);
+ while (token.type_ == tokenComment && ok) {
+ ok = readToken(token);
+ }
+ bool badTokenType =
+ (token.type_ != tokenArraySeparator && token.type_ != tokenArrayEnd);
+ if (!ok || badTokenType) {
+ return addErrorAndRecover(
+ "Missing ',' or ']' in array declaration", token, tokenArrayEnd);
+ }
+ if (token.type_ == tokenArrayEnd)
+ break;
+ }
+ return true;
+}
+
+bool OurReader::decodeNumber(Token& token) {
+ Value decoded;
+ if (!decodeNumber(token, decoded))
+ return false;
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool OurReader::decodeNumber(Token& token, Value& decoded) {
+ // Attempts to parse the number as an integer. If the number is
+ // larger than the maximum supported value of an integer then
+ // we decode the number as a double.
+ Location current = token.start_;
+ bool isNegative = *current == '-';
+ if (isNegative)
+ ++current;
+ // TODO: Help the compiler do the div and mod at compile time or get rid of them.
+ Value::LargestUInt maxIntegerValue =
+ isNegative ? Value::LargestUInt(-Value::minLargestInt)
+ : Value::maxLargestUInt;
+ Value::LargestUInt threshold = maxIntegerValue / 10;
+ Value::LargestUInt value = 0;
+ while (current < token.end_) {
+ Char c = *current++;
+ if (c < '0' || c > '9')
+ return decodeDouble(token, decoded);
+ Value::UInt digit(c - '0');
+ if (value >= threshold) {
+ // We've hit or exceeded the max value divided by 10 (rounded down). If
+ // a) we've only just touched the limit, b) this is the last digit, and
+ // c) it's small enough to fit in that rounding delta, we're okay.
+ // Otherwise treat this number as a double to avoid overflow.
+ if (value > threshold || current != token.end_ ||
+ digit > maxIntegerValue % 10) {
+ return decodeDouble(token, decoded);
+ }
+ }
+ value = value * 10 + digit;
+ }
+ if (isNegative)
+ decoded = -Value::LargestInt(value);
+ else if (value <= Value::LargestUInt(Value::maxInt))
+ decoded = Value::LargestInt(value);
+ else
+ decoded = value;
+ return true;
+}
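decodeNumber() avoids overflow by precomputing threshold = maxIntegerValue / 10: once the accumulator reaches the threshold, appending another digit is only safe when the accumulator sits exactly at the threshold, the digit is the last one, and it does not exceed maxIntegerValue % 10; anything else falls back to decodeDouble(). The same guard in isolation (a sketch over 64-bit unsigned, illustrative only):

    #include <cassert>
    #include <string>

    typedef unsigned long long U64;
    static const U64 kMax = 0xFFFFFFFFFFFFFFFFULL; // stand-in for maxLargestUInt

    // Parses digits into 'value'; returns false when the result would overflow.
    static bool accumulate(const std::string& digits, U64& value) {
      const U64 threshold = kMax / 10;
      value = 0;
      for (std::string::size_type i = 0; i < digits.size(); ++i) {
        U64 digit = U64(digits[i] - '0');
        if (value >= threshold) {
          // Only safe if we just touched the limit, this is the last digit,
          // and the digit fits in the remainder kMax % 10.
          if (value > threshold || i + 1 != digits.size() || digit > kMax % 10)
            return false;
        }
        value = value * 10 + digit;
      }
      return true;
    }

    int main() {
      U64 v = 0;
      assert(accumulate("18446744073709551615", v) && v == kMax); // exactly max
      assert(!accumulate("18446744073709551616", v));             // one too many
      return 0;
    }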
+
+bool OurReader::decodeDouble(Token& token) {
+ Value decoded;
+ if (!decodeDouble(token, decoded))
+ return false;
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool OurReader::decodeDouble(Token& token, Value& decoded) {
+ double value = 0;
const int bufferSize = 32;
int count;
int length = int(token.end_ - token.start_);
@@ -597,17 +1570,18 @@ bool Reader::decodeDouble(Token& token, Value& decoded) {
return true;
}
-bool Reader::decodeString(Token& token) {
- std::string decoded;
- if (!decodeString(token, decoded))
+bool OurReader::decodeString(Token& token) {
+ std::string decoded_string;
+ if (!decodeString(token, decoded_string))
return false;
- currentValue() = decoded;
+ Value decoded(decoded_string);
+ currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
-bool Reader::decodeString(Token& token, std::string& decoded) {
+bool OurReader::decodeString(Token& token, std::string& decoded) {
decoded.reserve(token.end_ - token.start_ - 2);
Location current = token.start_ + 1; // skip '"'
Location end = token.end_ - 1; // do not include '"'
@@ -660,7 +1634,7 @@ bool Reader::decodeString(Token& token, std::string& decoded) {
return true;
}
-bool Reader::decodeUnicodeCodePoint(Token& token,
+bool OurReader::decodeUnicodeCodePoint(Token& token,
Location& current,
Location end,
unsigned int& unicode) {
@@ -689,7 +1663,7 @@ bool Reader::decodeUnicodeCodePoint(Token& token,
return true;
}
-bool Reader::decodeUnicodeEscapeSequence(Token& token,
+bool OurReader::decodeUnicodeEscapeSequence(Token& token,
Location& current,
Location end,
unsigned int& unicode) {
@@ -718,7 +1692,7 @@ bool Reader::decodeUnicodeEscapeSequence(Token& token,
}
bool
-Reader::addError(const std::string& message, Token& token, Location extra) {
+OurReader::addError(const std::string& message, Token& token, Location extra) {
ErrorInfo info;
info.token_ = token;
info.message_ = message;
@@ -727,7 +1701,7 @@ Reader::addError(const std::string& message, Token& token, Location extra) {
return false;
}
-bool Reader::recoverFromError(TokenType skipUntilToken) {
+bool OurReader::recoverFromError(TokenType skipUntilToken) {
int errorCount = int(errors_.size());
Token skip;
for (;;) {
@@ -740,22 +1714,22 @@ bool Reader::recoverFromError(TokenType skipUntilToken) {
return false;
}
-bool Reader::addErrorAndRecover(const std::string& message,
+bool OurReader::addErrorAndRecover(const std::string& message,
Token& token,
TokenType skipUntilToken) {
addError(message, token);
return recoverFromError(skipUntilToken);
}
-Value& Reader::currentValue() { return *(nodes_.top()); }
+Value& OurReader::currentValue() { return *(nodes_.top()); }
-Reader::Char Reader::getNextChar() {
+OurReader::Char OurReader::getNextChar() {
if (current_ == end_)
return 0;
return *current_++;
}
-void Reader::getLocationLineAndColumn(Location location,
+void OurReader::getLocationLineAndColumn(Location location,
int& line,
int& column) const {
Location current = begin_;
@@ -778,7 +1752,7 @@ void Reader::getLocationLineAndColumn(Location location,
++line;
}
-std::string Reader::getLocationLineAndColumn(Location location) const {
+std::string OurReader::getLocationLineAndColumn(Location location) const {
int line, column;
getLocationLineAndColumn(location, line, column);
char buffer[18 + 16 + 16 + 1];
@@ -794,12 +1768,7 @@ std::string Reader::getLocationLineAndColumn(Location location) const {
return buffer;
}
-// Deprecated. Preserved for backward compatibility
-std::string Reader::getFormatedErrorMessages() const {
- return getFormattedErrorMessages();
-}
-
-std::string Reader::getFormattedErrorMessages() const {
+std::string OurReader::getFormattedErrorMessages() const {
std::string formattedMessage;
for (Errors::const_iterator itError = errors_.begin();
itError != errors_.end();
@@ -815,13 +1784,13 @@ std::string Reader::getFormattedErrorMessages() const {
return formattedMessage;
}
-std::vector<Reader::StructuredError> Reader::getStructuredErrors() const {
- std::vector<Reader::StructuredError> allErrors;
+std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const {
+ std::vector<OurReader::StructuredError> allErrors;
for (Errors::const_iterator itError = errors_.begin();
itError != errors_.end();
++itError) {
const ErrorInfo& error = *itError;
- Reader::StructuredError structured;
+ OurReader::StructuredError structured;
structured.offset_start = error.token_.start_ - begin_;
structured.offset_limit = error.token_.end_ - begin_;
structured.message = error.message_;
@@ -830,7 +1799,7 @@ std::vector<Reader::StructuredError> Reader::getStructuredErrors() const {
return allErrors;
}
-bool Reader::pushError(const Value& value, const std::string& message) {
+bool OurReader::pushError(const Value& value, const std::string& message) {
size_t length = end_ - begin_;
if(value.getOffsetStart() > length
|| value.getOffsetLimit() > length)
@@ -847,7 +1816,7 @@ bool Reader::pushError(const Value& value, const std::string& message) {
return true;
}
-bool Reader::pushError(const Value& value, const std::string& message, const Value& extra) {
+bool OurReader::pushError(const Value& value, const std::string& message, const Value& extra) {
size_t length = end_ - begin_;
if(value.getOffsetStart() > length
|| value.getOffsetLimit() > length
@@ -865,19 +1834,142 @@ bool Reader::pushError(const Value& value, const std::string& message, const Val
return true;
}
-bool Reader::good() const {
+bool OurReader::good() const {
return !errors_.size();
}
+
+class OurCharReader : public CharReader {
+ bool const collectComments_;
+ OurReader reader_;
+public:
+ OurCharReader(
+ bool collectComments,
+ OurFeatures const& features)
+ : collectComments_(collectComments)
+ , reader_(features)
+ {}
+ virtual bool parse(
+ char const* beginDoc, char const* endDoc,
+ Value* root, std::string* errs) {
+ bool ok = reader_.parse(beginDoc, endDoc, *root, collectComments_);
+ if (errs) {
+ *errs = reader_.getFormattedErrorMessages();
+ }
+ return ok;
+ }
+};
+
+CharReaderBuilder::CharReaderBuilder()
+{
+ setDefaults(&settings_);
+}
+CharReaderBuilder::~CharReaderBuilder()
+{}
+CharReader* CharReaderBuilder::newCharReader() const
+{
+ bool collectComments = settings_["collectComments"].asBool();
+ OurFeatures features = OurFeatures::all();
+ features.allowComments_ = settings_["allowComments"].asBool();
+ features.strictRoot_ = settings_["strictRoot"].asBool();
+ features.allowDroppedNullPlaceholders_ = settings_["allowDroppedNullPlaceholders"].asBool();
+ features.allowNumericKeys_ = settings_["allowNumericKeys"].asBool();
+ features.allowSingleQuotes_ = settings_["allowSingleQuotes"].asBool();
+ features.stackLimit_ = settings_["stackLimit"].asInt();
+ features.failIfExtra_ = settings_["failIfExtra"].asBool();
+ features.rejectDupKeys_ = settings_["rejectDupKeys"].asBool();
+ return new OurCharReader(collectComments, features);
+}
+static void getValidReaderKeys(std::set<std::string>* valid_keys)
+{
+ valid_keys->clear();
+ valid_keys->insert("collectComments");
+ valid_keys->insert("allowComments");
+ valid_keys->insert("strictRoot");
+ valid_keys->insert("allowDroppedNullPlaceholders");
+ valid_keys->insert("allowNumericKeys");
+ valid_keys->insert("allowSingleQuotes");
+ valid_keys->insert("stackLimit");
+ valid_keys->insert("failIfExtra");
+ valid_keys->insert("rejectDupKeys");
+}
+bool CharReaderBuilder::validate(Json::Value* invalid) const
+{
+ Json::Value my_invalid;
+ if (!invalid) invalid = &my_invalid; // so we do not need to test for NULL
+ Json::Value& inv = *invalid;
+ std::set<std::string> valid_keys;
+ getValidReaderKeys(&valid_keys);
+ Value::Members keys = settings_.getMemberNames();
+ size_t n = keys.size();
+ for (size_t i = 0; i < n; ++i) {
+ std::string const& key = keys[i];
+ if (valid_keys.find(key) == valid_keys.end()) {
+ inv[key] = settings_[key];
+ }
+ }
+ return 0u == inv.size();
+}
+Value& CharReaderBuilder::operator[](std::string key)
+{
+ return settings_[key];
+}
+// static
+void CharReaderBuilder::strictMode(Json::Value* settings)
+{
+//! [CharReaderBuilderStrictMode]
+ (*settings)["allowComments"] = false;
+ (*settings)["strictRoot"] = true;
+ (*settings)["allowDroppedNullPlaceholders"] = false;
+ (*settings)["allowNumericKeys"] = false;
+ (*settings)["allowSingleQuotes"] = false;
+ (*settings)["failIfExtra"] = true;
+ (*settings)["rejectDupKeys"] = true;
+//! [CharReaderBuilderStrictMode]
+}
+// static
+void CharReaderBuilder::setDefaults(Json::Value* settings)
+{
+//! [CharReaderBuilderDefaults]
+ (*settings)["collectComments"] = true;
+ (*settings)["allowComments"] = true;
+ (*settings)["strictRoot"] = false;
+ (*settings)["allowDroppedNullPlaceholders"] = false;
+ (*settings)["allowNumericKeys"] = false;
+ (*settings)["allowSingleQuotes"] = false;
+ (*settings)["stackLimit"] = 1000;
+ (*settings)["failIfExtra"] = false;
+ (*settings)["rejectDupKeys"] = false;
+//! [CharReaderBuilderDefaults]
+}
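CharReaderBuilder stores its options in a Json::Value, so callers adjust behavior through operator[] (any key outside the set registered in getValidReaderKeys() is caught by validate()). A usage sketch against the API introduced here:

    #include <json/json.h>
    #include <cstring>
    #include <iostream>
    #include <string>

    int main() {
      const char doc[] = "{ \"age\": 42 } // trailing comment";
      Json::CharReaderBuilder builder;
      builder["allowComments"] = true;  // see setDefaults() above for all keys
      builder["failIfExtra"] = false;

      Json::Value root;
      std::string errs;
      Json::CharReader* reader = builder.newCharReader();
      bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
      delete reader; // caller owns the reader
      if (!ok) {
        std::cerr << errs << std::endl;
        return 1;
      }
      std::cout << root["age"].asInt() << std::endl; // prints 42
      return 0;
    }

A plain delete keeps the sketch C++98-friendly; the CharReaderPtr typedef earlier in the patch shows the smart-pointer route.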
+
+//////////////////////////////////
+// global functions
+
+bool parseFromStream(
+ CharReader::Factory const& fact, std::istream& sin,
+ Value* root, std::string* errs)
+{
+ std::ostringstream ssin;
+ ssin << sin.rdbuf();
+ std::string doc = ssin.str();
+ char const* begin = doc.data();
+ char const* end = begin + doc.size();
+ // Note that we do not actually need a null-terminator.
+ CharReaderPtr const reader(fact.newCharReader());
+ return reader->parse(begin, end, root, errs);
+}
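parseFromStream() buffers the entire stream up front because the char readers want a contiguous [begin, end) range; it then delegates to whatever reader the factory builds. A typical call with a file stream (config.json is a hypothetical path):

    #include <json/json.h>
    #include <fstream>
    #include <iostream>
    #include <string>

    int main() {
      std::ifstream in("config.json"); // hypothetical input file
      Json::CharReaderBuilder builder;
      Json::Value root;
      std::string errs;
      if (!Json::parseFromStream(builder, in, &root, &errs)) {
        std::cerr << errs << std::endl;
        return 1;
      }
      return 0;
    }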
+
std::istream& operator>>(std::istream& sin, Value& root) {
- Json::Reader reader;
- bool ok = reader.parse(sin, root, true);
+ CharReaderBuilder b;
+ std::string errs;
+ bool ok = parseFromStream(b, sin, &root, &errs);
if (!ok) {
fprintf(stderr,
"Error from reader: %s",
- reader.getFormattedErrorMessages().c_str());
+ errs.c_str());
- JSON_FAIL_MESSAGE("reader error");
+ throwRuntimeError("reader error");
}
return sin;
}
diff --git a/3rdparty/jsoncpp/src/lib_json/json_value.cpp b/3rdparty/jsoncpp/src/lib_json/json_value.cpp
index b73deac1725..cc7d4d3d8ba 100644
--- a/3rdparty/jsoncpp/src/lib_json/json_value.cpp
+++ b/3rdparty/jsoncpp/src/lib_json/json_value.cpp
@@ -7,9 +7,6 @@
#include <json/assertions.h>
#include <json/value.h>
#include <json/writer.h>
-#ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-#include "json_batchallocator.h"
-#endif // #ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
#endif // if !defined(JSON_IS_AMALGAMATION)
#include <math.h>
#include <sstream>
@@ -20,6 +17,7 @@
#include <cpptl/conststring.h>
#endif
#include <cstddef> // size_t
+#include <algorithm> // min()
#define JSON_ASSERT_UNREACHABLE assert(false)
@@ -36,6 +34,7 @@ namespace Json {
static const unsigned char ALIGNAS(8) kNull[sizeof(Value)] = { 0 };
const unsigned char& kNullRef = kNull[0];
const Value& Value::null = reinterpret_cast<const Value&>(kNullRef);
+const Value& Value::nullRef = null;
const Int Value::minInt = Int(~(UInt(-1) / 2));
const Int Value::maxInt = Int(UInt(-1) / 2);
@@ -53,9 +52,6 @@ const LargestInt Value::minLargestInt = LargestInt(~(LargestUInt(-1) / 2));
const LargestInt Value::maxLargestInt = LargestInt(LargestUInt(-1) / 2);
const LargestUInt Value::maxLargestUInt = LargestUInt(-1);
-/// Unknown size marker
-static const unsigned int unknown = (unsigned)-1;
-
#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
template <typename T, typename U>
static inline bool InRange(double d, T min, U max) {
@@ -84,25 +80,59 @@ static inline bool InRange(double d, T min, U max) {
* @return Pointer on the duplicate instance of string.
*/
static inline char* duplicateStringValue(const char* value,
- unsigned int length = unknown) {
- if (length == unknown)
- length = (unsigned int)strlen(value);
-
+ size_t length) {
// Avoid an integer overflow in the call to malloc below by limiting length
// to a sane value.
- if (length >= (unsigned)Value::maxInt)
+ if (length >= (size_t)Value::maxInt)
length = Value::maxInt - 1;
char* newString = static_cast<char*>(malloc(length + 1));
- JSON_ASSERT_MESSAGE(newString != 0,
- "in Json::Value::duplicateStringValue(): "
- "Failed to allocate string value buffer");
+ if (newString == NULL) {
+ throwRuntimeError(
+ "in Json::Value::duplicateStringValue(): "
+ "Failed to allocate string value buffer");
+ }
memcpy(newString, value, length);
newString[length] = 0;
return newString;
}
-/** Free the string duplicated by duplicateStringValue().
+/* Record the length as a prefix.
+ */
+static inline char* duplicateAndPrefixStringValue(
+ const char* value,
+ unsigned int length)
+{
+ // Avoid an integer overflow in the call to malloc below by limiting length
+ // to a sane value.
+ JSON_ASSERT_MESSAGE(length <= (unsigned)Value::maxInt - sizeof(unsigned) - 1U,
+ "in Json::Value::duplicateAndPrefixStringValue(): "
+ "length too big for prefixing");
+ unsigned actualLength = length + sizeof(unsigned) + 1U;
+ char* newString = static_cast<char*>(malloc(actualLength));
+ if (newString == 0) {
+ throwRuntimeError(
+ "in Json::Value::duplicateAndPrefixStringValue(): "
+ "Failed to allocate string value buffer");
+ }
+ *reinterpret_cast<unsigned*>(newString) = length;
+ memcpy(newString + sizeof(unsigned), value, length);
+ newString[actualLength - 1U] = 0; // to avoid buffer over-run accidents by users later
+ return newString;
+}
+inline static void decodePrefixedString(
+ bool isPrefixed, char const* prefixed,
+ unsigned* length, char const** value)
+{
+ if (!isPrefixed) {
+ *length = static_cast<unsigned>(strlen(prefixed));
+ *value = prefixed;
+ } else {
+ *length = *reinterpret_cast<unsigned const*>(prefixed);
+ *value = prefixed + sizeof(unsigned);
+ }
+}
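duplicateAndPrefixStringValue() lays the allocation out as [unsigned length][bytes][NUL], which lets Value carry strings with embedded zeros while the trailing NUL still protects callers that treat the data as a C string; decodePrefixedString() reads the layout back. A standalone round-trip sketch of that layout:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // Layout: [unsigned length][length bytes][guard NUL]
    static char* prefixDup(const char* value, unsigned length) {
      char* s = static_cast<char*>(std::malloc(sizeof(unsigned) + length + 1U));
      assert(s != 0);
      *reinterpret_cast<unsigned*>(s) = length;  // write the length prefix
      std::memcpy(s + sizeof(unsigned), value, length);
      s[sizeof(unsigned) + length] = 0;          // guard NUL for C callers
      return s;
    }

    int main() {
      const char raw[] = "ab\0cd";               // 5 bytes, embedded zero
      char* stored = prefixDup(raw, 5);
      unsigned len = *reinterpret_cast<unsigned const*>(stored);
      assert(len == 5);
      assert(std::memcmp(stored + sizeof(unsigned), raw, 5) == 0);
      std::free(stored);
      return 0;
    }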
+/** Free the string duplicated by duplicateStringValue()/duplicateAndPrefixStringValue().
*/
static inline void releaseStringValue(char* value) { free(value); }
@@ -116,16 +146,53 @@ static inline void releaseStringValue(char* value) { free(value); }
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
#if !defined(JSON_IS_AMALGAMATION)
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-#include "json_internalarray.inl"
-#include "json_internalmap.inl"
-#endif // JSON_VALUE_USE_INTERNAL_MAP
#include "json_valueiterator.inl"
#endif // if !defined(JSON_IS_AMALGAMATION)
namespace Json {
+class JSON_API Exception : public std::exception {
+public:
+ Exception(std::string const& msg);
+ virtual ~Exception() throw();
+ virtual char const* what() const throw();
+protected:
+ std::string const msg_;
+};
+class JSON_API RuntimeError : public Exception {
+public:
+ RuntimeError(std::string const& msg);
+};
+class JSON_API LogicError : public Exception {
+public:
+ LogicError(std::string const& msg);
+};
+
+Exception::Exception(std::string const& msg)
+ : msg_(msg)
+{}
+Exception::~Exception() throw()
+{}
+char const* Exception::what() const throw()
+{
+ return msg_.c_str();
+}
+RuntimeError::RuntimeError(std::string const& msg)
+ : Exception(msg)
+{}
+LogicError::LogicError(std::string const& msg)
+ : Exception(msg)
+{}
+void throwRuntimeError(std::string const& msg)
+{
+ throw RuntimeError(msg);
+}
+void throwLogicError(std::string const& msg)
+{
+ throw LogicError(msg);
+}
+
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
@@ -141,15 +208,17 @@ Value::CommentInfo::~CommentInfo() {
releaseStringValue(comment_);
}
-void Value::CommentInfo::setComment(const char* text) {
- if (comment_)
+void Value::CommentInfo::setComment(const char* text, size_t len) {
+ if (comment_) {
releaseStringValue(comment_);
+ comment_ = 0;
+ }
JSON_ASSERT(text != 0);
JSON_ASSERT_MESSAGE(
text[0] == '\0' || text[0] == '/',
"in Json::Value::setComment(): Comments must start with /");
// It seems that /**/ style comments are acceptable as well.
- comment_ = duplicateStringValue(text);
+ comment_ = duplicateStringValue(text, len);
}
// //////////////////////////////////////////////////////////////////
@@ -159,28 +228,34 @@ void Value::CommentInfo::setComment(const char* text) {
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-// Notes: index_ indicates if the string was allocated when
+// Notes: policy_ indicates if the string was allocated when
// a string is stored.
Value::CZString::CZString(ArrayIndex index) : cstr_(0), index_(index) {}
-Value::CZString::CZString(const char* cstr, DuplicationPolicy allocate)
- : cstr_(allocate == duplicate ? duplicateStringValue(cstr) : cstr),
- index_(allocate) {}
+Value::CZString::CZString(char const* str, unsigned length, DuplicationPolicy allocate)
+ : cstr_(str)
+{
+ // allocate != duplicate
+ storage_.policy_ = allocate;
+ storage_.length_ = length;
+}
Value::CZString::CZString(const CZString& other)
- : cstr_(other.index_ != noDuplication && other.cstr_ != 0
- ? duplicateStringValue(other.cstr_)
- : other.cstr_),
- index_(other.cstr_
- ? static_cast<ArrayIndex>(other.index_ == noDuplication
+ : cstr_(other.storage_.policy_ != noDuplication && other.cstr_ != 0
+ ? duplicateStringValue(other.cstr_, other.storage_.length_)
+ : other.cstr_)
+{
+ storage_.policy_ = (other.cstr_
+ ? (other.storage_.policy_ == noDuplication
? noDuplication : duplicate)
- : other.index_) {}
+ : other.storage_.policy_);
+ storage_.length_ = other.storage_.length_;
+}
Value::CZString::~CZString() {
- if (cstr_ && index_ == duplicate)
+ if (cstr_ && storage_.policy_ == duplicate)
releaseStringValue(const_cast<char*>(cstr_));
}
@@ -195,24 +270,35 @@ Value::CZString& Value::CZString::operator=(CZString other) {
}
bool Value::CZString::operator<(const CZString& other) const {
- if (cstr_)
- return strcmp(cstr_, other.cstr_) < 0;
- return index_ < other.index_;
+ if (!cstr_) return index_ < other.index_;
+ //return strcmp(cstr_, other.cstr_) < 0;
+ // Assume both are strings.
+ unsigned this_len = this->storage_.length_;
+ unsigned other_len = other.storage_.length_;
+ unsigned min_len = std::min(this_len, other_len);
+ int comp = memcmp(this->cstr_, other.cstr_, min_len);
+ if (comp < 0) return true;
+ if (comp > 0) return false;
+ return (this_len < other_len);
}
bool Value::CZString::operator==(const CZString& other) const {
- if (cstr_)
- return strcmp(cstr_, other.cstr_) == 0;
- return index_ == other.index_;
+ if (!cstr_) return index_ == other.index_;
+ //return strcmp(cstr_, other.cstr_) == 0;
+ // Assume both are strings.
+ unsigned this_len = this->storage_.length_;
+ unsigned other_len = other.storage_.length_;
+ if (this_len != other_len) return false;
+ int comp = memcmp(this->cstr_, other.cstr_, this_len);
+ return comp == 0;
}
ArrayIndex Value::CZString::index() const { return index_; }
-const char* Value::CZString::c_str() const { return cstr_; }
-
-bool Value::CZString::isStaticString() const { return index_ == noDuplication; }
-
-#endif // ifndef JSON_VALUE_USE_INTERNAL_MAP
+//const char* Value::CZString::c_str() const { return cstr_; }
+const char* Value::CZString::data() const { return cstr_; }
+unsigned Value::CZString::length() const { return storage_.length_; }
+bool Value::CZString::isStaticString() const { return storage_.policy_ == noDuplication; }
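Because the length now travels with the pointer, CZString ordering and equality use memcmp over the common prefix instead of strcmp, so object keys may legally contain embedded NUL bytes. A standalone restatement of the rule implemented by operator< above (plain functions, since CZString is private to Value):

    #include <algorithm>
    #include <cstring>

    // Same ordering as CZString::operator<: compare the shared prefix
    // bytewise, then break ties by length, so "ab" sorts before "ab\0c".
    bool keyLess(const char* a, unsigned aLen, const char* b, unsigned bLen) {
      unsigned minLen = std::min(aLen, bLen);
      int comp = std::memcmp(a, b, minLen);
      if (comp != 0)
        return comp < 0;
      return aLen < bLen;
    }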
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
@@ -241,19 +327,10 @@ Value::Value(ValueType type) {
case stringValue:
value_.string_ = 0;
break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
case arrayValue:
case objectValue:
value_.map_ = new ObjectValues();
break;
-#else
- case arrayValue:
- value_.array_ = arrayAllocator()->newArray();
- break;
- case objectValue:
- value_.map_ = mapAllocator()->newMap();
- break;
-#endif
case booleanValue:
value_.bool_ = false;
break;
@@ -289,19 +366,19 @@ Value::Value(double value) {
Value::Value(const char* value) {
initBasic(stringValue, true);
- value_.string_ = duplicateStringValue(value);
+ value_.string_ = duplicateAndPrefixStringValue(value, static_cast<unsigned>(strlen(value)));
}
Value::Value(const char* beginValue, const char* endValue) {
initBasic(stringValue, true);
value_.string_ =
- duplicateStringValue(beginValue, (unsigned int)(endValue - beginValue));
+ duplicateAndPrefixStringValue(beginValue, static_cast<unsigned>(endValue - beginValue));
}
Value::Value(const std::string& value) {
initBasic(stringValue, true);
value_.string_ =
- duplicateStringValue(value.c_str(), (unsigned int)value.length());
+ duplicateAndPrefixStringValue(value.data(), static_cast<unsigned>(value.length()));
}
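These constructors now route through duplicateAndPrefixStringValue, which stores the byte length in front of the characters, so a Value built from a (begin, end) range or a std::string keeps embedded NULs intact. A small sketch of the difference, assuming the matching asString() change further down:

    #include <json/json.h>
    #include <cassert>

    int main() {
      const char raw[] = {'a', '\0', 'b'};
      Json::Value v(raw, raw + 3);       // range ctor: stores all 3 bytes
      assert(v.asString().size() == 3);  // not truncated at the embedded NUL
      Json::Value c(raw);                // const char* ctor still uses strlen
      assert(c.asString().size() == 1);  // only "a"
      return 0;
    }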
Value::Value(const StaticString& value) {
@@ -312,7 +389,7 @@ Value::Value(const StaticString& value) {
#ifdef JSON_USE_CPPTL
Value::Value(const CppTL::ConstString& value) {
initBasic(stringValue, true);
- value_.string_ = duplicateStringValue(value, value.length());
+ value_.string_ = duplicateAndPrefixStringValue(value, static_cast<unsigned>(value.length()));
}
#endif
@@ -321,14 +398,11 @@ Value::Value(bool value) {
value_.bool_ = value;
}
-Value::Value(const Value& other)
+Value::Value(Value const& other)
: type_(other.type_), allocated_(false)
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
,
- itemIsUsed_(0)
-#endif
- ,
- comments_(0), start_(other.start_), limit_(other.limit_) {
+ comments_(0), start_(other.start_), limit_(other.limit_)
+{
switch (type_) {
case nullValue:
case intValue:
@@ -338,27 +412,22 @@ Value::Value(const Value& other)
value_ = other.value_;
break;
case stringValue:
- if (other.value_.string_) {
- value_.string_ = duplicateStringValue(other.value_.string_);
+ if (other.value_.string_ && other.allocated_) {
+ unsigned len;
+ char const* str;
+ decodePrefixedString(other.allocated_, other.value_.string_,
+ &len, &str);
+ value_.string_ = duplicateAndPrefixStringValue(str, len);
allocated_ = true;
} else {
- value_.string_ = 0;
+ value_.string_ = other.value_.string_;
allocated_ = false;
}
break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
case arrayValue:
case objectValue:
value_.map_ = new ObjectValues(*other.value_.map_);
break;
-#else
- case arrayValue:
- value_.array_ = arrayAllocator()->newArrayCopy(*other.value_.array_);
- break;
- case objectValue:
- value_.map_ = mapAllocator()->newMapCopy(*other.value_.map_);
- break;
-#endif
default:
JSON_ASSERT_UNREACHABLE;
}
@@ -367,7 +436,8 @@ Value::Value(const Value& other)
for (int comment = 0; comment < numberOfCommentPlacement; ++comment) {
const CommentInfo& otherComment = other.comments_[comment];
if (otherComment.comment_)
- comments_[comment].setComment(otherComment.comment_);
+ comments_[comment].setComment(
+ otherComment.comment_, strlen(otherComment.comment_));
}
}
}
@@ -384,19 +454,10 @@ Value::~Value() {
if (allocated_)
releaseStringValue(value_.string_);
break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
case arrayValue:
case objectValue:
delete value_.map_;
break;
-#else
- case arrayValue:
- arrayAllocator()->destructArray(value_.array_);
- break;
- case objectValue:
- mapAllocator()->destructMap(value_.map_);
- break;
-#endif
default:
JSON_ASSERT_UNREACHABLE;
}
@@ -410,7 +471,7 @@ Value& Value::operator=(Value other) {
return *this;
}
-void Value::swap(Value& other) {
+void Value::swapPayload(Value& other) {
ValueType temp = type_;
type_ = other.type_;
other.type_ = temp;
@@ -418,6 +479,11 @@ void Value::swap(Value& other) {
int temp2 = allocated_;
allocated_ = other.allocated_;
other.allocated_ = temp2;
+}
+
+void Value::swap(Value& other) {
+ swapPayload(other);
+ std::swap(comments_, other.comments_);
std::swap(start_, other.start_);
std::swap(limit_, other.limit_);
}
@@ -448,10 +514,23 @@ bool Value::operator<(const Value& other) const {
case booleanValue:
return value_.bool_ < other.value_.bool_;
case stringValue:
- return (value_.string_ == 0 && other.value_.string_) ||
- (other.value_.string_ && value_.string_ &&
- strcmp(value_.string_, other.value_.string_) < 0);
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
+ {
+ if ((value_.string_ == 0) || (other.value_.string_ == 0)) {
+ if (other.value_.string_) return true;
+ else return false;
+ }
+ unsigned this_len;
+ unsigned other_len;
+ char const* this_str;
+ char const* other_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ decodePrefixedString(other.allocated_, other.value_.string_, &other_len, &other_str);
+ unsigned min_len = std::min(this_len, other_len);
+ int comp = memcmp(this_str, other_str, min_len);
+ if (comp < 0) return true;
+ if (comp > 0) return false;
+ return (this_len < other_len);
+ }
case arrayValue:
case objectValue: {
int delta = int(value_.map_->size() - other.value_.map_->size());
@@ -459,12 +538,6 @@ bool Value::operator<(const Value& other) const {
return delta < 0;
return (*value_.map_) < (*other.value_.map_);
}
-#else
- case arrayValue:
- return value_.array_->compare(*(other.value_.array_)) < 0;
- case objectValue:
- return value_.map_->compare(*(other.value_.map_)) < 0;
-#endif
default:
JSON_ASSERT_UNREACHABLE;
}
@@ -497,20 +570,24 @@ bool Value::operator==(const Value& other) const {
case booleanValue:
return value_.bool_ == other.value_.bool_;
case stringValue:
- return (value_.string_ == other.value_.string_) ||
- (other.value_.string_ && value_.string_ &&
- strcmp(value_.string_, other.value_.string_) == 0);
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
+ {
+ if ((value_.string_ == 0) || (other.value_.string_ == 0)) {
+ return (value_.string_ == other.value_.string_);
+ }
+ unsigned this_len;
+ unsigned other_len;
+ char const* this_str;
+ char const* other_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ decodePrefixedString(other.allocated_, other.value_.string_, &other_len, &other_str);
+ if (this_len != other_len) return false;
+ int comp = memcmp(this_str, other_str, this_len);
+ return comp == 0;
+ }
case arrayValue:
case objectValue:
return value_.map_->size() == other.value_.map_->size() &&
(*value_.map_) == (*other.value_.map_);
-#else
- case arrayValue:
- return value_.array_->compare(*(other.value_.array_)) == 0;
- case objectValue:
- return value_.map_->compare(*(other.value_.map_)) == 0;
-#endif
default:
JSON_ASSERT_UNREACHABLE;
}
@@ -522,7 +599,20 @@ bool Value::operator!=(const Value& other) const { return !(*this == other); }
const char* Value::asCString() const {
JSON_ASSERT_MESSAGE(type_ == stringValue,
"in Json::Value::asCString(): requires stringValue");
- return value_.string_;
+ if (value_.string_ == 0) return 0;
+ unsigned this_len;
+ char const* this_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ return this_str;
+}
+
+bool Value::getString(char const** str, char const** end) const {
+ if (type_ != stringValue) return false;
+ if (value_.string_ == 0) return false;
+ unsigned length;
+ decodePrefixedString(this->allocated_, this->value_.string_, &length, str);
+ *end = *str + length;
+ return true;
}
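getString() is the new zero-copy accessor: it decodes the length prefix and returns (begin, end) pointers into the Value's own buffer, reporting failure with a bool rather than an assertion. A usage sketch (the helper name is mine, not from this patch):

    #include <json/json.h>
    #include <cstddef>

    // Byte length of a string Value without copying it; returns 0 when the
    // Value is not a string or holds a null pointer, per getString() above.
    std::size_t stringBytes(const Json::Value& v) {
      char const* begin;
      char const* end;
      if (!v.getString(&begin, &end))
        return 0;
      return static_cast<std::size_t>(end - begin);
    }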
std::string Value::asString() const {
@@ -530,7 +620,13 @@ std::string Value::asString() const {
case nullValue:
return "";
case stringValue:
- return value_.string_ ? value_.string_ : "";
+ {
+ if (value_.string_ == 0) return "";
+ unsigned this_len;
+ char const* this_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ return std::string(this_str, this_len);
+ }
case booleanValue:
return value_.bool_ ? "true" : "false";
case intValue:
@@ -546,7 +642,11 @@ std::string Value::asString() const {
#ifdef JSON_USE_CPPTL
CppTL::ConstString Value::asConstString() const {
- return CppTL::ConstString(asString().c_str());
+ unsigned len;
+ char const* str;
+ decodePrefixedString(allocated_, value_.string_,
+ &len, &str);
+ return CppTL::ConstString(str, len);
}
#endif
@@ -760,7 +860,6 @@ ArrayIndex Value::size() const {
case booleanValue:
case stringValue:
return 0;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
case arrayValue: // size of the array is highest index + 1
if (!value_.map_->empty()) {
ObjectValues::const_iterator itLast = value_.map_->end();
@@ -770,12 +869,6 @@ ArrayIndex Value::size() const {
return 0;
case objectValue:
return ArrayIndex(value_.map_->size());
-#else
- case arrayValue:
- return Int(value_.array_->size());
- case objectValue:
- return Int(value_.map_->size());
-#endif
}
JSON_ASSERT_UNREACHABLE;
return 0; // unreachable;
@@ -797,19 +890,10 @@ void Value::clear() {
start_ = 0;
limit_ = 0;
switch (type_) {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
- case arrayValue:
- case objectValue:
- value_.map_->clear();
- break;
-#else
case arrayValue:
- value_.array_->clear();
- break;
case objectValue:
value_.map_->clear();
break;
-#endif
default:
break;
}
@@ -820,7 +904,6 @@ void Value::resize(ArrayIndex newSize) {
"in Json::Value::resize(): requires arrayValue");
if (type_ == nullValue)
*this = Value(arrayValue);
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
ArrayIndex oldSize = size();
if (newSize == 0)
clear();
@@ -832,9 +915,6 @@ void Value::resize(ArrayIndex newSize) {
}
assert(size() == newSize);
}
-#else
- value_.array_->resize(newSize);
-#endif
}
Value& Value::operator[](ArrayIndex index) {
@@ -843,18 +923,14 @@ Value& Value::operator[](ArrayIndex index) {
"in Json::Value::operator[](ArrayIndex): requires arrayValue");
if (type_ == nullValue)
*this = Value(arrayValue);
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
CZString key(index);
ObjectValues::iterator it = value_.map_->lower_bound(key);
if (it != value_.map_->end() && (*it).first == key)
return (*it).second;
- ObjectValues::value_type defaultValue(key, null);
+ ObjectValues::value_type defaultValue(key, nullRef);
it = value_.map_->insert(it, defaultValue);
return (*it).second;
-#else
- return value_.array_->resolveReference(index);
-#endif
}
Value& Value::operator[](int index) {
@@ -869,17 +945,12 @@ const Value& Value::operator[](ArrayIndex index) const {
type_ == nullValue || type_ == arrayValue,
"in Json::Value::operator[](ArrayIndex)const: requires arrayValue");
if (type_ == nullValue)
- return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
+ return nullRef;
CZString key(index);
ObjectValues::const_iterator it = value_.map_->find(key);
if (it == value_.map_->end())
- return null;
+ return nullRef;
return (*it).second;
-#else
- Value* value = value_.array_->find(index);
- return value ? *value : null;
-#endif
}
const Value& Value::operator[](int index) const {
@@ -889,149 +960,211 @@ const Value& Value::operator[](int index) const {
return (*this)[ArrayIndex(index)];
}
-Value& Value::operator[](const char* key) {
- return resolveReference(key, false);
-}
-
void Value::initBasic(ValueType type, bool allocated) {
type_ = type;
allocated_ = allocated;
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- itemIsUsed_ = 0;
-#endif
comments_ = 0;
start_ = 0;
limit_ = 0;
}
-Value& Value::resolveReference(const char* key, bool isStatic) {
+// Access an object value by name, create a null member if it does not exist.
+// @pre Type of '*this' is object or null.
+// @param key is null-terminated.
+Value& Value::resolveReference(const char* key) {
JSON_ASSERT_MESSAGE(
type_ == nullValue || type_ == objectValue,
"in Json::Value::resolveReference(): requires objectValue");
if (type_ == nullValue)
*this = Value(objectValue);
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
CZString actualKey(
- key, isStatic ? CZString::noDuplication : CZString::duplicateOnCopy);
+ key, static_cast<unsigned>(strlen(key)), CZString::noDuplication); // NOTE: not copied; key must outlive this Value (StaticString path).
ObjectValues::iterator it = value_.map_->lower_bound(actualKey);
if (it != value_.map_->end() && (*it).first == actualKey)
return (*it).second;
- ObjectValues::value_type defaultValue(actualKey, null);
+ ObjectValues::value_type defaultValue(actualKey, nullRef);
+ it = value_.map_->insert(it, defaultValue);
+ Value& value = (*it).second;
+ return value;
+}
+
+// @param key is not null-terminated.
+Value& Value::resolveReference(char const* key, char const* end)
+{
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == objectValue,
+ "in Json::Value::resolveReference(key, end): requires objectValue");
+ if (type_ == nullValue)
+ *this = Value(objectValue);
+ CZString actualKey(
+ key, static_cast<unsigned>(end-key), CZString::duplicateOnCopy);
+ ObjectValues::iterator it = value_.map_->lower_bound(actualKey);
+ if (it != value_.map_->end() && (*it).first == actualKey)
+ return (*it).second;
+
+ ObjectValues::value_type defaultValue(actualKey, nullRef);
it = value_.map_->insert(it, defaultValue);
Value& value = (*it).second;
return value;
-#else
- return value_.map_->resolveReference(key, isStatic);
-#endif
}
Value Value::get(ArrayIndex index, const Value& defaultValue) const {
const Value* value = &((*this)[index]);
- return value == &null ? defaultValue : *value;
+ return value == &nullRef ? defaultValue : *value;
}
bool Value::isValidIndex(ArrayIndex index) const { return index < size(); }
-const Value& Value::operator[](const char* key) const {
+Value const* Value::find(char const* key, char const* end) const
+{
JSON_ASSERT_MESSAGE(
type_ == nullValue || type_ == objectValue,
- "in Json::Value::operator[](char const*)const: requires objectValue");
- if (type_ == nullValue)
- return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
- CZString actualKey(key, CZString::noDuplication);
+ "in Json::Value::find(key, end, found): requires objectValue or nullValue");
+ if (type_ == nullValue) return NULL;
+ CZString actualKey(key, static_cast<unsigned>(end-key), CZString::noDuplication);
ObjectValues::const_iterator it = value_.map_->find(actualKey);
- if (it == value_.map_->end())
- return null;
- return (*it).second;
-#else
- const Value* value = value_.map_->find(key);
- return value ? *value : null;
-#endif
+ if (it == value_.map_->end()) return NULL;
+ return &(*it).second;
+}
+const Value& Value::operator[](const char* key) const
+{
+ Value const* found = find(key, key + strlen(key));
+ if (!found) return nullRef;
+ return *found;
+}
+Value const& Value::operator[](std::string const& key) const
+{
+ Value const* found = find(key.data(), key.data() + key.length());
+ if (!found) return nullRef;
+ return *found;
}
-Value& Value::operator[](const std::string& key) {
- return (*this)[key.c_str()];
+Value& Value::operator[](const char* key) {
+ return resolveReference(key, key + strlen(key));
}
-const Value& Value::operator[](const std::string& key) const {
- return (*this)[key.c_str()];
+Value& Value::operator[](const std::string& key) {
+ return resolveReference(key.data(), key.data() + key.length());
}
Value& Value::operator[](const StaticString& key) {
- return resolveReference(key, true);
+ return resolveReference(key.c_str());
}
#ifdef JSON_USE_CPPTL
Value& Value::operator[](const CppTL::ConstString& key) {
- return (*this)[key.c_str()];
+ return resolveReference(key.c_str(), key.end_c_str());
}
-
-const Value& Value::operator[](const CppTL::ConstString& key) const {
- return (*this)[key.c_str()];
+Value const& Value::operator[](CppTL::ConstString const& key) const
+{
+ Value const* found = find(key.c_str(), key.end_c_str());
+ if (!found) return nullRef;
+ return *found;
}
#endif
Value& Value::append(const Value& value) { return (*this)[size()] = value; }
-Value Value::get(const char* key, const Value& defaultValue) const {
- const Value* value = &((*this)[key]);
- return value == &null ? defaultValue : *value;
+Value Value::get(char const* key, char const* end, Value const& defaultValue) const
+{
+ Value const* found = find(key, end);
+ return !found ? defaultValue : *found;
}
-
-Value Value::get(const std::string& key, const Value& defaultValue) const {
- return get(key.c_str(), defaultValue);
+Value Value::get(char const* key, Value const& defaultValue) const
+{
+ return get(key, key + strlen(key), defaultValue);
+}
+Value Value::get(std::string const& key, Value const& defaultValue) const
+{
+ return get(key.data(), key.data() + key.length(), defaultValue);
}
-Value Value::removeMember(const char* key) {
- JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue,
- "in Json::Value::removeMember(): requires objectValue");
- if (type_ == nullValue)
- return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
- CZString actualKey(key, CZString::noDuplication);
+
+bool Value::removeMember(const char* key, const char* end, Value* removed)
+{
+ if (type_ != objectValue) {
+ return false;
+ }
+ CZString actualKey(key, static_cast<unsigned>(end-key), CZString::noDuplication);
ObjectValues::iterator it = value_.map_->find(actualKey);
if (it == value_.map_->end())
- return null;
- Value old(it->second);
+ return false;
+ *removed = it->second;
value_.map_->erase(it);
- return old;
-#else
- Value* value = value_.map_->find(key);
- if (value) {
- Value old(*value);
- value_.map_.remove(key);
- return old;
- } else {
- return null;
- }
-#endif
+ return true;
+}
+bool Value::removeMember(const char* key, Value* removed)
+{
+ return removeMember(key, key + strlen(key), removed);
+}
+bool Value::removeMember(std::string const& key, Value* removed)
+{
+ return removeMember(key.data(), key.data() + key.length(), removed);
}
+Value Value::removeMember(const char* key)
+{
+ JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue,
+ "in Json::Value::removeMember(): requires objectValue");
+ if (type_ == nullValue)
+ return nullRef;
-Value Value::removeMember(const std::string& key) {
+ Value removed; // null
+ removeMember(key, key + strlen(key), &removed);
+ return removed; // still null if removeMember() did nothing
+}
+Value Value::removeMember(const std::string& key)
+{
return removeMember(key.c_str());
}
+bool Value::removeIndex(ArrayIndex index, Value* removed) {
+ if (type_ != arrayValue) {
+ return false;
+ }
+ CZString key(index);
+ ObjectValues::iterator it = value_.map_->find(key);
+ if (it == value_.map_->end()) {
+ return false;
+ }
+ *removed = it->second;
+ ArrayIndex oldSize = size();
+ // shift all items after the removed one to the left, into its place
+ for (ArrayIndex i = index; i < (oldSize - 1); ++i){
+ CZString key(i);
+ (*value_.map_)[key] = (*this)[i + 1];
+ }
+ // erase the last one ("leftover")
+ CZString keyLast(oldSize - 1);
+ ObjectValues::iterator itLast = value_.map_->find(keyLast);
+ value_.map_->erase(itLast);
+ return true;
+}
+
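The bool-returning removeMember overloads report whether anything was actually removed and hand the old value back through an out-parameter, and removeIndex does the same for arrays while compacting the remaining elements. A short sketch of the contract as implemented above:

    #include <json/json.h>
    #include <cassert>

    int main() {
      Json::Value obj(Json::objectValue);
      obj["id"] = 42;
      Json::Value removed;
      assert(obj.removeMember("id", &removed));  // true: member existed
      assert(removed.asInt() == 42);
      assert(!obj.removeMember("id", &removed)); // false: already gone

      Json::Value arr(Json::arrayValue);
      arr.append(1); arr.append(2); arr.append(3);
      assert(arr.removeIndex(1, &removed));      // removes the 2...
      assert(arr[1].asInt() == 3);               // ...and shifts 3 left
      return 0;
    }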
#ifdef JSON_USE_CPPTL
Value Value::get(const CppTL::ConstString& key,
const Value& defaultValue) const {
- return get(key.c_str(), defaultValue);
+ return get(key.c_str(), key.end_c_str(), defaultValue);
}
#endif
-bool Value::isMember(const char* key) const {
- const Value* value = &((*this)[key]);
- return value != &null;
+bool Value::isMember(char const* key, char const* end) const
+{
+ Value const* value = find(key, end);
+ return NULL != value;
}
-
-bool Value::isMember(const std::string& key) const {
- return isMember(key.c_str());
+bool Value::isMember(char const* key) const
+{
+ return isMember(key, key + strlen(key));
+}
+bool Value::isMember(std::string const& key) const
+{
+ return isMember(key.data(), key.data() + key.length());
}
#ifdef JSON_USE_CPPTL
bool Value::isMember(const CppTL::ConstString& key) const {
- return isMember(key.c_str());
+ return isMember(key.c_str(), key.end_c_str());
}
#endif
@@ -1043,19 +1176,12 @@ Value::Members Value::getMemberNames() const {
return Value::Members();
Members members;
members.reserve(value_.map_->size());
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
ObjectValues::const_iterator it = value_.map_->begin();
ObjectValues::const_iterator itEnd = value_.map_->end();
- for (; it != itEnd; ++it)
- members.push_back(std::string((*it).first.c_str()));
-#else
- ValueInternalMap::IteratorState it;
- ValueInternalMap::IteratorState itEnd;
- value_.map_->makeBeginIterator(it);
- value_.map_->makeEndIterator(itEnd);
- for (; !ValueInternalMap::equals(it, itEnd); ValueInternalMap::increment(it))
- members.push_back(std::string(ValueInternalMap::key(it)));
-#endif
+ for (; it != itEnd; ++it) {
+ members.push_back(std::string((*it).first.data(),
+ (*it).first.length()));
+ }
return members;
}
//
@@ -1181,14 +1307,22 @@ bool Value::isArray() const { return type_ == arrayValue; }
bool Value::isObject() const { return type_ == objectValue; }
-void Value::setComment(const char* comment, CommentPlacement placement) {
+void Value::setComment(const char* comment, size_t len, CommentPlacement placement) {
if (!comments_)
comments_ = new CommentInfo[numberOfCommentPlacement];
- comments_[placement].setComment(comment);
+ if ((len > 0) && (comment[len-1] == '\n')) {
+ // Always discard trailing newline, to aid indentation.
+ len -= 1;
+ }
+ comments_[placement].setComment(comment, len);
+}
+
+void Value::setComment(const char* comment, CommentPlacement placement) {
+ setComment(comment, strlen(comment), placement);
}
void Value::setComment(const std::string& comment, CommentPlacement placement) {
- setComment(comment.c_str(), placement);
+ setComment(comment.c_str(), comment.length(), placement);
}
bool Value::hasComment(CommentPlacement placement) const {
@@ -1216,28 +1350,11 @@ std::string Value::toStyledString() const {
Value::const_iterator Value::begin() const {
switch (type_) {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- case arrayValue:
- if (value_.array_) {
- ValueInternalArray::IteratorState it;
- value_.array_->makeBeginIterator(it);
- return const_iterator(it);
- }
- break;
- case objectValue:
- if (value_.map_) {
- ValueInternalMap::IteratorState it;
- value_.map_->makeBeginIterator(it);
- return const_iterator(it);
- }
- break;
-#else
case arrayValue:
case objectValue:
if (value_.map_)
return const_iterator(value_.map_->begin());
break;
-#endif
default:
break;
}
@@ -1246,28 +1363,11 @@ Value::const_iterator Value::begin() const {
Value::const_iterator Value::end() const {
switch (type_) {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- case arrayValue:
- if (value_.array_) {
- ValueInternalArray::IteratorState it;
- value_.array_->makeEndIterator(it);
- return const_iterator(it);
- }
- break;
- case objectValue:
- if (value_.map_) {
- ValueInternalMap::IteratorState it;
- value_.map_->makeEndIterator(it);
- return const_iterator(it);
- }
- break;
-#else
case arrayValue:
case objectValue:
if (value_.map_)
return const_iterator(value_.map_->end());
break;
-#endif
default:
break;
}
@@ -1276,28 +1376,11 @@ Value::const_iterator Value::end() const {
Value::iterator Value::begin() {
switch (type_) {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- case arrayValue:
- if (value_.array_) {
- ValueInternalArray::IteratorState it;
- value_.array_->makeBeginIterator(it);
- return iterator(it);
- }
- break;
- case objectValue:
- if (value_.map_) {
- ValueInternalMap::IteratorState it;
- value_.map_->makeBeginIterator(it);
- return iterator(it);
- }
- break;
-#else
case arrayValue:
case objectValue:
if (value_.map_)
return iterator(value_.map_->begin());
break;
-#endif
default:
break;
}
@@ -1306,28 +1389,11 @@ Value::iterator Value::begin() {
Value::iterator Value::end() {
switch (type_) {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
- case arrayValue:
- if (value_.array_) {
- ValueInternalArray::IteratorState it;
- value_.array_->makeEndIterator(it);
- return iterator(it);
- }
- break;
- case objectValue:
- if (value_.map_) {
- ValueInternalMap::IteratorState it;
- value_.map_->makeEndIterator(it);
- return iterator(it);
- }
- break;
-#else
case arrayValue:
case objectValue:
if (value_.map_)
return iterator(value_.map_->end());
break;
-#endif
default:
break;
}
@@ -1428,7 +1494,7 @@ const Value& Path::resolve(const Value& root) const {
// Error: unable to resolve path (object value expected at position...)
}
node = &((*node)[arg.key_]);
- if (node == &Value::null) {
+ if (node == &Value::nullRef) {
// Error: unable to resolve path (object has no member named '' at
// position...)
}
@@ -1449,7 +1515,7 @@ Value Path::resolve(const Value& root, const Value& defaultValue) const {
if (!node->isObject())
return defaultValue;
node = &((*node)[arg.key_]);
- if (node == &Value::null)
+ if (node == &Value::nullRef)
return defaultValue;
}
}
diff --git a/3rdparty/jsoncpp/src/lib_json/json_valueiterator.inl b/3rdparty/jsoncpp/src/lib_json/json_valueiterator.inl
index a9f7df63a7c..d01d3c018c9 100644
--- a/3rdparty/jsoncpp/src/lib_json/json_valueiterator.inl
+++ b/3rdparty/jsoncpp/src/lib_json/json_valueiterator.inl
@@ -16,68 +16,29 @@ namespace Json {
// //////////////////////////////////////////////////////////////////
ValueIteratorBase::ValueIteratorBase()
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
: current_(), isNull_(true) {
}
-#else
- : isArray_(true), isNull_(true) {
- iterator_.array_ = ValueInternalArray::IteratorState();
-}
-#endif
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
ValueIteratorBase::ValueIteratorBase(
const Value::ObjectValues::iterator& current)
: current_(current), isNull_(false) {}
-#else
-ValueIteratorBase::ValueIteratorBase(
- const ValueInternalArray::IteratorState& state)
- : isArray_(true) {
- iterator_.array_ = state;
-}
-
-ValueIteratorBase::ValueIteratorBase(
- const ValueInternalMap::IteratorState& state)
- : isArray_(false) {
- iterator_.map_ = state;
-}
-#endif
Value& ValueIteratorBase::deref() const {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
return current_->second;
-#else
- if (isArray_)
- return ValueInternalArray::dereference(iterator_.array_);
- return ValueInternalMap::value(iterator_.map_);
-#endif
}
void ValueIteratorBase::increment() {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
++current_;
-#else
- if (isArray_)
- ValueInternalArray::increment(iterator_.array_);
- ValueInternalMap::increment(iterator_.map_);
-#endif
}
void ValueIteratorBase::decrement() {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
--current_;
-#else
- if (isArray_)
- ValueInternalArray::decrement(iterator_.array_);
- ValueInternalMap::decrement(iterator_.map_);
-#endif
}
ValueIteratorBase::difference_type
ValueIteratorBase::computeDistance(const SelfType& other) const {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
#ifdef JSON_USE_CPPTL_SMALLMAP
- return current_ - other.current_;
+ return other.current_ - current_;
#else
// Iterators for null values are initialized using the default
// constructor, which initializes current_ to the default
@@ -100,80 +61,58 @@ ValueIteratorBase::computeDistance(const SelfType& other) const {
}
return myDistance;
#endif
-#else
- if (isArray_)
- return ValueInternalArray::distance(iterator_.array_,
- other.iterator_.array_);
- return ValueInternalMap::distance(iterator_.map_, other.iterator_.map_);
-#endif
}
bool ValueIteratorBase::isEqual(const SelfType& other) const {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
if (isNull_) {
return other.isNull_;
}
return current_ == other.current_;
-#else
- if (isArray_)
- return ValueInternalArray::equals(iterator_.array_, other.iterator_.array_);
- return ValueInternalMap::equals(iterator_.map_, other.iterator_.map_);
-#endif
}
void ValueIteratorBase::copy(const SelfType& other) {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
current_ = other.current_;
isNull_ = other.isNull_;
-#else
- if (isArray_)
- iterator_.array_ = other.iterator_.array_;
- iterator_.map_ = other.iterator_.map_;
-#endif
}
Value ValueIteratorBase::key() const {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
const Value::CZString czstring = (*current_).first;
- if (czstring.c_str()) {
+ if (czstring.data()) {
if (czstring.isStaticString())
- return Value(StaticString(czstring.c_str()));
- return Value(czstring.c_str());
+ return Value(StaticString(czstring.data()));
+ return Value(czstring.data(), czstring.data() + czstring.length());
}
return Value(czstring.index());
-#else
- if (isArray_)
- return Value(ValueInternalArray::indexOf(iterator_.array_));
- bool isStatic;
- const char* memberName = ValueInternalMap::key(iterator_.map_, isStatic);
- if (isStatic)
- return Value(StaticString(memberName));
- return Value(memberName);
-#endif
}
UInt ValueIteratorBase::index() const {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
const Value::CZString czstring = (*current_).first;
- if (!czstring.c_str())
+ if (!czstring.data())
return czstring.index();
return Value::UInt(-1);
-#else
- if (isArray_)
- return Value::UInt(ValueInternalArray::indexOf(iterator_.array_));
- return Value::UInt(-1);
-#endif
}
-const char* ValueIteratorBase::memberName() const {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
- const char* name = (*current_).first.c_str();
+std::string ValueIteratorBase::name() const {
+ char const* key;
+ char const* end;
+ key = memberName(&end);
+ if (!key) return std::string();
+ return std::string(key, end);
+}
+
+char const* ValueIteratorBase::memberName() const {
+ const char* name = (*current_).first.data();
return name ? name : "";
-#else
- if (!isArray_)
- return ValueInternalMap::key(iterator_.map_);
- return "";
-#endif
+}
+
+char const* ValueIteratorBase::memberName(char const** end) const {
+ const char* name = (*current_).first.data();
+ if (!name) {
+ *end = NULL;
+ return NULL;
+ }
+ *end = name + (*current_).first.length();
+ return name;
}
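Iterator key access follows the same length-aware pattern: memberName(&end) exposes the raw (begin, end) pair, and the new name() wraps it in a std::string, so iteration no longer truncates keys at an embedded NUL. A sketch:

    #include <json/json.h>
    #include <iostream>

    // Print the exact byte length of every key in an object.
    void dumpKeys(const Json::Value& obj) {
      for (Json::Value::const_iterator it = obj.begin();
           it != obj.end(); ++it) {
        std::string key = it.name(); // length-aware copy of the key
        std::cout << key.size() << " byte key\n";
      }
    }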
// //////////////////////////////////////////////////////////////////
@@ -186,19 +125,9 @@ const char* ValueIteratorBase::memberName() const {
ValueConstIterator::ValueConstIterator() {}
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
ValueConstIterator::ValueConstIterator(
const Value::ObjectValues::iterator& current)
: ValueIteratorBase(current) {}
-#else
-ValueConstIterator::ValueConstIterator(
- const ValueInternalArray::IteratorState& state)
- : ValueIteratorBase(state) {}
-
-ValueConstIterator::ValueConstIterator(
- const ValueInternalMap::IteratorState& state)
- : ValueIteratorBase(state) {}
-#endif
ValueConstIterator& ValueConstIterator::
operator=(const ValueIteratorBase& other) {
@@ -216,16 +145,8 @@ operator=(const ValueIteratorBase& other) {
ValueIterator::ValueIterator() {}
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
ValueIterator::ValueIterator(const Value::ObjectValues::iterator& current)
: ValueIteratorBase(current) {}
-#else
-ValueIterator::ValueIterator(const ValueInternalArray::IteratorState& state)
- : ValueIteratorBase(state) {}
-
-ValueIterator::ValueIterator(const ValueInternalMap::IteratorState& state)
- : ValueIteratorBase(state) {}
-#endif
ValueIterator::ValueIterator(const ValueConstIterator& other)
: ValueIteratorBase(other) {}
diff --git a/3rdparty/jsoncpp/src/lib_json/json_writer.cpp b/3rdparty/jsoncpp/src/lib_json/json_writer.cpp
index 5113c38be1d..2f940c8a43f 100644
--- a/3rdparty/jsoncpp/src/lib_json/json_writer.cpp
+++ b/3rdparty/jsoncpp/src/lib_json/json_writer.cpp
@@ -7,15 +7,35 @@
#include <json/writer.h>
#include "json_tool.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
-#include <utility>
-#include <assert.h>
-#include <stdio.h>
-#include <string.h>
-#include <sstream>
#include <iomanip>
-#include <math.h>
+#include <memory>
+#include <sstream>
+#include <utility>
+#include <set>
+#include <cassert>
+#include <cstring>
+#include <cstdio>
+
+#if defined(_MSC_VER) && _MSC_VER >= 1200 && _MSC_VER < 1800 // Between VC++ 6.0 and VC++ 11.0
+#include <float.h>
+#define isfinite _finite
+#elif defined(__sun) && defined(__SVR4) //Solaris
+#include <ieeefp.h>
+#define isfinite finite
+#else
+#include <cmath>
+#define isfinite std::isfinite
+#endif
#if defined(_MSC_VER) && _MSC_VER < 1500 // VC++ 8.0 and below
+#define snprintf _snprintf
+#elif defined(__ANDROID__)
+#define snprintf snprintf
+#elif __cplusplus >= 201103L
+#define snprintf std::snprintf
+#endif
+
+#if defined(__BORLANDC__)
#include <float.h>
#define isfinite _finite
#define snprintf _snprintf
@@ -26,13 +46,14 @@
#pragma warning(disable : 4996)
#endif
-#if defined(__sun) && defined(__SVR4) //Solaris
-#include <ieeefp.h>
-#define isfinite finite
-#endif
-
namespace Json {
+#if __cplusplus >= 201103L
+typedef std::unique_ptr<StreamWriter> StreamWriterPtr;
+#else
+typedef std::auto_ptr<StreamWriter> StreamWriterPtr;
+#endif
+
static bool containsControlCharacter(const char* str) {
while (*str) {
if (isControlCharacter(*(str++)))
@@ -41,6 +62,16 @@ static bool containsControlCharacter(const char* str) {
return false;
}
+static bool containsControlCharacter0(const char* str, unsigned len) {
+ char const* end = str + len;
+ while (end != str) {
+ if (isControlCharacter(*str) || 0==*str)
+ return true;
+ ++str;
+ }
+ return false;
+}
+
std::string valueToString(LargestInt value) {
UIntToStringBuffer buffer;
char* current = buffer + sizeof(buffer);
@@ -175,6 +206,84 @@ std::string valueToQuotedString(const char* value) {
return result;
}
+// https://github.com/upcaste/upcaste/blob/master/src/upcore/src/cstring/strnpbrk.cpp
+static char const* strnpbrk(char const* s, char const* accept, size_t n) {
+ assert((s || !n) && accept);
+
+ char const* const end = s + n;
+ for (char const* cur = s; cur < end; ++cur) {
+ int const c = *cur;
+ for (char const* a = accept; *a; ++a) {
+ if (*a == c) {
+ return cur;
+ }
+ }
+ }
+ return NULL;
+}
+static std::string valueToQuotedStringN(const char* value, unsigned length) {
+ if (value == NULL)
+ return "";
+ // Not sure how to handle unicode...
+ if (strnpbrk(value, "\"\\\b\f\n\r\t", length) == NULL &&
+ !containsControlCharacter0(value, length))
+ return std::string("\"") + value + "\"";
+ // We have to walk value and escape any special characters.
+ // Appending to std::string is not efficient, but this should be rare.
+ // (Note: forward slashes are *not* rare, but I am not escaping them.)
+ std::string::size_type maxsize =
+ length * 2 + 3; // allescaped+quotes+NULL
+ std::string result;
+ result.reserve(maxsize); // to avoid lots of mallocs
+ result += "\"";
+ char const* end = value + length;
+ for (const char* c = value; c != end; ++c) {
+ switch (*c) {
+ case '\"':
+ result += "\\\"";
+ break;
+ case '\\':
+ result += "\\\\";
+ break;
+ case '\b':
+ result += "\\b";
+ break;
+ case '\f':
+ result += "\\f";
+ break;
+ case '\n':
+ result += "\\n";
+ break;
+ case '\r':
+ result += "\\r";
+ break;
+ case '\t':
+ result += "\\t";
+ break;
+ // case '/':
+ // Even though \/ is considered a legal escape in JSON, a bare
+ // slash is also legal, so I see no reason to escape it.
+ // (I hope I am not misunderstanding something.)
+ // blep notes: actually escaping \/ may be useful in javascript to avoid </
+ // sequence.
+ // Should add a flag to allow this compatibility mode and prevent this
+ // sequence from occurring.
+ default:
+ if ((isControlCharacter(*c)) || (*c == 0)) {
+ std::ostringstream oss;
+ oss << "\\u" << std::hex << std::uppercase << std::setfill('0')
+ << std::setw(4) << static_cast<int>(*c);
+ result += oss.str();
+ } else {
+ result += *c;
+ }
+ break;
+ }
+ }
+ result += "\"";
+ return result;
+}
+
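valueToQuotedStringN is file-local, but its effect is visible through any writer: a string containing control characters or an embedded NUL takes the escape path and is emitted as \uXXXX sequences. An expectation sketch; the quoted output is my reading of the escape loop above, not a documented guarantee:

    #include <json/json.h>
    #include <iostream>

    int main() {
      const char raw[] = {'a', '\0', 'b'};
      Json::Value v(raw, raw + 3);
      Json::FastWriter writer;
      std::cout << writer.write(v); // expected: "a\u0000b" plus a newline
      return 0;
    }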
// Class Writer
// //////////////////////////////////////////////////////////////////
Writer::~Writer() {}
@@ -216,8 +325,14 @@ void FastWriter::writeValue(const Value& value) {
document_ += valueToString(value.asDouble());
break;
case stringValue:
- document_ += valueToQuotedString(value.asCString());
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) document_ += valueToQuotedStringN(str, static_cast<unsigned>(end-str));
break;
+ }
case booleanValue:
document_ += valueToString(value.asBool());
break;
@@ -239,7 +354,7 @@ void FastWriter::writeValue(const Value& value) {
const std::string& name = *it;
if (it != members.begin())
document_ += ',';
- document_ += valueToQuotedString(name.c_str());
+ document_ += valueToQuotedStringN(name.data(), static_cast<unsigned>(name.length()));
document_ += yamlCompatiblityEnabled_ ? ": " : ":";
writeValue(value[name]);
}
@@ -280,8 +395,15 @@ void StyledWriter::writeValue(const Value& value) {
pushValue(valueToString(value.asDouble()));
break;
case stringValue:
- pushValue(valueToQuotedString(value.asCString()));
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end-str)));
+ else pushValue("");
break;
+ }
case booleanValue:
pushValue(valueToString(value.asBool()));
break;
@@ -376,6 +498,9 @@ bool StyledWriter::isMultineArray(const Value& value) {
addChildValues_ = true;
int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
for (int index = 0; index < size; ++index) {
+ if (hasCommentForValue(value[index])) {
+ isMultiLine = true;
+ }
writeValue(value[index]);
lineLength += int(childValues_[index].length());
}
@@ -421,26 +546,27 @@ void StyledWriter::writeCommentBeforeValue(const Value& root) {
document_ += "\n";
writeIndent();
- std::string normalizedComment = normalizeEOL(root.getComment(commentBefore));
- std::string::const_iterator iter = normalizedComment.begin();
- while (iter != normalizedComment.end()) {
+ const std::string& comment = root.getComment(commentBefore);
+ std::string::const_iterator iter = comment.begin();
+ while (iter != comment.end()) {
document_ += *iter;
- if (*iter == '\n' && *(iter + 1) == '/')
+ if (*iter == '\n' &&
+ (iter != comment.end() && *(iter + 1) == '/'))
writeIndent();
++iter;
}
- // Comments are stripped of newlines, so add one here
+ // Comments are stripped of trailing newlines, so add one here
document_ += "\n";
}
void StyledWriter::writeCommentAfterValueOnSameLine(const Value& root) {
if (root.hasComment(commentAfterOnSameLine))
- document_ += " " + normalizeEOL(root.getComment(commentAfterOnSameLine));
+ document_ += " " + root.getComment(commentAfterOnSameLine);
if (root.hasComment(commentAfter)) {
document_ += "\n";
- document_ += normalizeEOL(root.getComment(commentAfter));
+ document_ += root.getComment(commentAfter);
document_ += "\n";
}
}
@@ -451,25 +577,6 @@ bool StyledWriter::hasCommentForValue(const Value& value) {
value.hasComment(commentAfter);
}
-std::string StyledWriter::normalizeEOL(const std::string& text) {
- std::string normalized;
- normalized.reserve(text.length());
- const char* begin = text.c_str();
- const char* end = begin + text.length();
- const char* current = begin;
- while (current != end) {
- char c = *current++;
- if (c == '\r') // mac or dos EOL
- {
- if (*current == '\n') // convert dos EOL
- ++current;
- normalized += '\n';
- } else // handle unix EOL & other char
- normalized += c;
- }
- return normalized;
-}
-
// Class StyledStreamWriter
// //////////////////////////////////////////////////////////////////
@@ -481,7 +588,10 @@ void StyledStreamWriter::write(std::ostream& out, const Value& root) {
document_ = &out;
addChildValues_ = false;
indentString_ = "";
+ indented_ = true;
writeCommentBeforeValue(root);
+ if (!indented_) writeIndent();
+ indented_ = true;
writeValue(root);
writeCommentAfterValueOnSameLine(root);
*document_ << "\n";
@@ -503,8 +613,15 @@ void StyledStreamWriter::writeValue(const Value& value) {
pushValue(valueToString(value.asDouble()));
break;
case stringValue:
- pushValue(valueToQuotedString(value.asCString()));
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end-str)));
+ else pushValue("");
break;
+ }
case booleanValue:
pushValue(valueToString(value.asBool()));
break;
@@ -557,8 +674,10 @@ void StyledStreamWriter::writeArrayValue(const Value& value) {
if (hasChildValue)
writeWithIndent(childValues_[index]);
else {
- writeIndent();
+ if (!indented_) writeIndent();
+ indented_ = true;
writeValue(childValue);
+ indented_ = false;
}
if (++index == size) {
writeCommentAfterValueOnSameLine(childValue);
@@ -599,6 +718,9 @@ bool StyledStreamWriter::isMultineArray(const Value& value) {
addChildValues_ = true;
int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
for (int index = 0; index < size; ++index) {
+ if (hasCommentForValue(value[index])) {
+ isMultiLine = true;
+ }
writeValue(value[index]);
lineLength += int(childValues_[index].length());
}
@@ -616,24 +738,17 @@ void StyledStreamWriter::pushValue(const std::string& value) {
}
void StyledStreamWriter::writeIndent() {
- /*
- Some comments in this method would have been nice. ;-)
-
- if ( !document_.empty() )
- {
- char last = document_[document_.length()-1];
- if ( last == ' ' ) // already indented
- return;
- if ( last != '\n' ) // Comments may add new-line
- *document_ << '\n';
- }
- */
+ // blep intended this to look at the so-far-written string
+ // to determine whether we are already indented, but
+ // with a stream we cannot do that. So we rely on some saved state.
+ // The caller checks indented_.
*document_ << '\n' << indentString_;
}
void StyledStreamWriter::writeWithIndent(const std::string& value) {
- writeIndent();
+ if (!indented_) writeIndent();
*document_ << value;
+ indented_ = false;
}
void StyledStreamWriter::indent() { indentString_ += indentation_; }
@@ -646,19 +761,30 @@ void StyledStreamWriter::unindent() {
void StyledStreamWriter::writeCommentBeforeValue(const Value& root) {
if (!root.hasComment(commentBefore))
return;
- *document_ << normalizeEOL(root.getComment(commentBefore));
- *document_ << "\n";
+
+ if (!indented_) writeIndent();
+ const std::string& comment = root.getComment(commentBefore);
+ std::string::const_iterator iter = comment.begin();
+ while (iter != comment.end()) {
+ *document_ << *iter;
+ if (*iter == '\n' &&
+ (iter != comment.end() && *(iter + 1) == '/'))
+ // writeIndent(); // would include newline
+ *document_ << indentString_;
+ ++iter;
+ }
+ indented_ = false;
}
void StyledStreamWriter::writeCommentAfterValueOnSameLine(const Value& root) {
if (root.hasComment(commentAfterOnSameLine))
- *document_ << " " + normalizeEOL(root.getComment(commentAfterOnSameLine));
+ *document_ << ' ' << root.getComment(commentAfterOnSameLine);
if (root.hasComment(commentAfter)) {
- *document_ << "\n";
- *document_ << normalizeEOL(root.getComment(commentAfter));
- *document_ << "\n";
+ writeIndent();
+ *document_ << root.getComment(commentAfter);
}
+ indented_ = false;
}
bool StyledStreamWriter::hasCommentForValue(const Value& value) {
@@ -667,28 +793,386 @@ bool StyledStreamWriter::hasCommentForValue(const Value& value) {
value.hasComment(commentAfter);
}
-std::string StyledStreamWriter::normalizeEOL(const std::string& text) {
- std::string normalized;
- normalized.reserve(text.length());
- const char* begin = text.c_str();
- const char* end = begin + text.length();
- const char* current = begin;
- while (current != end) {
- char c = *current++;
- if (c == '\r') // mac or dos EOL
+//////////////////////////
+// BuiltStyledStreamWriter
+
+/// Scoped enums are not available until C++11.
+struct CommentStyle {
+ /// Decide whether to write comments.
+ enum Enum {
+ None, ///< Drop all comments.
+ Most, ///< Reproduce the odd behavior of previous versions (not implemented yet).
+ All ///< Keep all comments.
+ };
+};
+
+struct BuiltStyledStreamWriter : public StreamWriter
+{
+ BuiltStyledStreamWriter(
+ std::string const& indentation,
+ CommentStyle::Enum cs,
+ std::string const& colonSymbol,
+ std::string const& nullSymbol,
+ std::string const& endingLineFeedSymbol);
+ virtual int write(Value const& root, std::ostream* sout);
+private:
+ void writeValue(Value const& value);
+ void writeArrayValue(Value const& value);
+ bool isMultineArray(Value const& value);
+ void pushValue(std::string const& value);
+ void writeIndent();
+ void writeWithIndent(std::string const& value);
+ void indent();
+ void unindent();
+ void writeCommentBeforeValue(Value const& root);
+ void writeCommentAfterValueOnSameLine(Value const& root);
+ static bool hasCommentForValue(const Value& value);
+
+ typedef std::vector<std::string> ChildValues;
+
+ ChildValues childValues_;
+ std::string indentString_;
+ int rightMargin_;
+ std::string indentation_;
+ CommentStyle::Enum cs_;
+ std::string colonSymbol_;
+ std::string nullSymbol_;
+ std::string endingLineFeedSymbol_;
+ bool addChildValues_ : 1;
+ bool indented_ : 1;
+};
+BuiltStyledStreamWriter::BuiltStyledStreamWriter(
+ std::string const& indentation,
+ CommentStyle::Enum cs,
+ std::string const& colonSymbol,
+ std::string const& nullSymbol,
+ std::string const& endingLineFeedSymbol)
+ : rightMargin_(74)
+ , indentation_(indentation)
+ , cs_(cs)
+ , colonSymbol_(colonSymbol)
+ , nullSymbol_(nullSymbol)
+ , endingLineFeedSymbol_(endingLineFeedSymbol)
+ , addChildValues_(false)
+ , indented_(false)
+{
+}
+int BuiltStyledStreamWriter::write(Value const& root, std::ostream* sout)
+{
+ sout_ = sout;
+ addChildValues_ = false;
+ indented_ = true;
+ indentString_ = "";
+ writeCommentBeforeValue(root);
+ if (!indented_) writeIndent();
+ indented_ = true;
+ writeValue(root);
+ writeCommentAfterValueOnSameLine(root);
+ *sout_ << endingLineFeedSymbol_;
+ sout_ = NULL;
+ return 0;
+}
+void BuiltStyledStreamWriter::writeValue(Value const& value) {
+ switch (value.type()) {
+ case nullValue:
+ pushValue(nullSymbol_);
+ break;
+ case intValue:
+ pushValue(valueToString(value.asLargestInt()));
+ break;
+ case uintValue:
+ pushValue(valueToString(value.asLargestUInt()));
+ break;
+ case realValue:
+ pushValue(valueToString(value.asDouble()));
+ break;
+ case stringValue:
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end-str)));
+ else pushValue("");
+ break;
+ }
+ case booleanValue:
+ pushValue(valueToString(value.asBool()));
+ break;
+ case arrayValue:
+ writeArrayValue(value);
+ break;
+ case objectValue: {
+ Value::Members members(value.getMemberNames());
+ if (members.empty())
+ pushValue("{}");
+ else {
+ writeWithIndent("{");
+ indent();
+ Value::Members::iterator it = members.begin();
+ for (;;) {
+ std::string const& name = *it;
+ Value const& childValue = value[name];
+ writeCommentBeforeValue(childValue);
+ writeWithIndent(valueToQuotedStringN(name.data(), static_cast<unsigned>(name.length())));
+ *sout_ << colonSymbol_;
+ writeValue(childValue);
+ if (++it == members.end()) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ *sout_ << ",";
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("}");
+ }
+ } break;
+ }
+}
+
+void BuiltStyledStreamWriter::writeArrayValue(Value const& value) {
+ unsigned size = value.size();
+ if (size == 0)
+ pushValue("[]");
+ else {
+ bool isMultiLine = (cs_ == CommentStyle::All) || isMultineArray(value);
+ if (isMultiLine) {
+ writeWithIndent("[");
+ indent();
+ bool hasChildValue = !childValues_.empty();
+ unsigned index = 0;
+ for (;;) {
+ Value const& childValue = value[index];
+ writeCommentBeforeValue(childValue);
+ if (hasChildValue)
+ writeWithIndent(childValues_[index]);
+ else {
+ if (!indented_) writeIndent();
+ indented_ = true;
+ writeValue(childValue);
+ indented_ = false;
+ }
+ if (++index == size) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ *sout_ << ",";
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("]");
+ } else // output on a single line
{
- if (*current == '\n') // convert dos EOL
- ++current;
- normalized += '\n';
- } else // handle unix EOL & other char
- normalized += c;
+ assert(childValues_.size() == size);
+ *sout_ << "[";
+ if (!indentation_.empty()) *sout_ << " ";
+ for (unsigned index = 0; index < size; ++index) {
+ if (index > 0)
+ *sout_ << ", ";
+ *sout_ << childValues_[index];
+ }
+ if (!indentation_.empty()) *sout_ << " ";
+ *sout_ << "]";
+ }
+ }
+}
+
+bool BuiltStyledStreamWriter::isMultineArray(Value const& value) {
+ int size = value.size();
+ bool isMultiLine = size * 3 >= rightMargin_;
+ childValues_.clear();
+ for (int index = 0; index < size && !isMultiLine; ++index) {
+ Value const& childValue = value[index];
+ isMultiLine =
+ isMultiLine || ((childValue.isArray() || childValue.isObject()) &&
+ childValue.size() > 0);
}
- return normalized;
+ if (!isMultiLine) // check if line length > max line length
+ {
+ childValues_.reserve(size);
+ addChildValues_ = true;
+ int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+ for (int index = 0; index < size; ++index) {
+ if (hasCommentForValue(value[index])) {
+ isMultiLine = true;
+ }
+ writeValue(value[index]);
+ lineLength += int(childValues_[index].length());
+ }
+ addChildValues_ = false;
+ isMultiLine = isMultiLine || lineLength >= rightMargin_;
+ }
+ return isMultiLine;
+}
+
+void BuiltStyledStreamWriter::pushValue(std::string const& value) {
+ if (addChildValues_)
+ childValues_.push_back(value);
+ else
+ *sout_ << value;
+}
+
+void BuiltStyledStreamWriter::writeIndent() {
+ // blep intended this to look at the so-far-written string
+ // to determine whether we are already indented, but
+ // with a stream we cannot do that. So we rely on some saved state.
+ // The caller checks indented_.
+
+ if (!indentation_.empty()) {
+ // When indentation_ is empty we write nothing, dropping the newline too.
+ *sout_ << '\n' << indentString_;
+ }
+}
+
+void BuiltStyledStreamWriter::writeWithIndent(std::string const& value) {
+ if (!indented_) writeIndent();
+ *sout_ << value;
+ indented_ = false;
+}
+
+void BuiltStyledStreamWriter::indent() { indentString_ += indentation_; }
+
+void BuiltStyledStreamWriter::unindent() {
+ assert(indentString_.size() >= indentation_.size());
+ indentString_.resize(indentString_.size() - indentation_.size());
+}
+
+void BuiltStyledStreamWriter::writeCommentBeforeValue(Value const& root) {
+ if (cs_ == CommentStyle::None) return;
+ if (!root.hasComment(commentBefore))
+ return;
+
+ if (!indented_) writeIndent();
+ const std::string& comment = root.getComment(commentBefore);
+ std::string::const_iterator iter = comment.begin();
+ while (iter != comment.end()) {
+ *sout_ << *iter;
+ if (*iter == '\n' &&
+ (iter != comment.end() && *(iter + 1) == '/'))
+ // writeIndent(); // would write extra newline
+ *sout_ << indentString_;
+ ++iter;
+ }
+ indented_ = false;
+}
+
+void BuiltStyledStreamWriter::writeCommentAfterValueOnSameLine(Value const& root) {
+ if (cs_ == CommentStyle::None) return;
+ if (root.hasComment(commentAfterOnSameLine))
+ *sout_ << " " + root.getComment(commentAfterOnSameLine);
+
+ if (root.hasComment(commentAfter)) {
+ writeIndent();
+ *sout_ << root.getComment(commentAfter);
+ }
+}
+
+// static
+bool BuiltStyledStreamWriter::hasCommentForValue(const Value& value) {
+ return value.hasComment(commentBefore) ||
+ value.hasComment(commentAfterOnSameLine) ||
+ value.hasComment(commentAfter);
+}
+
+///////////////
+// StreamWriter
+
+StreamWriter::StreamWriter()
+ : sout_(NULL)
+{
+}
+StreamWriter::~StreamWriter()
+{
+}
+StreamWriter::Factory::~Factory()
+{}
+StreamWriterBuilder::StreamWriterBuilder()
+{
+ setDefaults(&settings_);
+}
+StreamWriterBuilder::~StreamWriterBuilder()
+{}
+StreamWriter* StreamWriterBuilder::newStreamWriter() const
+{
+ std::string indentation = settings_["indentation"].asString();
+ std::string cs_str = settings_["commentStyle"].asString();
+ bool eyc = settings_["enableYAMLCompatibility"].asBool();
+ bool dnp = settings_["dropNullPlaceholders"].asBool();
+ CommentStyle::Enum cs = CommentStyle::All;
+ if (cs_str == "All") {
+ cs = CommentStyle::All;
+ } else if (cs_str == "None") {
+ cs = CommentStyle::None;
+ } else {
+ throwRuntimeError("commentStyle must be 'All' or 'None'");
+ }
+ std::string colonSymbol = " : ";
+ if (eyc) {
+ colonSymbol = ": ";
+ } else if (indentation.empty()) {
+ colonSymbol = ":";
+ }
+ std::string nullSymbol = "null";
+ if (dnp) {
+ nullSymbol = "";
+ }
+ std::string endingLineFeedSymbol = "";
+ return new BuiltStyledStreamWriter(
+ indentation, cs,
+ colonSymbol, nullSymbol, endingLineFeedSymbol);
+}
+static void getValidWriterKeys(std::set<std::string>* valid_keys)
+{
+ valid_keys->clear();
+ valid_keys->insert("indentation");
+ valid_keys->insert("commentStyle");
+ valid_keys->insert("enableYAMLCompatibility");
+ valid_keys->insert("dropNullPlaceholders");
+}
+bool StreamWriterBuilder::validate(Json::Value* invalid) const
+{
+ Json::Value my_invalid;
+ if (!invalid) invalid = &my_invalid; // so we do not need to test for NULL
+ Json::Value& inv = *invalid;
+ std::set<std::string> valid_keys;
+ getValidWriterKeys(&valid_keys);
+ Value::Members keys = settings_.getMemberNames();
+ size_t n = keys.size();
+ for (size_t i = 0; i < n; ++i) {
+ std::string const& key = keys[i];
+ if (valid_keys.find(key) == valid_keys.end()) {
+ inv[key] = settings_[key];
+ }
+ }
+ return 0u == inv.size();
+}
+Value& StreamWriterBuilder::operator[](std::string key)
+{
+ return settings_[key];
+}
+// static
+void StreamWriterBuilder::setDefaults(Json::Value* settings)
+{
+ //! [StreamWriterBuilderDefaults]
+ (*settings)["commentStyle"] = "All";
+ (*settings)["indentation"] = "\t";
+ (*settings)["enableYAMLCompatibility"] = false;
+ (*settings)["dropNullPlaceholders"] = false;
+ //! [StreamWriterBuilderDefaults]
+}
+
+std::string writeString(StreamWriter::Factory const& builder, Value const& root) {
+ std::ostringstream sout;
+ StreamWriterPtr const writer(builder.newStreamWriter());
+ writer->write(root, &sout);
+ return sout.str();
}
-std::ostream& operator<<(std::ostream& sout, const Value& root) {
- Json::StyledStreamWriter writer;
- writer.write(sout, root);
+std::ostream& operator<<(std::ostream& sout, Value const& root) {
+ StreamWriterBuilder builder;
+ StreamWriterPtr const writer(builder.newStreamWriter());
+ writer->write(root, &sout);
return sout;
}
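Taken together, the factory replaces direct construction of a writer: settings are plain Json::Value entries on the builder, validate() flags misspelled keys, and writeString() runs the whole pipeline. A usage sketch against the API added above:

    #include <json/json.h>
    #include <iostream>

    int main() {
      Json::Value root;
      root["name"] = "demo";

      Json::StreamWriterBuilder builder;
      builder["indentation"] = "  ";   // defaults come from setDefaults()
      builder["commentStyle"] = "None";

      Json::Value bad;
      if (!builder.validate(&bad))     // catches unknown setting keys
        std::cerr << "invalid settings: " << bad << "\n";

      std::cout << Json::writeString(builder, root) << "\n";
      return 0;
    }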
diff --git a/3rdparty/jsoncpp/src/test_lib_json/CMakeLists.txt b/3rdparty/jsoncpp/src/test_lib_json/CMakeLists.txt
index 420d659968c..f54c85f6817 100644
--- a/3rdparty/jsoncpp/src/test_lib_json/CMakeLists.txt
+++ b/3rdparty/jsoncpp/src/test_lib_json/CMakeLists.txt
@@ -1,7 +1,4 @@
-
-IF(JSONCPP_LIB_BUILD_SHARED)
- ADD_DEFINITIONS( -DJSON_DLL )
-ENDIF(JSONCPP_LIB_BUILD_SHARED)
+# vim: et ts=4 sts=4 sw=4 tw=0
ADD_EXECUTABLE( jsoncpp_test
jsontest.cpp
@@ -9,14 +6,33 @@ ADD_EXECUTABLE( jsoncpp_test
main.cpp
)
-TARGET_LINK_LIBRARIES(jsoncpp_test jsoncpp_lib)
+
+IF(BUILD_SHARED_LIBS)
+ ADD_DEFINITIONS( -DJSON_DLL )
+ TARGET_LINK_LIBRARIES(jsoncpp_test jsoncpp_lib)
+ELSE(BUILD_SHARED_LIBS)
+ TARGET_LINK_LIBRARIES(jsoncpp_test jsoncpp_lib_static)
+ENDIF(BUILD_SHARED_LIBS)
+
+# another way to solve issue #90
+#set_target_properties(jsoncpp_test PROPERTIES COMPILE_FLAGS -ffloat-store)
# Run unit tests in post-build
# (default cmake workflow hides away the test result into a file, resulting in poor dev workflow?!?)
IF(JSONCPP_WITH_POST_BUILD_UNITTEST)
- ADD_CUSTOM_COMMAND( TARGET jsoncpp_test
- POST_BUILD
- COMMAND $<TARGET_FILE:jsoncpp_test>)
+ IF(BUILD_SHARED_LIBS)
+ # First, copy the shared lib, for Microsoft.
+ # Then, run the test executable.
+ ADD_CUSTOM_COMMAND( TARGET jsoncpp_test
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:jsoncpp_lib> $<TARGET_FILE_DIR:jsoncpp_test>
+ COMMAND $<TARGET_FILE:jsoncpp_test>)
+ ELSE(BUILD_SHARED_LIBS)
+ # Just run the test executable.
+ ADD_CUSTOM_COMMAND( TARGET jsoncpp_test
+ POST_BUILD
+ COMMAND $<TARGET_FILE:jsoncpp_test>)
+ ENDIF(BUILD_SHARED_LIBS)
ENDIF(JSONCPP_WITH_POST_BUILD_UNITTEST)
SET_TARGET_PROPERTIES(jsoncpp_test PROPERTIES OUTPUT_NAME jsoncpp_test)
diff --git a/3rdparty/jsoncpp/src/test_lib_json/jsontest.cpp b/3rdparty/jsoncpp/src/test_lib_json/jsontest.cpp
index ef9c543fa47..bd9463fa589 100644
--- a/3rdparty/jsoncpp/src/test_lib_json/jsontest.cpp
+++ b/3rdparty/jsoncpp/src/test_lib_json/jsontest.cpp
@@ -323,7 +323,7 @@ void Runner::listTests() const {
}
int Runner::runCommandLine(int argc, const char* argv[]) const {
- typedef std::deque<std::string> TestNames;
+ // typedef std::deque<std::string> TestNames;
Runner subrunner;
for (int index = 1; index < argc; ++index) {
std::string opt = argv[index];
diff --git a/3rdparty/jsoncpp/src/test_lib_json/jsontest.h b/3rdparty/jsoncpp/src/test_lib_json/jsontest.h
index 5c56a40b0ff..d6b7cf386a2 100644
--- a/3rdparty/jsoncpp/src/test_lib_json/jsontest.h
+++ b/3rdparty/jsoncpp/src/test_lib_json/jsontest.h
@@ -178,8 +178,8 @@ private:
template <typename T, typename U>
TestResult& checkEqual(TestResult& result,
- const T& expected,
- const U& actual,
+ T expected,
+ U actual,
const char* file,
unsigned int line,
const char* expr) {
@@ -214,7 +214,7 @@ TestResult& checkStringEqual(TestResult& result,
#define JSONTEST_ASSERT_PRED(expr) \
{ \
JsonTest::PredicateContext _minitest_Context = { \
- result_->predicateId_, __FILE__, __LINE__, #expr \
+ result_->predicateId_, __FILE__, __LINE__, #expr, NULL, NULL \
}; \
result_->predicateStackTail_->next_ = &_minitest_Context; \
result_->predicateId_ += 1; \
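
The macro change spells out every member of the PredicateContext aggregate. This is an illustrative sketch, not jsoncpp code: strict compilers warn (-Wmissing-field-initializers) when an aggregate initializer leaves trailing members implicit, which the two explicit NULLs avoid:

    #include <cstddef>  // NULL

    struct Ctx {  // hypothetical stand-in for JsonTest::PredicateContext
        unsigned id;
        const char* file;
        unsigned line;
        const char* expr;
        Ctx* next;
        void* failed;
    };

    Ctx warns  = { 1u, "f.cpp", 10u, "x == y" };             // trailing members zeroed, but warned about
    Ctx silent = { 1u, "f.cpp", 10u, "x == y", NULL, NULL }; // fully explicit, no warning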
diff --git a/3rdparty/jsoncpp/src/test_lib_json/main.cpp b/3rdparty/jsoncpp/src/test_lib_json/main.cpp
index 51c5e7442b1..0d4284e5373 100644
--- a/3rdparty/jsoncpp/src/test_lib_json/main.cpp
+++ b/3rdparty/jsoncpp/src/test_lib_json/main.cpp
@@ -6,7 +6,7 @@
#include "jsontest.h"
#include <json/config.h>
#include <json/json.h>
-#include <stdexcept>
+#include <cstring>
// Make numeric limits more convenient to talk about.
// Assumes int type in 32 bits.
@@ -17,8 +17,8 @@
#define kint64min Json::Value::minInt64
#define kuint64max Json::Value::maxUInt64
-static const double kdint64max = double(kint64max);
-static const float kfint64max = float(kint64max);
+//static const double kdint64max = double(kint64max);
+//static const float kfint64max = float(kint64max);
static const float kfint32max = float(kint32max);
static const float kfuint32max = float(kuint32max);
@@ -198,6 +198,18 @@ JSONTEST_FIXTURE(ValueTest, objects) {
object1_["some other id"] = "foo";
JSONTEST_ASSERT_EQUAL(Json::Value("foo"), object1_["some other id"]);
+ JSONTEST_ASSERT_EQUAL(Json::Value("foo"), object1_["some other id"]);
+
+ // Remove.
+ Json::Value got;
+ bool did;
+ did = object1_.removeMember("some other id", &got);
+ JSONTEST_ASSERT_EQUAL(Json::Value("foo"), got);
+ JSONTEST_ASSERT_EQUAL(true, did);
+ got = Json::Value("bar");
+ did = object1_.removeMember("some other id", &got);
+ JSONTEST_ASSERT_EQUAL(Json::Value("bar"), got);
+ JSONTEST_ASSERT_EQUAL(false, did);
}
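
The added assertions pin down removeMember's out-parameter contract: it returns true and copies the old value into *removed only when the key existed; on a miss it returns false and leaves *removed untouched (hence got still holding "bar" above). A condensed sketch of that contract:

    #include <json/json.h>
    #include <cassert>
    #include <string>

    int main() {
        Json::Value obj;
        obj["id"] = "foo";

        Json::Value got("sentinel");
        assert(obj.removeMember("id", &got));   // true: key existed
        assert(got.asString() == "foo");        // *removed now holds the old value

        got = "sentinel";
        assert(!obj.removeMember("id", &got));  // false: already gone
        assert(got.asString() == "sentinel");   // *removed left untouched
        return 0;
    }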
JSONTEST_FIXTURE(ValueTest, arrays) {
@@ -240,6 +252,24 @@ JSONTEST_FIXTURE(ValueTest, arrays) {
array1_[2] = Json::Value(17);
JSONTEST_ASSERT_EQUAL(Json::Value(), array1_[1]);
JSONTEST_ASSERT_EQUAL(Json::Value(17), array1_[2]);
+ Json::Value got;
+ JSONTEST_ASSERT_EQUAL(true, array1_.removeIndex(2, &got));
+ JSONTEST_ASSERT_EQUAL(Json::Value(17), got);
+ JSONTEST_ASSERT_EQUAL(false, array1_.removeIndex(2, &got)); // gone now
+}
+JSONTEST_FIXTURE(ValueTest, arrayIssue252)
+{
+ int count = 5;
+ Json::Value root;
+ Json::Value item;
+ root["array"] = Json::Value::nullRef;
+ for (int i = 0; i < count; i++)
+ {
+ item["a"] = i;
+ item["b"] = i;
+ root["array"][i] = item;
+ }
+ //JSONTEST_ASSERT_EQUAL(5, root["array"].size());
}
JSONTEST_FIXTURE(ValueTest, null) {
@@ -265,6 +295,8 @@ JSONTEST_FIXTURE(ValueTest, null) {
JSONTEST_ASSERT_EQUAL(0.0, null_.asDouble());
JSONTEST_ASSERT_EQUAL(0.0, null_.asFloat());
JSONTEST_ASSERT_STRING_EQUAL("", null_.asString());
+
+ JSONTEST_ASSERT_EQUAL(Json::Value::null, null_);
}
JSONTEST_FIXTURE(ValueTest, strings) {
@@ -1499,6 +1531,126 @@ JSONTEST_FIXTURE(ValueTest, offsetAccessors) {
JSONTEST_ASSERT(y.getOffsetLimit() == 0);
}
+JSONTEST_FIXTURE(ValueTest, StaticString) {
+ char mutant[] = "hello";
+ Json::StaticString ss(mutant);
+ std::string regular(mutant);
+ mutant[1] = 'a';
+ JSONTEST_ASSERT_STRING_EQUAL("hallo", ss.c_str());
+ JSONTEST_ASSERT_STRING_EQUAL("hello", regular.c_str());
+ {
+ Json::Value root;
+ root["top"] = ss;
+ JSONTEST_ASSERT_STRING_EQUAL("hallo", root["top"].asString());
+ mutant[1] = 'u';
+ JSONTEST_ASSERT_STRING_EQUAL("hullo", root["top"].asString());
+ }
+ {
+ Json::Value root;
+ root["top"] = regular;
+ JSONTEST_ASSERT_STRING_EQUAL("hello", root["top"].asString());
+ mutant[1] = 'u';
+ JSONTEST_ASSERT_STRING_EQUAL("hello", root["top"].asString());
+ }
+}
+
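
The StaticString fixture fixes the aliasing semantics: assigning a Json::StaticString stores the caller's pointer (so the buffer must outlive the Value, and later mutation shows through), while assigning a std::string takes a private copy. A minimal sketch:

    #include <json/json.h>
    #include <cassert>
    #include <string>

    int main() {
        char buf[] = "hello";
        Json::Value aliased, copied;
        aliased["k"] = Json::StaticString(buf);  // zero-copy: stores the pointer
        copied["k"] = std::string(buf);          // deep copy

        buf[0] = 'J';
        assert(aliased["k"].asString() == "Jello");  // mutation visible through the Value
        assert(copied["k"].asString() == "hello");   // copy unaffected
        return 0;
    }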
+JSONTEST_FIXTURE(ValueTest, CommentBefore) {
+ Json::Value val; // fill val
+ val.setComment(std::string("// this comment should appear before"), Json::commentBefore);
+ Json::StreamWriterBuilder wbuilder;
+ wbuilder.settings_["commentStyle"] = "All";
+ {
+ char const expected[] = "// this comment should appear before\nnull";
+ std::string result = Json::writeString(wbuilder, val);
+ JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+ std::string res2 = val.toStyledString();
+ std::string exp2 = "\n";
+ exp2 += expected;
+ exp2 += "\n";
+ JSONTEST_ASSERT_STRING_EQUAL(exp2, res2);
+ }
+ Json::Value other = "hello";
+ val.swapPayload(other);
+ {
+ char const expected[] = "// this comment should appear before\n\"hello\"";
+ std::string result = Json::writeString(wbuilder, val);
+ JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+ std::string res2 = val.toStyledString();
+ std::string exp2 = "\n";
+ exp2 += expected;
+ exp2 += "\n";
+ JSONTEST_ASSERT_STRING_EQUAL(exp2, res2);
+ JSONTEST_ASSERT_STRING_EQUAL("null\n", other.toStyledString());
+ }
+ val = "hello";
+ // val.setComment("// this comment should appear before", Json::CommentPlacement::commentBefore);
+ // Assignment over-writes comments.
+ {
+ char const expected[] = "\"hello\"";
+ std::string result = Json::writeString(wbuilder, val);
+ JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+ std::string res2 = val.toStyledString();
+ std::string exp2 = "";
+ exp2 += expected;
+ exp2 += "\n";
+ JSONTEST_ASSERT_STRING_EQUAL(exp2, res2);
+ }
+}
+
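
Two behaviors are locked in above: swapPayload exchanges only the stored values, leaving comments attached to their original Value objects, while plain assignment discards comments along with the old payload. Compressed to its essentials:

    #include <json/json.h>

    int main() {
        Json::Value a;  // null
        a.setComment(std::string("// note"), Json::commentBefore);
        Json::Value b("hello");

        a.swapPayload(b);  // a == "hello" and still carries "// note"; b == null, commentless
        a = "bye";         // assignment over-writes the comment too
        return 0;
    }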
+JSONTEST_FIXTURE(ValueTest, zeroes) {
+ char const cstr[] = "h\0i";
+ std::string binary(cstr, sizeof(cstr)); // include trailing 0
+ JSONTEST_ASSERT_EQUAL(4U, binary.length());
+ Json::StreamWriterBuilder b;
+ {
+ Json::Value root;
+ root = binary;
+ JSONTEST_ASSERT_STRING_EQUAL(binary, root.asString());
+ }
+ {
+ char const top[] = "top";
+ Json::Value root;
+ root[top] = binary;
+ JSONTEST_ASSERT_STRING_EQUAL(binary, root[top].asString());
+ Json::Value removed;
+ bool did;
+ did = root.removeMember(top, top + sizeof(top) - 1U,
+ &removed);
+ JSONTEST_ASSERT(did);
+ JSONTEST_ASSERT_STRING_EQUAL(binary, removed.asString());
+ did = root.removeMember(top, top + sizeof(top) - 1U,
+ &removed);
+ JSONTEST_ASSERT(!did);
+ JSONTEST_ASSERT_STRING_EQUAL(binary, removed.asString()); // still
+ }
+}
+
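
Values may now round-trip embedded NULs, and the (begin, end) overload of removeMember makes such data addressable by length rather than by NUL-terminated key. A sketch using only the overload exercised above:

    #include <json/json.h>
    #include <cassert>
    #include <string>

    int main() {
        char const raw[] = "h\0i";             // 3 chars plus the implicit trailing 0
        std::string binary(raw, sizeof(raw));  // length 4, NULs preserved
        Json::Value root;
        root["blob"] = binary;
        assert(root["blob"].asString().size() == 4);

        Json::Value removed;
        char const key[] = "blob";
        bool did = root.removeMember(key, key + sizeof(key) - 1, &removed);
        assert(did && removed.asString() == binary);
        return 0;
    }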
+JSONTEST_FIXTURE(ValueTest, zeroesInKeys) {
+ char const cstr[] = "h\0i";
+ std::string binary(cstr, sizeof(cstr)); // include trailing 0
+ JSONTEST_ASSERT_EQUAL(4U, binary.length());
+ {
+ Json::Value root;
+ root[binary] = "there";
+ JSONTEST_ASSERT_STRING_EQUAL("there", root[binary].asString());
+ JSONTEST_ASSERT(!root.isMember("h"));
+ JSONTEST_ASSERT(root.isMember(binary));
+ JSONTEST_ASSERT_STRING_EQUAL("there", root.get(binary, Json::Value::nullRef).asString());
+ Json::Value removed;
+ bool did;
+ did = root.removeMember(binary.data(), binary.data() + binary.length(),
+ &removed);
+ JSONTEST_ASSERT(did);
+ JSONTEST_ASSERT_STRING_EQUAL("there", removed.asString());
+ did = root.removeMember(binary.data(), binary.data() + binary.length(),
+ &removed);
+ JSONTEST_ASSERT(!did);
+ JSONTEST_ASSERT_STRING_EQUAL("there", removed.asString()); // still
+ JSONTEST_ASSERT(!root.isMember(binary));
+ JSONTEST_ASSERT_STRING_EQUAL("", root.get(binary, Json::Value::nullRef).asString());
+ }
+}
+
struct WriterTest : JsonTest::TestCase {};
JSONTEST_FIXTURE(WriterTest, dropNullPlaceholders) {
@@ -1510,6 +1662,39 @@ JSONTEST_FIXTURE(WriterTest, dropNullPlaceholders) {
JSONTEST_ASSERT(writer.write(nullValue) == "\n");
}
+struct StreamWriterTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(StreamWriterTest, dropNullPlaceholders) {
+ Json::StreamWriterBuilder b;
+ Json::Value nullValue;
+ b.settings_["dropNullPlaceholders"] = false;
+ JSONTEST_ASSERT(Json::writeString(b, nullValue) == "null");
+ b.settings_["dropNullPlaceholders"] = true;
+ JSONTEST_ASSERT(Json::writeString(b, nullValue) == "");
+}
+
+JSONTEST_FIXTURE(StreamWriterTest, writeZeroes) {
+ std::string binary("hi", 3); // include trailing 0
+ JSONTEST_ASSERT_EQUAL(3, binary.length());
+ std::string expected("\"hi\\u0000\""); // embedded NUL escaped as \u0000
+ Json::StreamWriterBuilder b;
+ {
+ Json::Value root;
+ root = binary;
+ JSONTEST_ASSERT_STRING_EQUAL(binary, root.asString());
+ std::string out = Json::writeString(b, root);
+ JSONTEST_ASSERT_EQUAL(expected.size(), out.size());
+ JSONTEST_ASSERT_STRING_EQUAL(expected, out);
+ }
+ {
+ Json::Value root;
+ root["top"] = binary;
+ JSONTEST_ASSERT_STRING_EQUAL(binary, root["top"].asString());
+ std::string out = Json::writeString(b, root["top"]);
+ JSONTEST_ASSERT_STRING_EQUAL(expected, out);
+ }
+}
+
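
These two fixtures fix the corresponding output rules: with dropNullPlaceholders enabled, a bare null serializes to the empty string instead of "null", and an embedded NUL is escaped as \u0000 rather than silently truncating the string. Both in one sketch:

    #include <json/json.h>
    #include <cassert>
    #include <string>

    int main() {
        Json::StreamWriterBuilder b;

        b.settings_["dropNullPlaceholders"] = true;
        assert(Json::writeString(b, Json::Value()) == "");  // null dropped entirely

        b.settings_["dropNullPlaceholders"] = false;
        std::string withNul("hi", 3);  // 'h', 'i', '\0'
        assert(Json::writeString(b, Json::Value(withNul)) == "\"hi\\u0000\"");
        return 0;
    }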
struct ReaderTest : JsonTest::TestCase {};
JSONTEST_FIXTURE(ReaderTest, parseWithNoErrors) {
@@ -1601,12 +1786,561 @@ JSONTEST_FIXTURE(ReaderTest, parseWithDetailError) {
JSONTEST_ASSERT(errors.at(0).message == "Bad escape sequence in string");
}
+struct CharReaderTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithNoErrors) {
+ Json::CharReaderBuilder b;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ Json::Value root;
+ char const doc[] = "{ \"property\" : \"value\" }";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs.size() == 0);
+ delete reader;
+}
+
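
The CharReader tests establish the basic reader-side pattern: newCharReader() returns a raw, caller-owned pointer (these C++98-era fixtures delete it by hand), and parse takes a [begin, end) character range plus an output Value and error string. Factored into a helper for clarity (parseDoc is ours, not jsoncpp's):

    #include <json/json.h>
    #include <cstring>
    #include <iostream>
    #include <string>

    bool parseDoc(char const* doc, Json::Value* root) {
        Json::CharReaderBuilder b;
        Json::CharReader* reader = b.newCharReader();  // caller owns
        std::string errs;
        bool ok = reader->parse(doc, doc + std::strlen(doc), root, &errs);
        if (!ok)
            std::cerr << errs;  // "* Line L, Column C\n  <message>\n" per the fixtures
        delete reader;
        return ok;
    }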
+JSONTEST_FIXTURE(CharReaderTest, parseWithNoErrorsTestingOffsets) {
+ Json::CharReaderBuilder b;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ Json::Value root;
+ char const doc[] =
+ "{ \"property\" : [\"value\", \"value2\"], \"obj\" : "
+ "{ \"nested\" : 123, \"bool\" : true}, \"null\" : "
+ "null, \"false\" : false }";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs.size() == 0);
+ delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithOneError) {
+ Json::CharReaderBuilder b;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ Json::Value root;
+ char const doc[] =
+ "{ \"property\" :: \"value\" }";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT(errs ==
+ "* Line 1, Column 15\n Syntax error: value, object or array "
+ "expected.\n");
+ delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseChineseWithOneError) {
+ Json::CharReaderBuilder b;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ Json::Value root;
+ char const doc[] =
+ "{ \"pr佐藤erty\" :: \"value\" }";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT(errs ==
+ "* Line 1, Column 19\n Syntax error: value, object or array "
+ "expected.\n");
+ delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithDetailError) {
+ Json::CharReaderBuilder b;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ Json::Value root;
+ char const doc[] =
+ "{ \"property\" : \"v\\alue\" }";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT(errs ==
+ "* Line 1, Column 16\n Bad escape sequence in string\nSee "
+ "Line 1, Column 20 for detail.\n");
+ delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithStackLimit) {
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ char const doc[] =
+ "{ \"property\" : \"value\" }";
+ {
+ b.settings_["stackLimit"] = 2;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL("value", root["property"]);
+ delete reader;
+ }
+ {
+ b.settings_["stackLimit"] = 1;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ JSONTEST_ASSERT_THROWS(reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs));
+ delete reader;
+ }
+}
+
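
stackLimit caps the parser's nesting depth, and per JSONTEST_ASSERT_THROWS above an over-deep document makes parse throw rather than return false. A guarded sketch (catching std::exception is an assumption; the fixture only checks that something is thrown):

    #include <json/json.h>
    #include <cstring>
    #include <exception>
    #include <iostream>
    #include <string>

    int main() {
        Json::CharReaderBuilder b;
        b.settings_["stackLimit"] = 1;  // too shallow even for a one-level object
        Json::CharReader* reader = b.newCharReader();
        Json::Value root;
        std::string errs;
        char const doc[] = "{ \"property\" : \"value\" }";
        try {
            reader->parse(doc, doc + std::strlen(doc), &root, &errs);
        } catch (std::exception const& e) {
            std::cerr << e.what() << '\n';
        }
        delete reader;
        return 0;
    }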
+struct CharReaderStrictModeTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderStrictModeTest, dupKeys) {
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ char const doc[] =
+ "{ \"property\" : \"value\", \"key\" : \"val1\", \"key\" : \"val2\" }";
+ {
+ b.strictMode(&b.settings_);
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT_STRING_EQUAL(
+ "* Line 1, Column 41\n"
+ " Duplicate key: 'key'\n",
+ errs);
+ JSONTEST_ASSERT_EQUAL("val1", root["key"]); // so far
+ delete reader;
+ }
+}
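
strictMode rewrites a settings block to the strict configuration, the reader-side counterpart of setDefaults; under it a duplicate key is a hard parse error, and the assertion above shows the first binding surviving in the partially built root. Sketch:

    #include <json/json.h>
    #include <cstring>
    #include <string>

    int main() {
        Json::CharReaderBuilder b;
        b.strictMode(&b.settings_);  // called through the instance, as in the fixture
        Json::CharReader* reader = b.newCharReader();
        Json::Value root;
        std::string errs;
        char const doc[] = "{ \"key\" : \"val1\", \"key\" : \"val2\" }";
        bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
        // ok == false, errs names the duplicate key, root["key"] == "val1"
        delete reader;
        return ok ? 1 : 0;
    }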
+struct CharReaderFailIfExtraTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, issue164) {
+ // This is interpreted as a string value followed by a colon.
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ char const doc[] =
+ " \"property\" : \"value\" }";
+ {
+ b.settings_["failIfExtra"] = false;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL("property", root);
+ delete reader;
+ }
+ {
+ b.settings_["failIfExtra"] = true;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT_STRING_EQUAL(errs,
+ "* Line 1, Column 13\n"
+ " Extra non-whitespace after JSON value.\n");
+ JSONTEST_ASSERT_EQUAL("property", root);
+ delete reader;
+ }
+ {
+ b.settings_["failIfExtra"] = false;
+ b.strictMode(&b.settings_);
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT_STRING_EQUAL(errs,
+ "* Line 1, Column 13\n"
+ " Extra non-whitespace after JSON value.\n");
+ JSONTEST_ASSERT_EQUAL("property", root);
+ delete reader;
+ }
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, issue107) {
+ // This is interpreted as an int value followed by a colon.
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ char const doc[] =
+ "1:2:3";
+ b.settings_["failIfExtra"] = true;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(!ok);
+ JSONTEST_ASSERT_STRING_EQUAL(
+ "* Line 1, Column 2\n"
+ " Extra non-whitespace after JSON value.\n",
+ errs);
+ JSONTEST_ASSERT_EQUAL(1, root.asInt());
+ delete reader;
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, commentAfterObject) {
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ {
+ char const doc[] =
+ "{ \"property\" : \"value\" } //trailing\n//comment\n";
+ b.settings_["failIfExtra"] = true;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL("value", root["property"]);
+ delete reader;
+ }
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, commentAfterArray) {
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ char const doc[] =
+ "[ \"property\" , \"value\" ] //trailing\n//comment\n";
+ b.settings_["failIfExtra"] = true;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL("value", root[1u]);
+ delete reader;
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, commentAfterBool) {
+ Json::CharReaderBuilder b;
+ Json::Value root;
+ char const doc[] =
+ " true /*trailing\ncomment*/";
+ b.settings_["failIfExtra"] = true;
+ Json::CharReader* reader(b.newCharReader());
+ std::string errs;
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(true, root.asBool());
+ delete reader;
+}
+struct CharReaderAllowDropNullTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowDropNullTest, issue178) {
+ Json::CharReaderBuilder b;
+ b.settings_["allowDroppedNullPlaceholders"] = true;
+ Json::Value root;
+ std::string errs;
+ Json::CharReader* reader(b.newCharReader());
+ {
+ char const doc[] = "{\"a\":,\"b\":true}";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ JSONTEST_ASSERT_EQUAL(Json::nullValue, root.get("a", true));
+ }
+ {
+ char const doc[] = "{\"a\":}";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(1u, root.size());
+ JSONTEST_ASSERT_EQUAL(Json::nullValue, root.get("a", true));
+ }
+ {
+ char const doc[] = "[]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL(0u, root.size());
+ JSONTEST_ASSERT_EQUAL(Json::arrayValue, root);
+ }
+ {
+ char const doc[] = "[null]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL(1u, root.size());
+ }
+ {
+ char const doc[] = "[,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ }
+ {
+ char const doc[] = "[,,,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(4u, root.size());
+ }
+ {
+ char const doc[] = "[null,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ }
+ {
+ char const doc[] = "[,null]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ }
+ {
+ char const doc[] = "[,,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(3u, root.size());
+ }
+ {
+ char const doc[] = "[null,,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(3u, root.size());
+ }
+ {
+ char const doc[] = "[,null,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(3u, root.size());
+ }
+ {
+ char const doc[] = "[,,null]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL(3u, root.size());
+ }
+ {
+ char const doc[] = "[[],,,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(4u, root.size());
+ JSONTEST_ASSERT_EQUAL(Json::arrayValue, root[0u]);
+ }
+ {
+ char const doc[] = "[,[],,]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(4u, root.size());
+ JSONTEST_ASSERT_EQUAL(Json::arrayValue, root[1u]);
+ }
+ {
+ char const doc[] = "[,,,[]]";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT(errs == "");
+ JSONTEST_ASSERT_EQUAL(4u, root.size());
+ JSONTEST_ASSERT_EQUAL(Json::arrayValue, root[3u]);
+ }
+ delete reader;
+}
+
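
allowDroppedNullPlaceholders is presumably the reader-side pairing for the writer's dropNullPlaceholders: each elided array element is re-read as null, so "[,null,]" above parses as three elements. Reduced to the essential call:

    #include <json/json.h>
    #include <cassert>
    #include <cstring>
    #include <string>

    int main() {
        Json::CharReaderBuilder b;
        b.settings_["allowDroppedNullPlaceholders"] = true;
        Json::CharReader* reader = b.newCharReader();
        Json::Value root;
        std::string errs;
        char const doc[] = "[,null,]";  // both gaps become null
        bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
        assert(ok && root.size() == 3);
        delete reader;
        return 0;
    }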
+struct CharReaderAllowSingleQuotesTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowSingleQuotesTest, issue182) {
+ Json::CharReaderBuilder b;
+ b.settings_["allowSingleQuotes"] = true;
+ Json::Value root;
+ std::string errs;
+ Json::CharReader* reader(b.newCharReader());
+ {
+ char const doc[] = "{'a':true,\"b\":true}";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ JSONTEST_ASSERT_EQUAL(true, root.get("a", false));
+ JSONTEST_ASSERT_EQUAL(true, root.get("b", false));
+ }
+ {
+ char const doc[] = "{'a': 'x', \"b\":'y'}";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ JSONTEST_ASSERT_STRING_EQUAL("x", root["a"].asString());
+ JSONTEST_ASSERT_STRING_EQUAL("y", root["b"].asString());
+ }
+ delete reader;
+}
+
+struct CharReaderAllowZeroesTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowZeroesTest, issue176) {
+ Json::CharReaderBuilder b;
+ b.settings_["allowSingleQuotes"] = true;
+ Json::Value root;
+ std::string errs;
+ Json::CharReader* reader(b.newCharReader());
+ {
+ char const doc[] = "{'a':true,\"b\":true}";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ JSONTEST_ASSERT_EQUAL(true, root.get("a", false));
+ JSONTEST_ASSERT_EQUAL(true, root.get("b", false));
+ }
+ {
+ char const doc[] = "{'a': 'x', \"b\":'y'}";
+ bool ok = reader->parse(
+ doc, doc + std::strlen(doc),
+ &root, &errs);
+ JSONTEST_ASSERT(ok);
+ JSONTEST_ASSERT_STRING_EQUAL("", errs);
+ JSONTEST_ASSERT_EQUAL(2u, root.size());
+ JSONTEST_ASSERT_STRING_EQUAL("x", root["a"].asString());
+ JSONTEST_ASSERT_STRING_EQUAL("y", root["b"].asString());
+ }
+ delete reader;
+}
+
+struct BuilderTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(BuilderTest, settings) {
+ {
+ Json::Value errs;
+ Json::CharReaderBuilder rb;
+ JSONTEST_ASSERT_EQUAL(false, rb.settings_.isMember("foo"));
+ JSONTEST_ASSERT_EQUAL(true, rb.validate(&errs));
+ rb["foo"] = "bar";
+ JSONTEST_ASSERT_EQUAL(true, rb.settings_.isMember("foo"));
+ JSONTEST_ASSERT_EQUAL(false, rb.validate(&errs));
+ }
+ {
+ Json::Value errs;
+ Json::StreamWriterBuilder wb;
+ JSONTEST_ASSERT_EQUAL(false, wb.settings_.isMember("foo"));
+ JSONTEST_ASSERT_EQUAL(true, wb.validate(&errs));
+ wb["foo"] = "bar";
+ JSONTEST_ASSERT_EQUAL(true, wb.settings_.isMember("foo"));
+ JSONTEST_ASSERT_EQUAL(false, wb.validate(&errs));
+ }
+}
+
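
BuilderTest exercises validate(), which (per the implementation at the top of this diff) copies any unrecognized settings_ keys into the supplied Value and returns true only when none were found; passing NULL is explicitly tolerated. Sketch:

    #include <json/json.h>
    #include <cassert>
    #include <cstddef>  // NULL

    int main() {
        Json::StreamWriterBuilder wb;
        wb["typo"] = true;            // not among getValidWriterKeys()
        Json::Value bad;
        assert(!wb.validate(&bad));   // unknown keys land in 'bad'
        assert(bad.isMember("typo"));
        assert(!wb.validate(NULL));   // NULL allowed; validate substitutes a local Value
        return 0;
    }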
+struct IteratorTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(IteratorTest, distance) {
+ Json::Value json;
+ json["k1"] = "a";
+ json["k2"] = "b";
+ int dist = 0;
+ std::string str;
+ for (Json::ValueIterator it = json.begin(); it != json.end(); ++it) {
+ dist = it - json.begin();
+ str = it->asString().c_str();
+ }
+ JSONTEST_ASSERT_EQUAL(1, dist);
+ JSONTEST_ASSERT_STRING_EQUAL("b", str);
+}
+
+JSONTEST_FIXTURE(IteratorTest, names) {
+ Json::Value json;
+ json["k1"] = "a";
+ json["k2"] = "b";
+ Json::ValueIterator it = json.begin();
+ JSONTEST_ASSERT(it != json.end());
+ JSONTEST_ASSERT_EQUAL(Json::Value("k1"), it.key());
+ JSONTEST_ASSERT_STRING_EQUAL("k1", it.name());
+ JSONTEST_ASSERT_EQUAL(-1, it.index());
+ ++it;
+ JSONTEST_ASSERT(it != json.end());
+ JSONTEST_ASSERT_EQUAL(Json::Value("k2"), it.key());
+ JSONTEST_ASSERT_STRING_EQUAL("k2", it.name());
+ JSONTEST_ASSERT_EQUAL(-1, it.index());
+ ++it;
+ JSONTEST_ASSERT(it == json.end());
+}
+
+JSONTEST_FIXTURE(IteratorTest, indexes) {
+ Json::Value json;
+ json[0] = "a";
+ json[1] = "b";
+ Json::ValueIterator it = json.begin();
+ JSONTEST_ASSERT(it != json.end());
+ JSONTEST_ASSERT_EQUAL(Json::Value(Json::ArrayIndex(0)), it.key());
+ JSONTEST_ASSERT_STRING_EQUAL("", it.name());
+ JSONTEST_ASSERT_EQUAL(0, it.index());
+ ++it;
+ JSONTEST_ASSERT(it != json.end());
+ JSONTEST_ASSERT_EQUAL(Json::Value(Json::ArrayIndex(1)), it.key());
+ JSONTEST_ASSERT_STRING_EQUAL("", it.name());
+ JSONTEST_ASSERT_EQUAL(1, it.index());
+ ++it;
+ JSONTEST_ASSERT(it == json.end());
+}
+
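
The iterator fixtures document the key()/name()/index() triple: object iterators yield a string key(), a non-empty name(), and an index() that compares equal to -1, while array iterators yield a numeric key(), an empty name(), and the element index; subtracting begin() gives the distance, as the distance fixture shows. Condensed:

    #include <json/json.h>
    #include <cassert>
    #include <string>

    int main() {
        Json::Value obj, arr;
        obj["k1"] = "a";
        arr[0] = "x";

        Json::ValueIterator oit = obj.begin();
        assert(oit.name() == "k1");
        assert(oit.index() == -1);  // compares equal to UInt(-1), as the fixture asserts

        Json::ValueIterator ait = arr.begin();
        assert(ait.name() == "");
        assert(ait.index() == 0);
        assert(ait - arr.begin() == 0);  // iterator distance from begin()
        return 0;
    }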
int main(int argc, const char* argv[]) {
JsonTest::Runner runner;
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, checkNormalizeFloatingPointStr);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, memberCount);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, objects);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, arrays);
+ JSONTEST_REGISTER_FIXTURE(runner, ValueTest, arrayIssue252);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, null);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, strings);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, bools);
@@ -1623,6 +2357,15 @@ int main(int argc, const char* argv[]) {
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareType);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, offsetAccessors);
JSONTEST_REGISTER_FIXTURE(runner, ValueTest, typeChecksThrowExceptions);
+ JSONTEST_REGISTER_FIXTURE(runner, ValueTest, StaticString);
+ JSONTEST_REGISTER_FIXTURE(runner, ValueTest, CommentBefore);
+ //JSONTEST_REGISTER_FIXTURE(runner, ValueTest, nulls);
+ JSONTEST_REGISTER_FIXTURE(runner, ValueTest, zeroes);
+ JSONTEST_REGISTER_FIXTURE(runner, ValueTest, zeroesInKeys);
+
+ JSONTEST_REGISTER_FIXTURE(runner, WriterTest, dropNullPlaceholders);
+ JSONTEST_REGISTER_FIXTURE(runner, StreamWriterTest, dropNullPlaceholders);
+ JSONTEST_REGISTER_FIXTURE(runner, StreamWriterTest, writeZeroes);
JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseWithNoErrors);
JSONTEST_REGISTER_FIXTURE(
@@ -1631,7 +2374,33 @@ int main(int argc, const char* argv[]) {
JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseChineseWithOneError);
JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseWithDetailError);
- JSONTEST_REGISTER_FIXTURE(runner, WriterTest, dropNullPlaceholders);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithNoErrors);
+ JSONTEST_REGISTER_FIXTURE(
+ runner, CharReaderTest, parseWithNoErrorsTestingOffsets);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithOneError);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseChineseWithOneError);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithDetailError);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithStackLimit);
+
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderStrictModeTest, dupKeys);
+
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, issue164);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, issue107);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, commentAfterObject);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, commentAfterArray);
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, commentAfterBool);
+
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowDropNullTest, issue178);
+
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowSingleQuotesTest, issue182);
+
+ JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowZeroesTest, issue176);
+
+ JSONTEST_REGISTER_FIXTURE(runner, BuilderTest, settings);
+
+ JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, distance);
+ JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, names);
+ JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, indexes);
return runner.runCommandLine(argc, argv);
}
diff --git a/3rdparty/jsoncpp/test/cleantests.py b/3rdparty/jsoncpp/test/cleantests.py
index c38fd8ffdd1..9772d542b3a 100644
--- a/3rdparty/jsoncpp/test/cleantests.py
+++ b/3rdparty/jsoncpp/test/cleantests.py
@@ -1,10 +1,16 @@
-# removes all files created during testing
+# Copyright 2007 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+"""Removes all files created during testing."""
+
import glob
import os
paths = []
for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
- paths += glob.glob( 'data/' + pattern )
+ paths += glob.glob('data/' + pattern)
for path in paths:
- os.unlink( path )
+ os.unlink(path)
diff --git a/3rdparty/jsoncpp/test/data/test_comment_00.expected b/3rdparty/jsoncpp/test/data/test_comment_00.expected
new file mode 100644
index 00000000000..284a797d714
--- /dev/null
+++ b/3rdparty/jsoncpp/test/data/test_comment_00.expected
@@ -0,0 +1,4 @@
+// Comment for array
+.=[]
+// Comment within array
+.[0]="one-element"
diff --git a/3rdparty/jsoncpp/test/data/test_comment_00.json b/3rdparty/jsoncpp/test/data/test_comment_00.json
new file mode 100644
index 00000000000..4df577a8ae2
--- /dev/null
+++ b/3rdparty/jsoncpp/test/data/test_comment_00.json
@@ -0,0 +1,5 @@
+// Comment for array
+[
+ // Comment within array
+ "one-element"
+]
diff --git a/3rdparty/jsoncpp/test/data/test_comment_01.expected b/3rdparty/jsoncpp/test/data/test_comment_01.expected
index 2a7f00c450b..1ed01ba100c 100644
--- a/3rdparty/jsoncpp/test/data/test_comment_01.expected
+++ b/3rdparty/jsoncpp/test/data/test_comment_01.expected
@@ -1,5 +1,7 @@
.={}
+// Comment for array
.test=[]
+// Comment within array
.test[0]={}
.test[0].a="aaa"
.test[1]={}
diff --git a/3rdparty/jsoncpp/test/data/test_comment_01.json b/3rdparty/jsoncpp/test/data/test_comment_01.json
index 7363490a91d..6defe400404 100644
--- a/3rdparty/jsoncpp/test/data/test_comment_01.json
+++ b/3rdparty/jsoncpp/test/data/test_comment_01.json
@@ -1,6 +1,8 @@
{
"test":
+ // Comment for array
[
+ // Comment within array
{ "a" : "aaa" }, // Comment for a
{ "b" : "bbb" }, // Comment for b
{ "c" : "ccc" } // Comment for c
diff --git a/3rdparty/jsoncpp/test/data/test_comment_02.expected b/3rdparty/jsoncpp/test/data/test_comment_02.expected
index 88d2bd0dc1b..8986dbac05e 100644
--- a/3rdparty/jsoncpp/test/data/test_comment_02.expected
+++ b/3rdparty/jsoncpp/test/data/test_comment_02.expected
@@ -11,4 +11,13 @@
// Multiline comment cpp-style
// Second line
.cpp-test.c=3
-.cpp-test.d=4
+// Comment before double
+.cpp-test.d=4.1
+// Comment before string
+.cpp-test.e="e-string"
+// Comment before true
+.cpp-test.f=true
+// Comment before false
+.cpp-test.g=false
+// Comment before null
+.cpp-test.h=null
diff --git a/3rdparty/jsoncpp/test/data/test_comment_02.json b/3rdparty/jsoncpp/test/data/test_comment_02.json
index 297d889036a..f5042e0dd68 100644
--- a/3rdparty/jsoncpp/test/data/test_comment_02.json
+++ b/3rdparty/jsoncpp/test/data/test_comment_02.json
@@ -12,6 +12,15 @@
// Multiline comment cpp-style
// Second line
"c" : 3,
- "d" : 4
+ // Comment before double
+ "d" : 4.1,
+ // Comment before string
+ "e" : "e-string",
+ // Comment before true
+ "f" : true,
+ // Comment before false
+ "g" : false,
+ // Comment before null
+ "h" : null
}
}
diff --git a/3rdparty/jsoncpp/test/generate_expected.py b/3rdparty/jsoncpp/test/generate_expected.py
index f668da23861..0b74f51ceef 100644
--- a/3rdparty/jsoncpp/test/generate_expected.py
+++ b/3rdparty/jsoncpp/test/generate_expected.py
@@ -1,10 +1,15 @@
+# Copyright 2007 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
from __future__ import print_function
import glob
import os.path
-for path in glob.glob( '*.json' ):
+for path in glob.glob('*.json'):
text = file(path,'rt').read()
target = os.path.splitext(path)[0] + '.expected'
- if os.path.exists( target ):
+ if os.path.exists(target):
print('skipping:', target)
else:
print('creating:', target)
diff --git a/3rdparty/jsoncpp/test/pyjsontestrunner.py b/3rdparty/jsoncpp/test/pyjsontestrunner.py
index 3f08a8a7325..7f38356ae98 100644
--- a/3rdparty/jsoncpp/test/pyjsontestrunner.py
+++ b/3rdparty/jsoncpp/test/pyjsontestrunner.py
@@ -1,4 +1,11 @@
-# Simple implementation of a json test runner to run the test against json-py.
+# Copyright 2007 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+"""Simple implementation of a json test runner to run the test against
+json-py."""
+
from __future__ import print_function
import sys
import os.path
@@ -15,50 +22,50 @@ actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
-def valueTreeToString( fout, value, path = '.' ):
+def valueTreeToString(fout, value, path = '.'):
ty = type(value)
if ty is types.DictType:
- fout.write( '%s={}\n' % path )
+ fout.write('%s={}\n' % path)
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
- valueTreeToString( fout, value[name], path + suffix + name )
+ valueTreeToString(fout, value[name], path + suffix + name)
elif ty is types.ListType:
- fout.write( '%s=[]\n' % path )
- for index, childValue in zip( xrange(0,len(value)), value ):
- valueTreeToString( fout, childValue, path + '[%d]' % index )
+ fout.write('%s=[]\n' % path)
+ for index, childValue in zip(xrange(0,len(value)), value):
+ valueTreeToString(fout, childValue, path + '[%d]' % index)
elif ty is types.StringType:
- fout.write( '%s="%s"\n' % (path,value) )
+ fout.write('%s="%s"\n' % (path,value))
elif ty is types.IntType:
- fout.write( '%s=%d\n' % (path,value) )
+ fout.write('%s=%d\n' % (path,value))
elif ty is types.FloatType:
- fout.write( '%s=%.16g\n' % (path,value) )
+ fout.write('%s=%.16g\n' % (path,value))
elif value is True:
- fout.write( '%s=true\n' % path )
+ fout.write('%s=true\n' % path)
elif value is False:
- fout.write( '%s=false\n' % path )
+ fout.write('%s=false\n' % path)
elif value is None:
- fout.write( '%s=null\n' % path )
+ fout.write('%s=null\n' % path)
else:
assert False and "Unexpected value type"
-def parseAndSaveValueTree( input, actual_path ):
- root = json.loads( input )
- fout = file( actual_path, 'wt' )
- valueTreeToString( fout, root )
+def parseAndSaveValueTree(input, actual_path):
+ root = json.loads(input)
+ fout = file(actual_path, 'wt')
+ valueTreeToString(fout, root)
fout.close()
return root
-def rewriteValueTree( value, rewrite_path ):
- rewrite = json.dumps( value )
+def rewriteValueTree(value, rewrite_path):
+ rewrite = json.dumps(value)
#rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
- file( rewrite_path, 'wt').write( rewrite + '\n' )
+ file(rewrite_path, 'wt').write(rewrite + '\n')
return rewrite
-input = file( input_path, 'rt' ).read()
-root = parseAndSaveValueTree( input, actual_path )
-rewrite = rewriteValueTree( json.write( root ), rewrite_path )
-rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
+input = file(input_path, 'rt').read()
+root = parseAndSaveValueTree(input, actual_path)
+rewrite = rewriteValueTree(json.write(root), rewrite_path)
+rewrite_root = parseAndSaveValueTree(rewrite, rewrite_actual_path)
-sys.exit( 0 )
+sys.exit(0)
diff --git a/3rdparty/jsoncpp/test/runjsontests.py b/3rdparty/jsoncpp/test/runjsontests.py
index 9422d57d9dc..d21c02f3802 100644
--- a/3rdparty/jsoncpp/test/runjsontests.py
+++ b/3rdparty/jsoncpp/test/runjsontests.py
@@ -1,17 +1,41 @@
+# Copyright 2007 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
from __future__ import print_function
+from __future__ import unicode_literals
+from io import open
+from glob import glob
import sys
import os
import os.path
-from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '
-def compareOutputs( expected, actual, message ):
+def getStatusOutput(cmd):
+ """
+ Return (status, output) as (int, unicode) on both Python 2 and 3.
+ Note: os.popen().close() returns None for an exit status of 0.
+ """
+ print(cmd, file=sys.stderr)
+ pipe = os.popen(cmd)
+ process_output = pipe.read()
+ try:
+ # We have been using os.popen(). When we read() the result
+ # we get 'str' (bytes) in py2, and 'str' (unicode) in py3.
+ # Ugh! There must be a better way to handle this.
+ process_output = process_output.decode('utf-8')
+ except AttributeError:
+ pass # python3
+ status = pipe.close()
+ return status, process_output
+def compareOutputs(expected, actual, message):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
- max_line_to_compare = min( len(expected), len(actual) )
+ max_line_to_compare = min(len(expected), len(actual))
for index in range(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
@@ -20,7 +44,7 @@ def compareOutputs( expected, actual, message ):
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
- def safeGetLine( lines, index ):
+ def safeGetLine(lines, index):
index += -1
if index >= len(lines):
return ''
@@ -30,65 +54,65 @@ def compareOutputs( expected, actual, message ):
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
- safeGetLine(actual,diff_line) )
+ safeGetLine(actual,diff_line))
-def safeReadFile( path ):
+def safeReadFile(path):
try:
- return file( path, 'rt' ).read()
+ return open(path, 'rt', encoding = 'utf-8').read()
except IOError as e:
return '<File "%s" is missing: %s>' % (path,e)
-def runAllTests( jsontest_executable_path, input_dir = None,
- use_valgrind=False, with_json_checker=False ):
+def runAllTests(jsontest_executable_path, input_dir = None,
+ use_valgrind=False, with_json_checker=False,
+ writerClass='StyledWriter'):
if not input_dir:
- input_dir = os.path.join( os.getcwd(), 'data' )
- tests = glob( os.path.join( input_dir, '*.json' ) )
+ input_dir = os.path.join(os.getcwd(), 'data')
+ tests = glob(os.path.join(input_dir, '*.json'))
if with_json_checker:
- test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
+ test_jsonchecker = glob(os.path.join(input_dir, '../jsonchecker', '*.json'))
else:
test_jsonchecker = []
failed_tests = []
valgrind_path = use_valgrind and VALGRIND_CMD or ''
for input_path in tests + test_jsonchecker:
- expect_failure = os.path.basename( input_path ).startswith( 'fail' )
+ expect_failure = os.path.basename(input_path).startswith('fail')
is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
print('TESTING:', input_path, end=' ')
options = is_json_checker_test and '--json-checker' or ''
- pipe = os.popen( '%s%s %s "%s"' % (
- valgrind_path, jsontest_executable_path, options,
- input_path) )
- process_output = pipe.read()
- status = pipe.close()
+ options += ' --json-writer %s'%writerClass
+ cmd = '%s%s %s "%s"' % ( valgrind_path, jsontest_executable_path, options,
+ input_path)
+ status, process_output = getStatusOutput(cmd)
if is_json_checker_test:
if expect_failure:
- if status is None:
+ if not status:
print('FAILED')
- failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
- safeReadFile(input_path)) )
+ failed_tests.append((input_path, 'Parsing should have failed:\n%s' %
+ safeReadFile(input_path)))
else:
print('OK')
else:
- if status is not None:
+ if status:
print('FAILED')
- failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
+ failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
else:
print('OK')
else:
base_path = os.path.splitext(input_path)[0]
- actual_output = safeReadFile( base_path + '.actual' )
- actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
- file(base_path + '.process-output','wt').write( process_output )
+ actual_output = safeReadFile(base_path + '.actual')
+ actual_rewrite_output = safeReadFile(base_path + '.actual-rewrite')
+ open(base_path + '.process-output', 'wt', encoding = 'utf-8').write(process_output)
if status:
print('parsing failed')
- failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
+ failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
- expected_output = file( expected_output_path, 'rt' ).read()
- detail = ( compareOutputs( expected_output, actual_output, 'input' )
- or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
+ expected_output = open(expected_output_path, 'rt', encoding = 'utf-8').read()
+ detail = (compareOutputs(expected_output, actual_output, 'input')
+ or compareOutputs(expected_output, actual_rewrite_output, 'rewrite'))
if detail:
print('FAILED')
- failed_tests.append( (input_path, detail) )
+ failed_tests.append((input_path, detail))
else:
print('OK')
@@ -100,7 +124,7 @@ def runAllTests( jsontest_executable_path, input_dir = None,
print(failed_test[1])
print()
print('Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
- len(failed_tests) ))
+ len(failed_tests)))
return 1
else:
print('All %d tests passed.' % len(tests))
@@ -108,7 +132,7 @@ def runAllTests( jsontest_executable_path, input_dir = None,
def main():
from optparse import OptionParser
- parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
+ parser = OptionParser(usage="%prog [options] <path to jsontestrunner.exe> [test case directory]")
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
@@ -119,17 +143,32 @@ def main():
options, args = parser.parse_args()
if len(args) < 1 or len(args) > 2:
- parser.error( 'Must provides at least path to jsontestrunner executable.' )
- sys.exit( 1 )
+ parser.error('Must provide at least the path to the jsontestrunner executable.')
+ sys.exit(1)
- jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
+ jsontest_executable_path = os.path.normpath(os.path.abspath(args[0]))
if len(args) > 1:
- input_path = os.path.normpath( os.path.abspath( args[1] ) )
+ input_path = os.path.normpath(os.path.abspath(args[1]))
else:
input_path = None
- status = runAllTests( jsontest_executable_path, input_path,
- use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
- sys.exit( status )
+ status = runAllTests(jsontest_executable_path, input_path,
+ use_valgrind=options.valgrind,
+ with_json_checker=options.with_json_checker,
+ writerClass='StyledWriter')
+ if status:
+ sys.exit(status)
+ status = runAllTests(jsontest_executable_path, input_path,
+ use_valgrind=options.valgrind,
+ with_json_checker=options.with_json_checker,
+ writerClass='StyledStreamWriter')
+ if status:
+ sys.exit(status)
+ status = runAllTests(jsontest_executable_path, input_path,
+ use_valgrind=options.valgrind,
+ with_json_checker=options.with_json_checker,
+ writerClass='BuiltStyledStreamWriter')
+ if status:
+ sys.exit(status)
if __name__ == '__main__':
main()
diff --git a/3rdparty/jsoncpp/test/rununittests.py b/3rdparty/jsoncpp/test/rununittests.py
index 6279f80e3c6..08850ec1a1d 100644
--- a/3rdparty/jsoncpp/test/rununittests.py
+++ b/3rdparty/jsoncpp/test/rununittests.py
@@ -1,4 +1,11 @@
+# Copyright 2009 Baptiste Lepilleur
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
from __future__ import print_function
+from __future__ import unicode_literals
+from io import open
from glob import glob
import sys
import os
@@ -9,37 +16,41 @@ import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
- def __init__( self, test_exe_path, use_valgrind=False ):
- self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
+ def __init__(self, test_exe_path, use_valgrind=False):
+ self.test_exe_path = os.path.normpath(os.path.abspath(test_exe_path))
self.use_valgrind = use_valgrind
- def run( self, options ):
+ def run(self, options):
if self.use_valgrind:
cmd = VALGRIND_CMD.split()
else:
cmd = []
- cmd.extend( [self.test_exe_path, '--test-auto'] + options )
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+ cmd.extend([self.test_exe_path, '--test-auto'] + options)
+ try:
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except:
+ print(cmd)
+ raise
stdout = process.communicate()[0]
if process.returncode:
return False, stdout
return True, stdout
-def runAllTests( exe_path, use_valgrind=False ):
- test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
- status, test_names = test_proxy.run( ['--list-tests'] )
+def runAllTests(exe_path, use_valgrind=False):
+ test_proxy = TestProxy(exe_path, use_valgrind=use_valgrind)
+ status, test_names = test_proxy.run(['--list-tests'])
if not status:
print("Failed to obtain unit tests list:\n" + test_names, file=sys.stderr)
return 1
- test_names = [name.strip() for name in test_names.strip().split('\n')]
+ test_names = [name.strip() for name in test_names.decode('utf-8').strip().split('\n')]
failures = []
for name in test_names:
print('TESTING %s:' % name, end=' ')
- succeed, result = test_proxy.run( ['--test', name] )
+ succeed, result = test_proxy.run(['--test', name])
if succeed:
print('OK')
else:
- failures.append( (name, result) )
+ failures.append((name, result))
print('FAILED')
failed_count = len(failures)
pass_count = len(test_names) - failed_count
@@ -47,8 +58,7 @@ def runAllTests( exe_path, use_valgrind=False ):
print()
for name, result in failures:
print(result)
- print('%d/%d tests passed (%d failure(s))' % (
- pass_count, len(test_names), failed_count))
+ print('%d/%d tests passed (%d failure(s))' % (pass_count, len(test_names), failed_count))
return 1
else:
print('All %d tests passed' % len(test_names))
@@ -56,7 +66,7 @@ def runAllTests( exe_path, use_valgrind=False ):
def main():
from optparse import OptionParser
- parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
+ parser = OptionParser(usage="%prog [options] <path to test_lib_json.exe>")
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
@@ -64,11 +74,11 @@ def main():
options, args = parser.parse_args()
if len(args) != 1:
- parser.error( 'Must provides at least path to test_lib_json executable.' )
- sys.exit( 1 )
+ parser.error('Must provide at least the path to the test_lib_json executable.')
+ sys.exit(1)
- exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
- sys.exit( exit_code )
+ exit_code = runAllTests(args[0], use_valgrind=options.valgrind)
+ sys.exit(exit_code)
if __name__ == '__main__':
main()
diff --git a/3rdparty/jsoncpp/travis.sh b/3rdparty/jsoncpp/travis.sh
new file mode 100644
index 00000000000..2b25f4766fd
--- /dev/null
+++ b/3rdparty/jsoncpp/travis.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env sh
+# This is called by `.travis.yml` via Travis CI.
+# Travis supplies $TRAVIS_OS_NAME.
+# http://docs.travis-ci.com/user/multi-os/
+# Our .travis.yml also defines:
+# - SHARED_LIB=ON/OFF
+# - STATIC_LIB=ON/OFF
+# - CMAKE_PKG=ON/OFF
+# - BUILD_TYPE=release/debug
+# - VERBOSE_MAKE=false/true
+# - VERBOSE (set or not)
+
+# -e: fail on error
+# -v: show commands
+# -x: show expanded commands
+set -vex
+
+env | sort
+
+cmake -DJSONCPP_WITH_CMAKE_PACKAGE=$CMAKE_PKG -DBUILD_SHARED_LIBS=$SHARED_LIB -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_VERBOSE_MAKEFILE=$VERBOSE_MAKE .
+make
+
+# Python is not available in Travis for osx.
+# https://github.com/travis-ci/travis-ci/issues/2320
+if [ "$TRAVIS_OS_NAME" != "osx" ]
+then
+ make jsoncpp_check
+ valgrind --error-exitcode=42 --leak-check=full ./src/test_lib_json/jsoncpp_test
+fi
diff --git a/3rdparty/jsoncpp/version b/3rdparty/jsoncpp/version
index 6314daca8b0..fdd3be6df54 100644
--- a/3rdparty/jsoncpp/version
+++ b/3rdparty/jsoncpp/version
@@ -1 +1 @@
-1.1.0 \ No newline at end of file
+1.6.2
diff --git a/3rdparty/jsoncpp/version.in b/3rdparty/jsoncpp/version.in
new file mode 100644
index 00000000000..bfc03f7dd72
--- /dev/null
+++ b/3rdparty/jsoncpp/version.in
@@ -0,0 +1 @@
+@JSONCPP_VERSION@