From 08b0a87225d583227801b50084aae5e65f15b030 Mon Sep 17 00:00:00 2001 From: InoriRus Date: Wed, 13 Apr 2022 20:36:34 +1000 Subject: [PATCH] more APIs --- appveyor.yml | 2 +- source/.clang-tidy | 2 +- source/3rdparty/CMakeLists.txt | 1 + source/3rdparty/cpuinfo/CMakeLists.txt | 45 + source/3rdparty/cpuinfo/LICENSE | 27 + source/3rdparty/cpuinfo/deps/clog/LICENSE | 26 + .../3rdparty/cpuinfo/deps/clog/include/clog.h | 100 + source/3rdparty/cpuinfo/deps/clog/src/clog.c | 423 ++ .../3rdparty/cpuinfo/include/cpuinfo-mock.h | 78 + source/3rdparty/cpuinfo/include/cpuinfo.h | 1894 +++++ source/3rdparty/cpuinfo/src/api.c | 410 ++ source/3rdparty/cpuinfo/src/arm/android/api.h | 20 + .../cpuinfo/src/arm/android/properties.c | 67 + source/3rdparty/cpuinfo/src/arm/api.h | 122 + source/3rdparty/cpuinfo/src/arm/cache.c | 1737 +++++ .../cpuinfo/src/arm/linux/aarch32-isa.c | 271 + .../cpuinfo/src/arm/linux/aarch64-isa.c | 139 + source/3rdparty/cpuinfo/src/arm/linux/api.h | 384 + .../3rdparty/cpuinfo/src/arm/linux/chipset.c | 3937 ++++++++++ .../3rdparty/cpuinfo/src/arm/linux/clusters.c | 493 ++ source/3rdparty/cpuinfo/src/arm/linux/cp.h | 44 + .../3rdparty/cpuinfo/src/arm/linux/cpuinfo.c | 908 +++ source/3rdparty/cpuinfo/src/arm/linux/hwcap.c | 159 + source/3rdparty/cpuinfo/src/arm/linux/init.c | 765 ++ source/3rdparty/cpuinfo/src/arm/linux/midr.c | 863 +++ source/3rdparty/cpuinfo/src/arm/mach/init.c | 619 ++ source/3rdparty/cpuinfo/src/arm/midr.h | 260 + source/3rdparty/cpuinfo/src/arm/tlb.c | 133 + source/3rdparty/cpuinfo/src/arm/uarch.c | 370 + source/3rdparty/cpuinfo/src/cache.c | 18 + source/3rdparty/cpuinfo/src/cpuinfo/common.h | 40 + .../cpuinfo/src/cpuinfo/internal-api.h | 62 + source/3rdparty/cpuinfo/src/cpuinfo/log.h | 17 + source/3rdparty/cpuinfo/src/cpuinfo/utils.h | 19 + source/3rdparty/cpuinfo/src/emscripten/init.c | 277 + source/3rdparty/cpuinfo/src/init.c | 59 + source/3rdparty/cpuinfo/src/linux/api.h | 59 + source/3rdparty/cpuinfo/src/linux/cpulist.c | 214 + source/3rdparty/cpuinfo/src/linux/mockfile.c | 105 + source/3rdparty/cpuinfo/src/linux/multiline.c | 106 + .../3rdparty/cpuinfo/src/linux/processors.c | 406 ++ source/3rdparty/cpuinfo/src/linux/smallfile.c | 70 + source/3rdparty/cpuinfo/src/mach/api.h | 16 + source/3rdparty/cpuinfo/src/mach/topology.c | 73 + source/3rdparty/cpuinfo/src/x86/api.h | 159 + .../cpuinfo/src/x86/cache/descriptor.c | 1726 +++++ .../cpuinfo/src/x86/cache/deterministic.c | 257 + source/3rdparty/cpuinfo/src/x86/cache/init.c | 88 + source/3rdparty/cpuinfo/src/x86/cpuid.h | 79 + source/3rdparty/cpuinfo/src/x86/info.c | 19 + source/3rdparty/cpuinfo/src/x86/init.c | 75 + source/3rdparty/cpuinfo/src/x86/isa.c | 724 ++ source/3rdparty/cpuinfo/src/x86/linux/api.h | 20 + .../3rdparty/cpuinfo/src/x86/linux/cpuinfo.c | 207 + source/3rdparty/cpuinfo/src/x86/linux/init.c | 629 ++ source/3rdparty/cpuinfo/src/x86/mach/init.c | 356 + source/3rdparty/cpuinfo/src/x86/mockcpuid.c | 70 + source/3rdparty/cpuinfo/src/x86/name.c | 708 ++ source/3rdparty/cpuinfo/src/x86/topology.c | 127 + source/3rdparty/cpuinfo/src/x86/uarch.c | 241 + source/3rdparty/cpuinfo/src/x86/vendor.c | 189 + source/3rdparty/cpuinfo/src/x86/windows/api.h | 41 + .../3rdparty/cpuinfo/src/x86/windows/init.c | 634 ++ source/3rdparty/gtest/LICENSE | 28 + .../gtest/include/gtest/gtest-death-test.h | 343 + .../gtest/include/gtest/gtest-matchers.h | 750 ++ .../gtest/include/gtest/gtest-message.h | 219 + .../gtest/include/gtest/gtest-param-test.h | 511 ++ .../gtest/include/gtest/gtest-printers.h | 926 +++ 
.../3rdparty/gtest/include/gtest/gtest-spi.h | 238 + .../gtest/include/gtest/gtest-test-part.h | 184 + .../gtest/include/gtest/gtest-typed-test.h | 337 + source/3rdparty/gtest/include/gtest/gtest.h | 2477 +++++++ .../gtest/include/gtest/gtest_pred_impl.h | 359 + .../3rdparty/gtest/include/gtest/gtest_prod.h | 61 + .../include/gtest/internal/custom/README.md | 56 + .../gtest/internal/custom/gtest-port.h | 37 + .../gtest/internal/custom/gtest-printers.h | 42 + .../include/gtest/internal/custom/gtest.h | 37 + .../internal/gtest-death-test-internal.h | 304 + .../include/gtest/internal/gtest-filepath.h | 211 + .../include/gtest/internal/gtest-internal.h | 1432 ++++ .../include/gtest/internal/gtest-param-util.h | 934 +++ .../include/gtest/internal/gtest-port-arch.h | 111 + .../gtest/include/gtest/internal/gtest-port.h | 2223 ++++++ .../include/gtest/internal/gtest-string.h | 172 + .../include/gtest/internal/gtest-type-util.h | 183 + source/3rdparty/gtest/src/gtest-all.cc | 48 + source/3rdparty/gtest/src/gtest-death-test.cc | 1653 +++++ source/3rdparty/gtest/src/gtest-filepath.cc | 382 + .../3rdparty/gtest/src/gtest-internal-inl.h | 1218 ++++ source/3rdparty/gtest/src/gtest-matchers.cc | 97 + source/3rdparty/gtest/src/gtest-port.cc | 1403 ++++ source/3rdparty/gtest/src/gtest-printers.cc | 442 ++ source/3rdparty/gtest/src/gtest-test-part.cc | 108 + source/3rdparty/gtest/src/gtest-typed-test.cc | 121 + source/3rdparty/gtest/src/gtest.cc | 6359 +++++++++++++++++ source/3rdparty/gtest/src/gtest_main.cc | 54 + source/CMakeLists.txt | 18 +- source/KytyScripts.cpp | 8 +- source/emulator/CMakeLists.txt | 3 +- source/emulator/include/Emulator/Audio.h | 11 + source/emulator/include/Emulator/Libs/Errno.h | 58 +- .../emulator/include/Emulator/Libs/Printf.h | 14 +- source/emulator/include/Emulator/Network.h | 6 + .../emulator/include/Emulator/RuntimeLinker.h | 1 + .../emulator/include/Emulator/VirtualMemory.h | 4 + source/emulator/src/Audio.cpp | 288 + source/emulator/src/Graphics/Tile.cpp | 42 + source/emulator/src/Graphics/Window.cpp | 7 +- source/emulator/src/Kernel/Memory.cpp | 2 + source/emulator/src/Kernel/Pthread.cpp | 13 +- source/emulator/src/Kyty.cpp | 25 +- source/emulator/src/Libs/LibAudio.cpp | 17 + source/emulator/src/Libs/LibC.cpp | 73 +- source/emulator/src/Libs/LibKernel.cpp | 17 + source/emulator/src/Libs/LibNet.cpp | 14 + source/emulator/src/Libs/LibSaveData.cpp | 62 + source/emulator/src/Libs/LibSysmodule.cpp | 16 +- source/emulator/src/Libs/LibUserService.cpp | 20 + source/emulator/src/Libs/Libs.cpp | 4 +- source/emulator/src/Libs/Printf.cpp | 52 +- source/emulator/src/Network.cpp | 125 +- source/emulator/src/RuntimeLinker.cpp | 20 +- source/emulator/src/VirtualMemory.cpp | 16 +- source/include/Kyty/Core/MSpace.h | 37 + source/include/Kyty/UnitTest.h | 54 + source/lib/Core/src/MSpace.cpp | 555 ++ source/src_script.cmake | 1 + source/unit_test/.clang-tidy | 50 + source/unit_test/CMakeLists.txt | 39 + source/unit_test/src/UnitTest.cpp | 22 + .../unit_test/src/core/UnitTestCoreMSpace.cpp | 230 + .../unit_test/src/core/UnitTestCoreString.cpp | 1022 +++ 134 files changed, 49099 insertions(+), 96 deletions(-) create mode 100644 source/3rdparty/cpuinfo/CMakeLists.txt create mode 100644 source/3rdparty/cpuinfo/LICENSE create mode 100644 source/3rdparty/cpuinfo/deps/clog/LICENSE create mode 100644 source/3rdparty/cpuinfo/deps/clog/include/clog.h create mode 100644 source/3rdparty/cpuinfo/deps/clog/src/clog.c create mode 100644 source/3rdparty/cpuinfo/include/cpuinfo-mock.h create mode 100644 
source/3rdparty/cpuinfo/include/cpuinfo.h create mode 100644 source/3rdparty/cpuinfo/src/api.c create mode 100644 source/3rdparty/cpuinfo/src/arm/android/api.h create mode 100644 source/3rdparty/cpuinfo/src/arm/android/properties.c create mode 100644 source/3rdparty/cpuinfo/src/arm/api.h create mode 100644 source/3rdparty/cpuinfo/src/arm/cache.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/aarch32-isa.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/aarch64-isa.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/api.h create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/chipset.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/clusters.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/cp.h create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/cpuinfo.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/hwcap.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/init.c create mode 100644 source/3rdparty/cpuinfo/src/arm/linux/midr.c create mode 100644 source/3rdparty/cpuinfo/src/arm/mach/init.c create mode 100644 source/3rdparty/cpuinfo/src/arm/midr.h create mode 100644 source/3rdparty/cpuinfo/src/arm/tlb.c create mode 100644 source/3rdparty/cpuinfo/src/arm/uarch.c create mode 100644 source/3rdparty/cpuinfo/src/cache.c create mode 100644 source/3rdparty/cpuinfo/src/cpuinfo/common.h create mode 100644 source/3rdparty/cpuinfo/src/cpuinfo/internal-api.h create mode 100644 source/3rdparty/cpuinfo/src/cpuinfo/log.h create mode 100644 source/3rdparty/cpuinfo/src/cpuinfo/utils.h create mode 100644 source/3rdparty/cpuinfo/src/emscripten/init.c create mode 100644 source/3rdparty/cpuinfo/src/init.c create mode 100644 source/3rdparty/cpuinfo/src/linux/api.h create mode 100644 source/3rdparty/cpuinfo/src/linux/cpulist.c create mode 100644 source/3rdparty/cpuinfo/src/linux/mockfile.c create mode 100644 source/3rdparty/cpuinfo/src/linux/multiline.c create mode 100644 source/3rdparty/cpuinfo/src/linux/processors.c create mode 100644 source/3rdparty/cpuinfo/src/linux/smallfile.c create mode 100644 source/3rdparty/cpuinfo/src/mach/api.h create mode 100644 source/3rdparty/cpuinfo/src/mach/topology.c create mode 100644 source/3rdparty/cpuinfo/src/x86/api.h create mode 100644 source/3rdparty/cpuinfo/src/x86/cache/descriptor.c create mode 100644 source/3rdparty/cpuinfo/src/x86/cache/deterministic.c create mode 100644 source/3rdparty/cpuinfo/src/x86/cache/init.c create mode 100644 source/3rdparty/cpuinfo/src/x86/cpuid.h create mode 100644 source/3rdparty/cpuinfo/src/x86/info.c create mode 100644 source/3rdparty/cpuinfo/src/x86/init.c create mode 100644 source/3rdparty/cpuinfo/src/x86/isa.c create mode 100644 source/3rdparty/cpuinfo/src/x86/linux/api.h create mode 100644 source/3rdparty/cpuinfo/src/x86/linux/cpuinfo.c create mode 100644 source/3rdparty/cpuinfo/src/x86/linux/init.c create mode 100644 source/3rdparty/cpuinfo/src/x86/mach/init.c create mode 100644 source/3rdparty/cpuinfo/src/x86/mockcpuid.c create mode 100644 source/3rdparty/cpuinfo/src/x86/name.c create mode 100644 source/3rdparty/cpuinfo/src/x86/topology.c create mode 100644 source/3rdparty/cpuinfo/src/x86/uarch.c create mode 100644 source/3rdparty/cpuinfo/src/x86/vendor.c create mode 100644 source/3rdparty/cpuinfo/src/x86/windows/api.h create mode 100644 source/3rdparty/cpuinfo/src/x86/windows/init.c create mode 100644 source/3rdparty/gtest/LICENSE create mode 100644 source/3rdparty/gtest/include/gtest/gtest-death-test.h create mode 100644 
source/3rdparty/gtest/include/gtest/gtest-matchers.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest-message.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest-param-test.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest-printers.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest-spi.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest-test-part.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest-typed-test.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest_pred_impl.h create mode 100644 source/3rdparty/gtest/include/gtest/gtest_prod.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/custom/README.md create mode 100644 source/3rdparty/gtest/include/gtest/internal/custom/gtest-port.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/custom/gtest-printers.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/custom/gtest.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-death-test-internal.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-filepath.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-internal.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-param-util.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-port-arch.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-port.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-string.h create mode 100644 source/3rdparty/gtest/include/gtest/internal/gtest-type-util.h create mode 100644 source/3rdparty/gtest/src/gtest-all.cc create mode 100644 source/3rdparty/gtest/src/gtest-death-test.cc create mode 100644 source/3rdparty/gtest/src/gtest-filepath.cc create mode 100644 source/3rdparty/gtest/src/gtest-internal-inl.h create mode 100644 source/3rdparty/gtest/src/gtest-matchers.cc create mode 100644 source/3rdparty/gtest/src/gtest-port.cc create mode 100644 source/3rdparty/gtest/src/gtest-printers.cc create mode 100644 source/3rdparty/gtest/src/gtest-test-part.cc create mode 100644 source/3rdparty/gtest/src/gtest-typed-test.cc create mode 100644 source/3rdparty/gtest/src/gtest.cc create mode 100644 source/3rdparty/gtest/src/gtest_main.cc create mode 100644 source/include/Kyty/Core/MSpace.h create mode 100644 source/include/Kyty/UnitTest.h create mode 100644 source/lib/Core/src/MSpace.cpp create mode 100644 source/unit_test/.clang-tidy create mode 100644 source/unit_test/CMakeLists.txt create mode 100644 source/unit_test/src/UnitTest.cpp create mode 100644 source/unit_test/src/core/UnitTestCoreMSpace.cpp create mode 100644 source/unit_test/src/core/UnitTestCoreString.cpp diff --git a/appveyor.yml b/appveyor.yml index 9243ce4..d8ac4aa 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,4 +1,4 @@ -version: 0.0.13.build-{build} +version: 0.0.14.build-{build} image: Visual Studio 2019 environment: matrix: diff --git a/source/.clang-tidy b/source/.clang-tidy index 4f4e0df..e8ff46b 100644 --- a/source/.clang-tidy +++ b/source/.clang-tidy @@ -9,7 +9,7 @@ CheckOptions: - key: cppcoreguidelines-pro-type-member-init.IgnoreArrays value: 1 - key: cppcoreguidelines-macro-usage.AllowedRegexp - value: (KYTY_*)|(EXIT*)|(ASSERT*) + value: (KYTY_*)|(EXIT*)|(ASSERT*)|(UT_*) - key: readability-identifier-naming.ClassCase value: CamelCase - key: readability-identifier-naming.MemberCase diff --git a/source/3rdparty/CMakeLists.txt 
b/source/3rdparty/CMakeLists.txt index 794efda..d1cab1a 100644 --- a/source/3rdparty/CMakeLists.txt +++ b/source/3rdparty/CMakeLists.txt @@ -1,3 +1,4 @@ +add_subdirectory(cpuinfo) add_subdirectory(lua) add_subdirectory(sdl2) add_subdirectory(rijndael) diff --git a/source/3rdparty/cpuinfo/CMakeLists.txt b/source/3rdparty/cpuinfo/CMakeLists.txt new file mode 100644 index 0000000..353b375 --- /dev/null +++ b/source/3rdparty/cpuinfo/CMakeLists.txt @@ -0,0 +1,45 @@ +file(GLOB cpuinfo_src + src/init.c + src/api.c + src/cache.c + src/x86/init.c + src/x86/info.c + src/x86/vendor.c + src/x86/uarch.c + src/x86/name.c + src/x86/topology.c + src/x86/isa.c + src/x86/cache/init.c + src/x86/cache/descriptor.c + src/x86/cache/deterministic.c + src/x86/windows/init.c + deps/clog/src/clog.c +) + +if (MINGW) + if (CLANG) + set_source_files_properties(${cpuinfo_src} PROPERTIES COMPILE_FLAGS "-Wno-unused-variable -Wno-implicit-function-declaration") + else() + set_source_files_properties(${cpuinfo_src} PROPERTIES COMPILE_FLAGS "-Wno-unused-variable -Wno-implicit-function-declaration -Wno-format -Wno-format-extra-args") + endif() +endif() + +if (MSVC) + if (CLANG) + set_source_files_properties(${cpuinfo_src} PROPERTIES COMPILE_FLAGS "-Wno-unused-variable -Wno-deprecated-declarations -Wno-implicit-function-declaration") + else() + set_source_files_properties(${cpuinfo_src} PROPERTIES COMPILE_FLAGS "") + endif() +endif() + +include_directories(include src deps/clog/include) + +#add_library(cpuinfo STATIC ${cpuinfo_src}) +add_library(cpuinfo_obj OBJECT ${cpuinfo_src}) +add_library(cpuinfo STATIC $) + + +target_include_directories(cpuinfo PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") + +target_include_directories(cpuinfo_obj PRIVATE $) + diff --git a/source/3rdparty/cpuinfo/LICENSE b/source/3rdparty/cpuinfo/LICENSE new file mode 100644 index 0000000..3f9a4f0 --- /dev/null +++ b/source/3rdparty/cpuinfo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019 Google LLC +Copyright (c) 2017-2018 Facebook Inc. +Copyright (C) 2012-2017 Georgia Institute of Technology +Copyright (C) 2010-2012 Marat Dukhan + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
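The CMakeLists.txt above compiles the x86/Windows subset of cpuinfo (plus its clog dependency) into an object library and wraps it in a static cpuinfo target with a public include directory, which other targets in the build can link against. A minimal sketch of a consumer under that assumption; the topology getters used below are part of cpuinfo's public API declared further down in cpuinfo.h than the portion quoted in this patch excerpt, and the program itself is illustrative, not part of the patch:

    #include <stdio.h>
    #include <cpuinfo.h>

    int main(void) {
        /* cpuinfo_initialize() runs the CPUID/Windows probing code added by this patch. */
        if (!cpuinfo_initialize()) {
            fprintf(stderr, "cpuinfo initialization failed\n");
            return 1;
        }
        /* Query the detected package name and core/processor counts. */
        printf("package: %s\n", cpuinfo_get_package(0)->name);
        printf("cores: %u, logical processors: %u\n",
               cpuinfo_get_cores_count(), cpuinfo_get_processors_count());
        cpuinfo_deinitialize();
        return 0;
    }

How Kyty wires cpuinfo into its own emulator targets is outside the hunks shown here; the sketch only illustrates the library boundary this CMakeLists.txt establishes.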
diff --git a/source/3rdparty/cpuinfo/deps/clog/LICENSE b/source/3rdparty/cpuinfo/deps/clog/LICENSE new file mode 100644 index 0000000..306de3d --- /dev/null +++ b/source/3rdparty/cpuinfo/deps/clog/LICENSE @@ -0,0 +1,26 @@ +Copyright (C) 2018 Marat Dukhan +Copyright (c) 2017-2018 Facebook Inc. +Copyright (c) 2017 Georgia Institute of Technology + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/3rdparty/cpuinfo/deps/clog/include/clog.h b/source/3rdparty/cpuinfo/deps/clog/include/clog.h new file mode 100644 index 0000000..4143761 --- /dev/null +++ b/source/3rdparty/cpuinfo/deps/clog/include/clog.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +#define CLOG_NONE 0 +#define CLOG_FATAL 1 +#define CLOG_ERROR 2 +#define CLOG_WARNING 3 +#define CLOG_INFO 4 +#define CLOG_DEBUG 5 + +#ifndef CLOG_VISIBILITY + #if defined(__ELF__) + #define CLOG_VISIBILITY __attribute__((__visibility__("internal"))) + #elif defined(__MACH__) + #define CLOG_VISIBILITY __attribute__((__visibility__("hidden"))) + #else + #define CLOG_VISIBILITY + #endif +#endif + +#ifndef CLOG_ARGUMENTS_FORMAT + #if defined(__GNUC__) + #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2))) + #else + #define CLOG_ARGUMENTS_FORMAT + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args); + +#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_debug_function_name(const char* format, ...) { \ + if (level >= CLOG_DEBUG) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_debug(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_info_function_name(const char* format, ...) 
{ \ + if (level >= CLOG_INFO) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_info(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_warning_function_name(const char* format, ...) { \ + if (level >= CLOG_WARNING) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_warning(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_error_function_name(const char* format, ...) { \ + if (level >= CLOG_ERROR) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_error(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_fatal_function_name(const char* format, ...) { \ + if (level >= CLOG_FATAL) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_fatal(module, format, args); \ + va_end(args); \ + } \ + abort(); \ + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/source/3rdparty/cpuinfo/deps/clog/src/clog.c b/source/3rdparty/cpuinfo/deps/clog/src/clog.c new file mode 100644 index 0000000..fe5d43e --- /dev/null +++ b/source/3rdparty/cpuinfo/deps/clog/src/clog.c @@ -0,0 +1,423 @@ +#include +#include +#include +#include +#ifdef _WIN32 + #include +#else + #include +#endif +#ifdef __ANDROID__ + #include +#endif + +#ifndef CLOG_LOG_TO_STDIO + #ifdef __ANDROID__ + #define CLOG_LOG_TO_STDIO 0 + #else + #define CLOG_LOG_TO_STDIO 1 + #endif +#endif + +#include + + +/* Messages up to this size are formatted entirely on-stack, and don't allocate heap memory */ +#define CLOG_STACK_BUFFER_SIZE 1024 + +#define CLOG_FATAL_PREFIX "Fatal error: " +#define CLOG_FATAL_PREFIX_LENGTH 13 +#define CLOG_FATAL_PREFIX_FORMAT "Fatal error in %s: " +#define CLOG_ERROR_PREFIX "Error: " +#define CLOG_ERROR_PREFIX_LENGTH 7 +#define CLOG_ERROR_PREFIX_FORMAT "Error in %s: " +#define CLOG_WARNING_PREFIX "Warning: " +#define CLOG_WARNING_PREFIX_LENGTH 9 +#define CLOG_WARNING_PREFIX_FORMAT "Warning in %s: " +#define CLOG_INFO_PREFIX "Note: " +#define CLOG_INFO_PREFIX_LENGTH 6 +#define CLOG_INFO_PREFIX_FORMAT "Note (%s): " +#define CLOG_DEBUG_PREFIX "Debug: " +#define CLOG_DEBUG_PREFIX_LENGTH 7 +#define CLOG_DEBUG_PREFIX_FORMAT "Debug (%s): " +#define CLOG_SUFFIX_LENGTH 1 + +void clog_vlog_fatal(const char* module, const char* format, va_list args) { + #if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO + __android_log_vprint(ANDROID_LOG_FATAL, module, format, args); + #else + char stack_buffer[CLOG_STACK_BUFFER_SIZE]; + char* heap_buffer = NULL; + char* out_buffer = &stack_buffer[0]; + + /* The first call to vsnprintf will clobber args, thus need a copy in case a second vsnprintf call is needed */ + va_list args_copy; + va_copy(args_copy, args); + + int prefix_chars = CLOG_FATAL_PREFIX_LENGTH; + if (module == NULL) { + memcpy(stack_buffer, CLOG_FATAL_PREFIX, CLOG_FATAL_PREFIX_LENGTH); + } else { + prefix_chars = snprintf(stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_FATAL_PREFIX_FORMAT, module); + if (prefix_chars < 0) { + /* Format error in prefix (possible if prefix is modified): skip prefix and continue as if nothing happened. 
*/ + prefix_chars = 0; + } + } + + int format_chars; + if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) { + /* + * Prefix + suffix alone would overflow the on-stack buffer, thus need to use on-heap buffer. + * Do not even try to format the string into on-stack buffer. + */ + format_chars = vsnprintf(NULL, 0, format, args); + } else { + format_chars = + vsnprintf( + &stack_buffer[prefix_chars], + CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH, + format, + args); + } + if (format_chars < 0) { + /* Format error in the message: silently ignore this particular message. */ + goto cleanup; + } + if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH > CLOG_STACK_BUFFER_SIZE) { + /* Allocate a buffer on heap, and vsnprintf to this buffer */ + heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + if (heap_buffer == NULL) { + goto cleanup; + } + + if (prefix_chars > CLOG_STACK_BUFFER_SIZE) { + /* Prefix didn't fit into on-stack buffer, re-format it again to on-heap buffer */ + snprintf(heap_buffer, prefix_chars + 1 /* for '\0'-terminator */, CLOG_FATAL_PREFIX_FORMAT, module); + } else { + /* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */ + memcpy(heap_buffer, stack_buffer, prefix_chars); + } + vsnprintf(heap_buffer + prefix_chars, format_chars + CLOG_SUFFIX_LENGTH, format, args_copy); + out_buffer = heap_buffer; + } + out_buffer[prefix_chars + format_chars] = '\n'; + #ifdef _WIN32 + DWORD bytes_written; + WriteFile( + GetStdHandle(STD_ERROR_HANDLE), + out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH, + &bytes_written, NULL); + #else + write(STDERR_FILENO, out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + #endif + +cleanup: + free(heap_buffer); + va_end(args_copy); + #endif +} + +void clog_vlog_error(const char* module, const char* format, va_list args) { + #if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO + __android_log_vprint(ANDROID_LOG_ERROR, module, format, args); + #else + char stack_buffer[CLOG_STACK_BUFFER_SIZE]; + char* heap_buffer = NULL; + char* out_buffer = &stack_buffer[0]; + + /* The first call to vsnprintf will clobber args, thus need a copy in case a second vsnprintf call is needed */ + va_list args_copy; + va_copy(args_copy, args); + + int prefix_chars = CLOG_ERROR_PREFIX_LENGTH; + if (module == NULL) { + memcpy(stack_buffer, CLOG_ERROR_PREFIX, CLOG_ERROR_PREFIX_LENGTH); + } else { + prefix_chars = snprintf(stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_ERROR_PREFIX_FORMAT, module); + if (prefix_chars < 0) { + /* Format error in prefix (possible if prefix is modified): skip prefix and continue as if nothing happened. */ + prefix_chars = 0; + } + } + + int format_chars; + if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) { + /* + * Prefix + suffix alone would overflow the on-stack buffer, thus need to use on-heap buffer. + * Do not even try to format the string into on-stack buffer. + */ + format_chars = vsnprintf(NULL, 0, format, args); + } else { + format_chars = + vsnprintf( + &stack_buffer[prefix_chars], + CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH, + format, + args); + } + if (format_chars < 0) { + /* Format error in the message: silently ignore this particular message. 
*/ + goto cleanup; + } + if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH > CLOG_STACK_BUFFER_SIZE) { + /* Allocate a buffer on heap, and vsnprintf to this buffer */ + heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + if (heap_buffer == NULL) { + goto cleanup; + } + + if (prefix_chars > CLOG_STACK_BUFFER_SIZE) { + /* Prefix didn't fit into on-stack buffer, re-format it again to on-heap buffer */ + snprintf(heap_buffer, prefix_chars + 1 /* for '\0'-terminator */, CLOG_ERROR_PREFIX_FORMAT, module); + } else { + /* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */ + memcpy(heap_buffer, stack_buffer, prefix_chars); + } + vsnprintf(heap_buffer + prefix_chars, format_chars + CLOG_SUFFIX_LENGTH, format, args_copy); + out_buffer = heap_buffer; + } + out_buffer[prefix_chars + format_chars] = '\n'; + #ifdef _WIN32 + DWORD bytes_written; + WriteFile( + GetStdHandle(STD_ERROR_HANDLE), + out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH, + &bytes_written, NULL); + #else + write(STDERR_FILENO, out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + #endif + +cleanup: + free(heap_buffer); + va_end(args_copy); + #endif +} + +void clog_vlog_warning(const char* module, const char* format, va_list args) { + #if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO + __android_log_vprint(ANDROID_LOG_WARN, module, format, args); + #else + char stack_buffer[CLOG_STACK_BUFFER_SIZE]; + char* heap_buffer = NULL; + char* out_buffer = &stack_buffer[0]; + + /* The first call to vsnprintf will clobber args, thus need a copy in case a second vsnprintf call is needed */ + va_list args_copy; + va_copy(args_copy, args); + + int prefix_chars = CLOG_WARNING_PREFIX_LENGTH; + if (module == NULL) { + memcpy(stack_buffer, CLOG_WARNING_PREFIX, CLOG_WARNING_PREFIX_LENGTH); + } else { + prefix_chars = snprintf(stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_WARNING_PREFIX_FORMAT, module); + if (prefix_chars < 0) { + /* Format error in prefix (possible if prefix is modified): skip prefix and continue as if nothing happened. */ + prefix_chars = 0; + } + } + + int format_chars; + if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) { + /* + * Prefix + suffix alone would overflow the on-stack buffer, thus need to use on-heap buffer. + * Do not even try to format the string into on-stack buffer. + */ + format_chars = vsnprintf(NULL, 0, format, args); + } else { + format_chars = + vsnprintf( + &stack_buffer[prefix_chars], + CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH, + format, + args); + } + if (format_chars < 0) { + /* Format error in the message: silently ignore this particular message. 
*/ + goto cleanup; + } + if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH > CLOG_STACK_BUFFER_SIZE) { + /* Allocate a buffer on heap, and vsnprintf to this buffer */ + heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + if (heap_buffer == NULL) { + goto cleanup; + } + + if (prefix_chars > CLOG_STACK_BUFFER_SIZE) { + /* Prefix didn't fit into on-stack buffer, re-format it again to on-heap buffer */ + snprintf(heap_buffer, prefix_chars + 1 /* for '\0'-terminator */, CLOG_WARNING_PREFIX_FORMAT, module); + } else { + /* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */ + memcpy(heap_buffer, stack_buffer, prefix_chars); + } + vsnprintf(heap_buffer + prefix_chars, format_chars + CLOG_SUFFIX_LENGTH, format, args_copy); + out_buffer = heap_buffer; + } + out_buffer[prefix_chars + format_chars] = '\n'; + #ifdef _WIN32 + DWORD bytes_written; + WriteFile( + GetStdHandle(STD_ERROR_HANDLE), + out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH, + &bytes_written, NULL); + #else + write(STDERR_FILENO, out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + #endif + +cleanup: + free(heap_buffer); + va_end(args_copy); + #endif +} + +void clog_vlog_info(const char* module, const char* format, va_list args) { + #if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO + __android_log_vprint(ANDROID_LOG_INFO, module, format, args); + #else + char stack_buffer[CLOG_STACK_BUFFER_SIZE]; + char* heap_buffer = NULL; + char* out_buffer = &stack_buffer[0]; + + /* The first call to vsnprintf will clobber args, thus need a copy in case a second vsnprintf call is needed */ + va_list args_copy; + va_copy(args_copy, args); + + int prefix_chars = CLOG_INFO_PREFIX_LENGTH; + if (module == NULL) { + memcpy(stack_buffer, CLOG_INFO_PREFIX, CLOG_INFO_PREFIX_LENGTH); + } else { + prefix_chars = snprintf(stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_INFO_PREFIX_FORMAT, module); + if (prefix_chars < 0) { + /* Format error in prefix (possible if prefix is modified): skip prefix and continue as if nothing happened. */ + prefix_chars = 0; + } + } + + int format_chars; + if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) { + /* + * Prefix + suffix alone would overflow the on-stack buffer, thus need to use on-heap buffer. + * Do not even try to format the string into on-stack buffer. + */ + format_chars = vsnprintf(NULL, 0, format, args); + } else { + format_chars = + vsnprintf( + &stack_buffer[prefix_chars], + CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH, + format, + args); + } + if (format_chars < 0) { + /* Format error in the message: silently ignore this particular message. 
*/ + goto cleanup; + } + if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH > CLOG_STACK_BUFFER_SIZE) { + /* Allocate a buffer on heap, and vsnprintf to this buffer */ + heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + if (heap_buffer == NULL) { + goto cleanup; + } + + if (prefix_chars > CLOG_STACK_BUFFER_SIZE) { + /* Prefix didn't fit into on-stack buffer, re-format it again to on-heap buffer */ + snprintf(heap_buffer, prefix_chars + 1 /* for '\0'-terminator */, CLOG_INFO_PREFIX_FORMAT, module); + } else { + /* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */ + memcpy(heap_buffer, stack_buffer, prefix_chars); + } + vsnprintf(heap_buffer + prefix_chars, format_chars + CLOG_SUFFIX_LENGTH, format, args_copy); + out_buffer = heap_buffer; + } + out_buffer[prefix_chars + format_chars] = '\n'; + #ifdef _WIN32 + DWORD bytes_written; + WriteFile( + GetStdHandle(STD_OUTPUT_HANDLE), + out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH, + &bytes_written, NULL); + #else + write(STDOUT_FILENO, out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + #endif + +cleanup: + free(heap_buffer); + va_end(args_copy); + #endif +} + +void clog_vlog_debug(const char* module, const char* format, va_list args) { + #if defined(__ANDROID__) && !CLOG_LOG_TO_STDIO + __android_log_vprint(ANDROID_LOG_DEBUG, module, format, args); + #else + char stack_buffer[CLOG_STACK_BUFFER_SIZE]; + char* heap_buffer = NULL; + char* out_buffer = &stack_buffer[0]; + + /* The first call to vsnprintf will clobber args, thus need a copy in case a second vsnprintf call is needed */ + va_list args_copy; + va_copy(args_copy, args); + + int prefix_chars = CLOG_DEBUG_PREFIX_LENGTH; + if (module == NULL) { + memcpy(stack_buffer, CLOG_DEBUG_PREFIX, CLOG_DEBUG_PREFIX_LENGTH); + } else { + prefix_chars = snprintf(stack_buffer, CLOG_STACK_BUFFER_SIZE, CLOG_DEBUG_PREFIX_FORMAT, module); + if (prefix_chars < 0) { + /* Format error in prefix (possible if prefix is modified): skip prefix and continue as if nothing happened. */ + prefix_chars = 0; + } + } + + int format_chars; + if (prefix_chars + CLOG_SUFFIX_LENGTH >= CLOG_STACK_BUFFER_SIZE) { + /* + * Prefix + suffix alone would overflow the on-stack buffer, thus need to use on-heap buffer. + * Do not even try to format the string into on-stack buffer. + */ + format_chars = vsnprintf(NULL, 0, format, args); + } else { + format_chars = + vsnprintf( + &stack_buffer[prefix_chars], + CLOG_STACK_BUFFER_SIZE - prefix_chars - CLOG_SUFFIX_LENGTH, + format, + args); + } + if (format_chars < 0) { + /* Format error in the message: silently ignore this particular message. 
*/ + goto cleanup; + } + if (prefix_chars + format_chars + CLOG_SUFFIX_LENGTH > CLOG_STACK_BUFFER_SIZE) { + /* Allocate a buffer on heap, and vsnprintf to this buffer */ + heap_buffer = malloc(prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + if (heap_buffer == NULL) { + goto cleanup; + } + + if (prefix_chars > CLOG_STACK_BUFFER_SIZE) { + /* Prefix didn't fit into on-stack buffer, re-format it again to on-heap buffer */ + snprintf(heap_buffer, prefix_chars + 1 /* for '\0'-terminator */, CLOG_DEBUG_PREFIX_FORMAT, module); + } else { + /* Copy pre-formatted prefix from on-stack buffer to on-heap buffer */ + memcpy(heap_buffer, stack_buffer, prefix_chars); + } + vsnprintf(heap_buffer + prefix_chars, format_chars + CLOG_SUFFIX_LENGTH, format, args_copy); + out_buffer = heap_buffer; + } + out_buffer[prefix_chars + format_chars] = '\n'; + #ifdef _WIN32 + DWORD bytes_written; + WriteFile( + GetStdHandle(STD_OUTPUT_HANDLE), + out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH, + &bytes_written, NULL); + #else + write(STDOUT_FILENO, out_buffer, prefix_chars + format_chars + CLOG_SUFFIX_LENGTH); + #endif + +cleanup: + free(heap_buffer); + va_end(args_copy); + #endif +} diff --git a/source/3rdparty/cpuinfo/include/cpuinfo-mock.h b/source/3rdparty/cpuinfo/include/cpuinfo-mock.h new file mode 100644 index 0000000..3c1f637 --- /dev/null +++ b/source/3rdparty/cpuinfo/include/cpuinfo-mock.h @@ -0,0 +1,78 @@ +#pragma once +#ifndef CPUINFO_MOCK_H +#define CPUINFO_MOCK_H + +#include +#include + +#include +#if defined(__linux__) + #include +#endif + +#if !defined(CPUINFO_MOCK) || !(CPUINFO_MOCK) + #error This header is intended only for test use +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + + +#if CPUINFO_ARCH_ARM + void CPUINFO_ABI cpuinfo_set_fpsid(uint32_t fpsid); + void CPUINFO_ABI cpuinfo_set_wcid(uint32_t wcid); +#endif /* CPUINFO_ARCH_ARM */ + +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + struct cpuinfo_mock_cpuid { + uint32_t input_eax; + uint32_t input_ecx; + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + }; + + void CPUINFO_ABI cpuinfo_mock_set_cpuid(struct cpuinfo_mock_cpuid* dump, size_t entries); + void CPUINFO_ABI cpuinfo_mock_get_cpuid(uint32_t eax, uint32_t regs[4]); + void CPUINFO_ABI cpuinfo_mock_get_cpuidex(uint32_t eax, uint32_t ecx, uint32_t regs[4]); +#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */ + +struct cpuinfo_mock_file { + const char* path; + size_t size; + const char* content; + size_t offset; +}; + +struct cpuinfo_mock_property { + const char* key; + const char* value; +}; + +#if defined(__linux__) + void CPUINFO_ABI cpuinfo_mock_filesystem(struct cpuinfo_mock_file* files); + int CPUINFO_ABI cpuinfo_mock_open(const char* path, int oflag); + int CPUINFO_ABI cpuinfo_mock_close(int fd); + ssize_t CPUINFO_ABI cpuinfo_mock_read(int fd, void* buffer, size_t capacity); + + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + void CPUINFO_ABI cpuinfo_set_hwcap(uint32_t hwcap); + #endif + #if CPUINFO_ARCH_ARM + void CPUINFO_ABI cpuinfo_set_hwcap2(uint32_t hwcap2); + #endif +#endif + +#if defined(__ANDROID__) + void CPUINFO_ABI cpuinfo_mock_android_properties(struct cpuinfo_mock_property* properties); + void CPUINFO_ABI cpuinfo_mock_gl_renderer(const char* renderer); +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* CPUINFO_MOCK_H */ diff --git a/source/3rdparty/cpuinfo/include/cpuinfo.h b/source/3rdparty/cpuinfo/include/cpuinfo.h new file mode 100644 index 0000000..258abd0 --- /dev/null +++ 
b/source/3rdparty/cpuinfo/include/cpuinfo.h @@ -0,0 +1,1894 @@ +#pragma once +#ifndef CPUINFO_H +#define CPUINFO_H + +#ifndef __cplusplus + #include +#endif + +#ifdef __APPLE__ + #include +#endif + +#include + +/* Identify architecture and define corresponding macro */ + +#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) + #define CPUINFO_ARCH_X86 1 +#endif + +#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) + #define CPUINFO_ARCH_X86_64 1 +#endif + +#if defined(__arm__) || defined(_M_ARM) + #define CPUINFO_ARCH_ARM 1 +#endif + +#if defined(__aarch64__) || defined(_M_ARM64) + #define CPUINFO_ARCH_ARM64 1 +#endif + +#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) + #define CPUINFO_ARCH_PPC64 1 +#endif + +#if defined(__asmjs__) + #define CPUINFO_ARCH_ASMJS 1 +#endif + +#if defined(__wasm__) + #if defined(__wasm_simd128__) + #define CPUINFO_ARCH_WASMSIMD 1 + #else + #define CPUINFO_ARCH_WASM 1 + #endif +#endif + +/* Define other architecture-specific macros as 0 */ + +#ifndef CPUINFO_ARCH_X86 + #define CPUINFO_ARCH_X86 0 +#endif + +#ifndef CPUINFO_ARCH_X86_64 + #define CPUINFO_ARCH_X86_64 0 +#endif + +#ifndef CPUINFO_ARCH_ARM + #define CPUINFO_ARCH_ARM 0 +#endif + +#ifndef CPUINFO_ARCH_ARM64 + #define CPUINFO_ARCH_ARM64 0 +#endif + +#ifndef CPUINFO_ARCH_PPC64 + #define CPUINFO_ARCH_PPC64 0 +#endif + +#ifndef CPUINFO_ARCH_ASMJS + #define CPUINFO_ARCH_ASMJS 0 +#endif + +#ifndef CPUINFO_ARCH_WASM + #define CPUINFO_ARCH_WASM 0 +#endif + +#ifndef CPUINFO_ARCH_WASMSIMD + #define CPUINFO_ARCH_WASMSIMD 0 +#endif + +#if CPUINFO_ARCH_X86 && defined(_MSC_VER) + #define CPUINFO_ABI __cdecl +#elif CPUINFO_ARCH_X86 && defined(__GNUC__) + #define CPUINFO_ABI __attribute__((__cdecl__)) +#else + #define CPUINFO_ABI +#endif + +#define CPUINFO_CACHE_UNIFIED 0x00000001 +#define CPUINFO_CACHE_INCLUSIVE 0x00000002 +#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004 + +struct cpuinfo_cache { + /** Cache size in bytes */ + uint32_t size; + /** Number of ways of associativity */ + uint32_t associativity; + /** Number of sets */ + uint32_t sets; + /** Number of partitions */ + uint32_t partitions; + /** Line size in bytes */ + uint32_t line_size; + /** + * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing). + * + * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING + */ + uint32_t flags; + /** Index of the first logical processor that shares this cache */ + uint32_t processor_start; + /** Number of logical processors that share this cache */ + uint32_t processor_count; +}; + +struct cpuinfo_trace_cache { + uint32_t uops; + uint32_t associativity; +}; + +#define CPUINFO_PAGE_SIZE_4KB 0x1000 +#define CPUINFO_PAGE_SIZE_1MB 0x100000 +#define CPUINFO_PAGE_SIZE_2MB 0x200000 +#define CPUINFO_PAGE_SIZE_4MB 0x400000 +#define CPUINFO_PAGE_SIZE_16MB 0x1000000 +#define CPUINFO_PAGE_SIZE_1GB 0x40000000 + +struct cpuinfo_tlb { + uint32_t entries; + uint32_t associativity; + uint64_t pages; +}; + +/** Vendor of processor core design */ +enum cpuinfo_vendor { + /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */ + cpuinfo_vendor_unknown = 0, + + /* Active vendors of modern CPUs */ + + /** + * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures. + * + * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004. 
+ */ + cpuinfo_vendor_intel = 1, + /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */ + cpuinfo_vendor_amd = 2, + /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_arm = 3, + /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_qualcomm = 4, + /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_apple = 5, + /** Samsung Electronics Co., Ltd. Vendir if ARM64 processor microarchitectures. */ + cpuinfo_vendor_samsung = 6, + /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */ + cpuinfo_vendor_nvidia = 7, + /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_mips = 8, + /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */ + cpuinfo_vendor_ibm = 9, + /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_ingenic = 10, + /** + * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures. + * + * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies. + */ + cpuinfo_vendor_via = 11, + /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_cavium = 12, + /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_broadcom = 13, + /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_apm = 14, + /** + * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures. + * + * Processors are designed by HiSilicon, a subsidiary of Huawei. + */ + cpuinfo_vendor_huawei = 15, + /** + * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor of x86-64 processor microarchitectures. + * + * Processors are variants of AMD cores. + */ + cpuinfo_vendor_hygon = 16, + + /* Active vendors of embedded CPUs */ + + /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_texas_instruments = 30, + /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_marvell = 31, + /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_rdc = 32, + /** DM&P Electronics Inc. Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_dmp = 33, + /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */ + cpuinfo_vendor_motorola = 34, + + /* Defunct CPU vendors */ + + /** + * Transmeta Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 2004. + * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code. + */ + cpuinfo_vendor_transmeta = 50, + /** + * Cyrix Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1996. + */ + cpuinfo_vendor_cyrix = 51, + /** + * Rise Technology. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1999. + */ + cpuinfo_vendor_rise = 52, + /** + * National Semiconductor. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998. + */ + cpuinfo_vendor_nsc = 53, + /** + * Silicon Integrated Systems. Vendor of x86 processor microarchitectures. 
+ * + * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001. + */ + cpuinfo_vendor_sis = 54, + /** + * NexGen. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1994. + * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations. + */ + cpuinfo_vendor_nexgen = 55, + /** + * United Microelectronics Corporation. Vendor of x86 processor microarchitectures. + * + * Ceased x86 in the early 1990s. The last processor design was released in 1991. + * Designed U5C and U5D processors. Both are 486 level. + */ + cpuinfo_vendor_umc = 56, + /** + * Digital Equipment Corporation. Vendor of ARM processor microarchitecture. + * + * Sold its ARM designs in 1997. The last processor design was released in 1997. + */ + cpuinfo_vendor_dec = 57, +}; + +/** + * Processor microarchitecture + * + * Processors with different microarchitectures often have different instruction performance characteristics, + * and may have dramatically different pipeline organization. + */ +enum cpuinfo_uarch { + /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from OS */ + cpuinfo_uarch_unknown = 0, + + /** Pentium and Pentium MMX microarchitecture. */ + cpuinfo_uarch_p5 = 0x00100100, + /** Intel Quark microarchitecture. */ + cpuinfo_uarch_quark = 0x00100101, + + /** Pentium Pro, Pentium II, and Pentium III. */ + cpuinfo_uarch_p6 = 0x00100200, + /** Pentium M. */ + cpuinfo_uarch_dothan = 0x00100201, + /** Intel Core microarchitecture. */ + cpuinfo_uarch_yonah = 0x00100202, + /** Intel Core 2 microarchitecture on 65 nm process. */ + cpuinfo_uarch_conroe = 0x00100203, + /** Intel Core 2 microarchitecture on 45 nm process. */ + cpuinfo_uarch_penryn = 0x00100204, + /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */ + cpuinfo_uarch_nehalem = 0x00100205, + /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */ + cpuinfo_uarch_sandy_bridge = 0x00100206, + /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */ + cpuinfo_uarch_ivy_bridge = 0x00100207, + /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */ + cpuinfo_uarch_haswell = 0x00100208, + /** Intel Broadwell microarchitecture. */ + cpuinfo_uarch_broadwell = 0x00100209, + /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */ + cpuinfo_uarch_sky_lake = 0x0010020A, + /** DEPRECATED (Intel Kaby Lake microarchitecture). */ + cpuinfo_uarch_kaby_lake = 0x0010020A, + /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */ + cpuinfo_uarch_palm_cove = 0x0010020B, + /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */ + cpuinfo_uarch_sunny_cove = 0x0010020C, + + /** Pentium 4 with Willamette, Northwood, or Foster cores. */ + cpuinfo_uarch_willamette = 0x00100300, + /** Pentium 4 with Prescott and later cores. */ + cpuinfo_uarch_prescott = 0x00100301, + + /** Intel Atom on 45 nm process. */ + cpuinfo_uarch_bonnell = 0x00100400, + /** Intel Atom on 32 nm process. */ + cpuinfo_uarch_saltwell = 0x00100401, + /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */ + cpuinfo_uarch_silvermont = 0x00100402, + /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */ + cpuinfo_uarch_airmont = 0x00100403, + /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). 
*/ + cpuinfo_uarch_goldmont = 0x00100404, + /** Intel Goldmont Plus microarchitecture (Gemini Lake). */ + cpuinfo_uarch_goldmont_plus = 0x00100405, + + /** Intel Knights Ferry HPC boards. */ + cpuinfo_uarch_knights_ferry = 0x00100500, + /** Intel Knights Corner HPC boards (aka Xeon Phi). */ + cpuinfo_uarch_knights_corner = 0x00100501, + /** Intel Knights Landing microarchitecture (second-gen MIC). */ + cpuinfo_uarch_knights_landing = 0x00100502, + /** Intel Knights Hill microarchitecture (third-gen MIC). */ + cpuinfo_uarch_knights_hill = 0x00100503, + /** Intel Knights Mill Xeon Phi. */ + cpuinfo_uarch_knights_mill = 0x00100504, + + /** Intel/Marvell XScale series. */ + cpuinfo_uarch_xscale = 0x00100600, + + /** AMD K5. */ + cpuinfo_uarch_k5 = 0x00200100, + /** AMD K6 and alike. */ + cpuinfo_uarch_k6 = 0x00200101, + /** AMD Athlon and Duron. */ + cpuinfo_uarch_k7 = 0x00200102, + /** AMD Athlon 64, Opteron 64. */ + cpuinfo_uarch_k8 = 0x00200103, + /** AMD Family 10h (Barcelona, Istambul, Magny-Cours). */ + cpuinfo_uarch_k10 = 0x00200104, + /** + * AMD Bulldozer microarchitecture + * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs. + */ + cpuinfo_uarch_bulldozer = 0x00200105, + /** + * AMD Piledriver microarchitecture + * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs. + */ + cpuinfo_uarch_piledriver = 0x00200106, + /** AMD Steamroller microarchitecture (Kaveri APUs). */ + cpuinfo_uarch_steamroller = 0x00200107, + /** AMD Excavator microarchitecture (Carizzo APUs). */ + cpuinfo_uarch_excavator = 0x00200108, + /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen = 0x00200109, + /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen2 = 0x0020010A, + /** AMD Zen 3 microarchitecture. */ + cpuinfo_uarch_zen3 = 0x0020010B, + + /** NSC Geode and AMD Geode GX and LX. */ + cpuinfo_uarch_geode = 0x00200200, + /** AMD Bobcat mobile microarchitecture. */ + cpuinfo_uarch_bobcat = 0x00200201, + /** AMD Jaguar mobile microarchitecture. */ + cpuinfo_uarch_jaguar = 0x00200202, + /** AMD Puma mobile microarchitecture. */ + cpuinfo_uarch_puma = 0x00200203, + + /** ARM7 series. */ + cpuinfo_uarch_arm7 = 0x00300100, + /** ARM9 series. */ + cpuinfo_uarch_arm9 = 0x00300101, + /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */ + cpuinfo_uarch_arm11 = 0x00300102, + + /** ARM Cortex-A5. */ + cpuinfo_uarch_cortex_a5 = 0x00300205, + /** ARM Cortex-A7. */ + cpuinfo_uarch_cortex_a7 = 0x00300207, + /** ARM Cortex-A8. */ + cpuinfo_uarch_cortex_a8 = 0x00300208, + /** ARM Cortex-A9. */ + cpuinfo_uarch_cortex_a9 = 0x00300209, + /** ARM Cortex-A12. */ + cpuinfo_uarch_cortex_a12 = 0x00300212, + /** ARM Cortex-A15. */ + cpuinfo_uarch_cortex_a15 = 0x00300215, + /** ARM Cortex-A17. */ + cpuinfo_uarch_cortex_a17 = 0x00300217, + + /** ARM Cortex-A32. */ + cpuinfo_uarch_cortex_a32 = 0x00300332, + /** ARM Cortex-A35. */ + cpuinfo_uarch_cortex_a35 = 0x00300335, + /** ARM Cortex-A53. */ + cpuinfo_uarch_cortex_a53 = 0x00300353, + /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */ + cpuinfo_uarch_cortex_a55r0 = 0x00300354, + /** ARM Cortex-A55. */ + cpuinfo_uarch_cortex_a55 = 0x00300355, + /** ARM Cortex-A57. */ + cpuinfo_uarch_cortex_a57 = 0x00300357, + /** ARM Cortex-A65. */ + cpuinfo_uarch_cortex_a65 = 0x00300365, + /** ARM Cortex-A72. */ + cpuinfo_uarch_cortex_a72 = 0x00300372, + /** ARM Cortex-A73. */ + cpuinfo_uarch_cortex_a73 = 0x00300373, + /** ARM Cortex-A75. 
*/ + cpuinfo_uarch_cortex_a75 = 0x00300375, + /** ARM Cortex-A76. */ + cpuinfo_uarch_cortex_a76 = 0x00300376, + /** ARM Cortex-A77. */ + cpuinfo_uarch_cortex_a77 = 0x00300377, + /** ARM Cortex-A78. */ + cpuinfo_uarch_cortex_a78 = 0x00300378, + + /** ARM Neoverse N1. */ + cpuinfo_uarch_neoverse_n1 = 0x00300400, + /** ARM Neoverse E1. */ + cpuinfo_uarch_neoverse_e1 = 0x00300401, + /** ARM Neoverse V1. */ + cpuinfo_uarch_neoverse_v1 = 0x00300402, + /** ARM Neoverse N2. */ + cpuinfo_uarch_neoverse_n2 = 0x00300403, + + /** ARM Cortex-X1. */ + cpuinfo_uarch_cortex_x1 = 0x00300500, + + /** Qualcomm Scorpion. */ + cpuinfo_uarch_scorpion = 0x00400100, + /** Qualcomm Krait. */ + cpuinfo_uarch_krait = 0x00400101, + /** Qualcomm Kryo. */ + cpuinfo_uarch_kryo = 0x00400102, + /** Qualcomm Falkor. */ + cpuinfo_uarch_falkor = 0x00400103, + /** Qualcomm Saphira. */ + cpuinfo_uarch_saphira = 0x00400104, + + /** Nvidia Denver. */ + cpuinfo_uarch_denver = 0x00500100, + /** Nvidia Denver 2. */ + cpuinfo_uarch_denver2 = 0x00500101, + /** Nvidia Carmel. */ + cpuinfo_uarch_carmel = 0x00500102, + + /** Samsung Exynos M1 (Exynos 8890 big cores). */ + cpuinfo_uarch_exynos_m1 = 0x00600100, + /** Samsung Exynos M2 (Exynos 8895 big cores). */ + cpuinfo_uarch_exynos_m2 = 0x00600101, + /** Samsung Exynos M3 (Exynos 9810 big cores). */ + cpuinfo_uarch_exynos_m3 = 0x00600102, + /** Samsung Exynos M4 (Exynos 9820 big cores). */ + cpuinfo_uarch_exynos_m4 = 0x00600103, + /** Samsung Exynos M5 (Exynos 9830 big cores). */ + cpuinfo_uarch_exynos_m5 = 0x00600104, + + /* Deprecated synonym for Cortex-A76 */ + cpuinfo_uarch_cortex_a76ae = 0x00300376, + /* Deprecated names for Exynos. */ + cpuinfo_uarch_mongoose_m1 = 0x00600100, + cpuinfo_uarch_mongoose_m2 = 0x00600101, + cpuinfo_uarch_meerkat_m3 = 0x00600102, + cpuinfo_uarch_meerkat_m4 = 0x00600103, + + /** Apple A6 and A6X processors. */ + cpuinfo_uarch_swift = 0x00700100, + /** Apple A7 processor. */ + cpuinfo_uarch_cyclone = 0x00700101, + /** Apple A8 and A8X processor. */ + cpuinfo_uarch_typhoon = 0x00700102, + /** Apple A9 and A9X processor. */ + cpuinfo_uarch_twister = 0x00700103, + /** Apple A10 and A10X processor. */ + cpuinfo_uarch_hurricane = 0x00700104, + /** Apple A11 processor (big cores). */ + cpuinfo_uarch_monsoon = 0x00700105, + /** Apple A11 processor (little cores). */ + cpuinfo_uarch_mistral = 0x00700106, + /** Apple A12 processor (big cores). */ + cpuinfo_uarch_vortex = 0x00700107, + /** Apple A12 processor (little cores). */ + cpuinfo_uarch_tempest = 0x00700108, + /** Apple A13 processor (big cores). */ + cpuinfo_uarch_lightning = 0x00700109, + /** Apple A13 processor (little cores). */ + cpuinfo_uarch_thunder = 0x0070010A, + /** Apple M1 processor (big cores). */ + cpuinfo_uarch_firestorm = 0x0070010B, + /** Apple M1 processor (little cores). */ + cpuinfo_uarch_icestorm = 0x0070010C, + + /** Cavium ThunderX. */ + cpuinfo_uarch_thunderx = 0x00800100, + /** Cavium ThunderX2 (originally Broadcom Vulkan). */ + cpuinfo_uarch_thunderx2 = 0x00800200, + + /** Marvell PJ4. */ + cpuinfo_uarch_pj4 = 0x00900100, + + /** Broadcom Brahma B15. */ + cpuinfo_uarch_brahma_b15 = 0x00A00100, + /** Broadcom Brahma B53. */ + cpuinfo_uarch_brahma_b53 = 0x00A00101, + + /** Applied Micro X-Gene. */ + cpuinfo_uarch_xgene = 0x00B00100, + + /* Hygon Dhyana (a modification of AMD Zen for Chinese market). */ + cpuinfo_uarch_dhyana = 0x01000100, + + /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). 
*/ + cpuinfo_uarch_taishan_v110 = 0x00C00100, +}; + +struct cpuinfo_processor { + /** SMT (hyperthread) ID within a core */ + uint32_t smt_id; + /** Core containing this logical processor */ + const struct cpuinfo_core* core; + /** Cluster of cores containing this logical processor */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this logical processor */ + const struct cpuinfo_package* package; +#if defined(__linux__) + /** + * Linux-specific ID for the logical processor: + * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu/ + * - Bit in the cpu_set_t identifies this logical processor + */ + int linux_id; +#endif +#if defined(_WIN32) || defined(__CYGWIN__) + /** Windows-specific ID for the group containing the logical processor. */ + uint16_t windows_group_id; + /** + * Windows-specific ID of the logical processor within its group: + * - Bit in the KAFFINITY mask identifies this logical processor within its group. + */ + uint16_t windows_processor_id; +#endif +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** APIC ID (unique x86-specific ID of the logical processor) */ + uint32_t apic_id; +#endif + struct { + /** Level 1 instruction cache */ + const struct cpuinfo_cache* l1i; + /** Level 1 data cache */ + const struct cpuinfo_cache* l1d; + /** Level 2 unified or data cache */ + const struct cpuinfo_cache* l2; + /** Level 3 unified or data cache */ + const struct cpuinfo_cache* l3; + /** Level 4 unified or data cache */ + const struct cpuinfo_cache* l4; + } cache; +}; + +struct cpuinfo_core { + /** Index of the first logical processor on this core. */ + uint32_t processor_start; + /** Number of logical processors on this core */ + uint32_t processor_count; + /** Core ID within a package */ + uint32_t core_id; + /** Cluster containing this core */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this core. 
*/ + const struct cpuinfo_package* package; + /** Vendor of the CPU microarchitecture for this core */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture for this core */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for this core */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for this core */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the core, in Hz */ + uint64_t frequency; +}; + +struct cpuinfo_cluster { + /** Index of the first logical processor in the cluster */ + uint32_t processor_start; + /** Number of logical processors in the cluster */ + uint32_t processor_count; + /** Index of the first core in the cluster */ + uint32_t core_start; + /** Number of cores on the cluster */ + uint32_t core_count; + /** Cluster ID within a package */ + uint32_t cluster_id; + /** Physical package containing the cluster */ + const struct cpuinfo_package* package; + /** CPU microarchitecture vendor of the cores in the cluster */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture of the cores in the cluster */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register of the cores in the cluster */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) of the cores in the cluster */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */ + uint64_t frequency; +}; + +#define CPUINFO_PACKAGE_NAME_MAX 48 + +struct cpuinfo_package { + /** SoC or processor chip model name */ + char name[CPUINFO_PACKAGE_NAME_MAX]; + /** Index of the first logical processor on this physical package */ + uint32_t processor_start; + /** Number of logical processors on this physical package */ + uint32_t processor_count; + /** Index of the first core on this physical package */ + uint32_t core_start; + /** Number of cores on this physical package */ + uint32_t core_count; + /** Index of the first cluster of cores on this physical package */ + uint32_t cluster_start; + /** Number of clusters of cores on this physical package */ + uint32_t cluster_count; +}; + +struct cpuinfo_uarch_info { + /** Type of CPU microarchitecture */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for the microarchitecture */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for the microarchitecture */ + uint32_t midr; +#endif + /** Number of logical processors with the microarchitecture */ + uint32_t processor_count; + /** Number of cores with the microarchitecture */ + uint32_t core_count; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +bool CPUINFO_ABI cpuinfo_initialize(void); + +void CPUINFO_ABI cpuinfo_deinitialize(void); + +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. 
*/ + struct cpuinfo_x86_isa { + #if CPUINFO_ARCH_X86 + bool rdtsc; + #endif + bool rdtscp; + bool rdpid; + bool sysenter; + #if CPUINFO_ARCH_X86 + bool syscall; + #endif + bool msr; + bool clzero; + bool clflush; + bool clflushopt; + bool mwait; + bool mwaitx; + #if CPUINFO_ARCH_X86 + bool emmx; + #endif + bool fxsave; + bool xsave; + #if CPUINFO_ARCH_X86 + bool fpu; + bool mmx; + bool mmx_plus; + #endif + bool three_d_now; + bool three_d_now_plus; + #if CPUINFO_ARCH_X86 + bool three_d_now_geode; + #endif + bool prefetch; + bool prefetchw; + bool prefetchwt1; + #if CPUINFO_ARCH_X86 + bool daz; + bool sse; + bool sse2; + #endif + bool sse3; + bool ssse3; + bool sse4_1; + bool sse4_2; + bool sse4a; + bool misaligned_sse; + bool avx; + bool fma3; + bool fma4; + bool xop; + bool f16c; + bool avx2; + bool avx512f; + bool avx512pf; + bool avx512er; + bool avx512cd; + bool avx512dq; + bool avx512bw; + bool avx512vl; + bool avx512ifma; + bool avx512vbmi; + bool avx512vbmi2; + bool avx512bitalg; + bool avx512vpopcntdq; + bool avx512vnni; + bool avx512bf16; + bool avx512vp2intersect; + bool avx512_4vnniw; + bool avx512_4fmaps; + bool hle; + bool rtm; + bool xtest; + bool mpx; + #if CPUINFO_ARCH_X86 + bool cmov; + bool cmpxchg8b; + #endif + bool cmpxchg16b; + bool clwb; + bool movbe; + #if CPUINFO_ARCH_X86_64 + bool lahf_sahf; + #endif + bool fs_gs_base; + bool lzcnt; + bool popcnt; + bool tbm; + bool bmi; + bool bmi2; + bool adx; + bool aes; + bool vaes; + bool pclmulqdq; + bool vpclmulqdq; + bool gfni; + bool rdrand; + bool rdseed; + bool sha; + bool rng; + bool ace; + bool ace2; + bool phe; + bool pmm; + bool lwp; + }; + + extern struct cpuinfo_x86_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_x86_rdtsc(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.rdtsc; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdtscp(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdtscp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdpid(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdpid; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clzero(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clzero; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwait(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwait; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwaitx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwaitx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fxsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fxsave; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xsave; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fpu(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.fpu; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx; + #endif + #else + return false; + #endif +} + 
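All of the cpuinfo_has_x86_* and cpuinfo_has_arm_* helpers in this header follow one pattern: they compile to a constant on architectures where the answer is fixed at build time and otherwise read a field of the cpuinfo_isa structure, which is only populated after cpuinfo_initialize() succeeds. The sketch below is illustrative only and is not part of the patch; it assumes the public functions declared in this header (cpuinfo_initialize, cpuinfo_get_package, cpuinfo_get_cores_count, cpuinfo_get_processors_count, cpuinfo_has_x86_avx2, cpuinfo_has_arm_neon, cpuinfo_deinitialize), while the <cpuinfo.h> include path and the main() wrapper are assumptions of the example.

    /* Minimal usage sketch: initialize, query topology, check ISA features, deinitialize. */
    #include <inttypes.h>
    #include <stdio.h>
    #include <cpuinfo.h>

    int main(void) {
        if (!cpuinfo_initialize()) {
            /* Feature checks and cpuinfo_get_* accessors are undefined before this succeeds. */
            fprintf(stderr, "cpuinfo initialization failed\n");
            return 1;
        }
        const struct cpuinfo_package* package = cpuinfo_get_package(0);
        if (package != NULL) {
            printf("package: %s\n", package->name);
        }
        printf("cores: %" PRIu32 ", logical processors: %" PRIu32 "\n",
               cpuinfo_get_cores_count(), cpuinfo_get_processors_count());
        /* Each check returns false on architectures where the feature cannot exist. */
        printf("AVX2: %s\n", cpuinfo_has_x86_avx2() ? "yes" : "no");
        printf("NEON: %s\n", cpuinfo_has_arm_neon() ? "yes" : "no");
        cpuinfo_deinitialize();
        return 0;
    }

The initialize-first requirement is why the accessors further down in api.c call cpuinfo_log_fatal() when cpuinfo_is_initialized is still false.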
+static inline bool cpuinfo_has_x86_mmx_plus(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx_plus; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_plus(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now_plus; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_geode(void) { + #if CPUINFO_ARCH_X86_64 + return false; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return false; + #else + return cpuinfo_isa.three_d_now_geode; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetch(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetch; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchwt1(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchwt1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_daz(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.daz; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse2(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse2; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_ssse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.ssse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_1(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_1; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_2(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_2; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4a(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sse4a; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_misaligned_sse(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.misaligned_sse; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx; + #else + return false; + #endif +} + +static inline bool 
cpuinfo_has_x86_fma3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fma4(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma4; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xop(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xop; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_f16c(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.f16c; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512f(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512f; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512pf(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512pf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512er(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512er; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512cd(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512cd; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512dq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512dq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vl(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vl; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512ifma(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512ifma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bitalg(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bitalg; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vpopcntdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vnni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vnni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bf16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vp2intersect(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vp2intersect; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512_4vnniw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4vnniw; + #else + return false; + #endif +} + +static inline 
bool cpuinfo_has_x86_avx512_4fmaps(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4fmaps; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_hle(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.hle; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rtm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rtm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xtest(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xtest; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mpx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mpx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmov(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmov; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg8b(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmpxchg8b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg16b(void) { + #if CPUINFO_ARCH_X86_64 + return cpuinfo_isa.cmpxchg16b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clwb(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clwb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_movbe(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.movbe; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lahf_sahf(void) { + #if CPUINFO_ARCH_X86 + return true; + #elif CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lahf_sahf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lzcnt(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lzcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_popcnt(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.popcnt; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.popcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_tbm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.tbm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_adx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.adx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_aes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vaes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vaes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_pclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.pclmulqdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vpclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vpclmulqdq; + #else + return false; + #endif +} + +static 
inline bool cpuinfo_has_x86_gfni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.gfni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdrand(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdrand; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdseed(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdseed; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sha(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sha; + #else + return false; + #endif +} + +#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /* This structure is not a part of stable API. Use cpuinfo_has_arm_* functions instead. */ + struct cpuinfo_arm_isa { + #if CPUINFO_ARCH_ARM + bool thumb; + bool thumb2; + bool thumbee; + bool jazelle; + bool armv5e; + bool armv6; + bool armv6k; + bool armv7; + bool armv7mp; + bool armv8; + bool idiv; + + bool vfpv2; + bool vfpv3; + bool d32; + bool fp16; + bool fma; + + bool wmmx; + bool wmmx2; + bool neon; + #endif + #if CPUINFO_ARCH_ARM64 + bool atomics; + bool bf16; + bool sve; + bool svebf16; + bool sve2; + #endif + bool rdm; + bool fp16arith; + bool dot; + bool jscvt; + bool fcma; + + bool aes; + bool sha1; + bool sha2; + bool pmull; + bool crc32; + }; + + extern struct cpuinfo_arm_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_arm_thumb(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_thumb2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v5e(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv5e; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6k(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6k; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7mp(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7mp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_idiv(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.idiv; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) { + #if CPUINFO_ARCH_ARM64 + 
return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fma(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_atomics(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.atomics; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_rdm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.rdm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16_arith(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16arith; + #elif CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fp16_arith(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_dot(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.dot; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_jscvt(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.jscvt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fcma(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fcma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_aes(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha1(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha2(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_pmull(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.pmull; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_crc32(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + 
return cpuinfo_isa.crc32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve2(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_svebf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.svebf16; + #else + return false; + #endif +} + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void); + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index); + +uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void); + +/** + * Returns upper bound on cache size. + */ +uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void); + +/** + * Identify the logical processor that executes the current thread. + * + * There is no guarantee that the thread will stay on the same logical processor for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void); + +/** + * Identify the core that executes the current thread. + * + * There is no guarantee that the thread will stay on the same core for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. 
+ */ +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns 0. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns the user-specified default value. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* CPUINFO_H */ diff --git a/source/3rdparty/cpuinfo/src/api.c b/source/3rdparty/cpuinfo/src/api.c new file mode 100644 index 0000000..f91b421 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/api.c @@ -0,0 +1,410 @@ +#include +#include + +#include +#include +#include + +#ifdef __linux__ + #include + + #include + #include + #if !defined(__NR_getcpu) + #include + #endif +#endif + +bool cpuinfo_is_initialized = false; + +struct cpuinfo_processor* cpuinfo_processors = NULL; +struct cpuinfo_core* cpuinfo_cores = NULL; +struct cpuinfo_cluster* cpuinfo_clusters = NULL; +struct cpuinfo_package* cpuinfo_packages = NULL; +struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max] = { NULL }; + +uint32_t cpuinfo_processors_count = 0; +uint32_t cpuinfo_cores_count = 0; +uint32_t cpuinfo_clusters_count = 0; +uint32_t cpuinfo_packages_count = 0; +uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max] = { 0 }; +uint32_t cpuinfo_max_cache_size = 0; + +#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + struct cpuinfo_uarch_info* cpuinfo_uarchs = NULL; + uint32_t cpuinfo_uarchs_count = 0; +#else + struct cpuinfo_uarch_info cpuinfo_global_uarch = { cpuinfo_uarch_unknown }; +#endif + +#ifdef __linux__ + uint32_t cpuinfo_linux_cpu_max = 0; + const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map = NULL; + const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map = NULL; + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map = NULL; + #endif +#endif + + +const struct cpuinfo_processor* cpuinfo_get_processors(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "processors"); + } + return cpuinfo_processors; +} + +const struct cpuinfo_core* cpuinfo_get_cores(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "core"); + } + return cpuinfo_cores; +} + +const struct cpuinfo_cluster* cpuinfo_get_clusters(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "clusters"); + } + return cpuinfo_clusters; +} + +const struct cpuinfo_package* cpuinfo_get_packages(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "packages"); + } + return cpuinfo_packages; +} + +const struct cpuinfo_uarch_info* cpuinfo_get_uarchs() { + if (!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before 
cpuinfo is initialized", "uarchs"); + } + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_uarchs; + #else + return &cpuinfo_global_uarch; + #endif +} + +const struct cpuinfo_processor* cpuinfo_get_processor(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "processor"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_processors_count) { + return NULL; + } + return &cpuinfo_processors[index]; +} + +const struct cpuinfo_core* cpuinfo_get_core(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "core"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_cores_count) { + return NULL; + } + return &cpuinfo_cores[index]; +} + +const struct cpuinfo_cluster* cpuinfo_get_cluster(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "cluster"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_clusters_count) { + return NULL; + } + return &cpuinfo_clusters[index]; +} + +const struct cpuinfo_package* cpuinfo_get_package(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "package"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_packages_count) { + return NULL; + } + return &cpuinfo_packages[index]; +} + +const struct cpuinfo_uarch_info* cpuinfo_get_uarch(uint32_t index) { + if (!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarch"); + } + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + if CPUINFO_UNLIKELY(index >= cpuinfo_uarchs_count) { + return NULL; + } + return &cpuinfo_uarchs[index]; + #else + if CPUINFO_UNLIKELY(index != 0) { + return NULL; + } + return &cpuinfo_global_uarch; + #endif +} + +uint32_t cpuinfo_get_processors_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "processors_count"); + } + return cpuinfo_processors_count; +} + +uint32_t cpuinfo_get_cores_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "cores_count"); + } + return cpuinfo_cores_count; +} + +uint32_t cpuinfo_get_clusters_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "clusters_count"); + } + return cpuinfo_clusters_count; +} + +uint32_t cpuinfo_get_packages_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "packages_count"); + } + return cpuinfo_packages_count; +} + +uint32_t cpuinfo_get_uarchs_count(void) { + if (!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarchs_count"); + } + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_uarchs_count; + #else + return 1; + #endif +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1i_caches"); + } + return cpuinfo_cache[cpuinfo_cache_level_1i]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before 
cpuinfo is initialized", "l1d_caches"); + } + return cpuinfo_cache[cpuinfo_cache_level_1d]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l2_caches"); + } + return cpuinfo_cache[cpuinfo_cache_level_2]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l3_caches"); + } + return cpuinfo_cache[cpuinfo_cache_level_3]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l4_caches"); + } + return cpuinfo_cache[cpuinfo_cache_level_4]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1i_cache"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_cache_count[cpuinfo_cache_level_1i]) { + return NULL; + } + return &cpuinfo_cache[cpuinfo_cache_level_1i][index]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1d_cache"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_cache_count[cpuinfo_cache_level_1d]) { + return NULL; + } + return &cpuinfo_cache[cpuinfo_cache_level_1d][index]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l2_cache"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_cache_count[cpuinfo_cache_level_2]) { + return NULL; + } + return &cpuinfo_cache[cpuinfo_cache_level_2][index]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l3_cache"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_cache_count[cpuinfo_cache_level_3]) { + return NULL; + } + return &cpuinfo_cache[cpuinfo_cache_level_3][index]; +} + +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l4_cache"); + } + if CPUINFO_UNLIKELY(index >= cpuinfo_cache_count[cpuinfo_cache_level_4]) { + return NULL; + } + return &cpuinfo_cache[cpuinfo_cache_level_4][index]; +} + +uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1i_caches_count"); + } + return cpuinfo_cache_count[cpuinfo_cache_level_1i]; +} + +uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1d_caches_count"); + } + return cpuinfo_cache_count[cpuinfo_cache_level_1d]; +} + +uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l2_caches_count"); + } + return 
cpuinfo_cache_count[cpuinfo_cache_level_2]; +} + +uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l3_caches_count"); + } + return cpuinfo_cache_count[cpuinfo_cache_level_3]; +} + +uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l4_caches_count"); + } + return cpuinfo_cache_count[cpuinfo_cache_level_4]; +} + +uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "max_cache_size"); + } + return cpuinfo_max_cache_size; +} + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_processor"); + } + #ifdef __linux__ + /* Initializing this variable silences a MemorySanitizer error. */ + unsigned cpu = 0; + if CPUINFO_UNLIKELY(syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) { + return 0; + } + if CPUINFO_UNLIKELY((uint32_t) cpu >= cpuinfo_linux_cpu_max) { + return 0; + } + return cpuinfo_linux_cpu_to_processor_map[cpu]; + #else + return NULL; + #endif +} + +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_core"); + } + #ifdef __linux__ + /* Initializing this variable silences a MemorySanitizer error. */ + unsigned cpu = 0; + if CPUINFO_UNLIKELY(syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) { + return 0; + } + if CPUINFO_UNLIKELY((uint32_t) cpu >= cpuinfo_linux_cpu_max) { + return 0; + } + return cpuinfo_linux_cpu_to_core_map[cpu]; + #else + return NULL; + #endif +} + +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_uarch_index"); + } + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + #ifdef __linux__ + if (cpuinfo_linux_cpu_to_uarch_index_map == NULL) { + /* Special case: avoid syscall on systems with only a single type of cores */ + return 0; + } + + /* General case */ + /* Initializing this variable silences a MemorySanitizer error. */ + unsigned cpu = 0; + if CPUINFO_UNLIKELY(syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) { + return 0; + } + if CPUINFO_UNLIKELY((uint32_t) cpu >= cpuinfo_linux_cpu_max) { + return 0; + } + return cpuinfo_linux_cpu_to_uarch_index_map[cpu]; + #else + /* Fallback: pretend to be on the big core. */ + return 0; + #endif + #else + /* Only ARM/ARM64 processors may include cores of different types in the same package. */ + return 0; + #endif +} + +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index) { + if CPUINFO_UNLIKELY(!cpuinfo_is_initialized) { + cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_uarch_index_with_default"); + } + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + #ifdef __linux__ + if (cpuinfo_linux_cpu_to_uarch_index_map == NULL) { + /* Special case: avoid syscall on systems with only a single type of cores */ + return 0; + } + + /* General case */ + /* Initializing this variable silences a MemorySanitizer error. 
*/ + unsigned cpu = 0; + if CPUINFO_UNLIKELY(syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) { + return default_uarch_index; + } + if CPUINFO_UNLIKELY((uint32_t) cpu >= cpuinfo_linux_cpu_max) { + return default_uarch_index; + } + return cpuinfo_linux_cpu_to_uarch_index_map[cpu]; + #else + /* Fallback: no API to query current core, use default uarch index. */ + return default_uarch_index; + #endif + #else + /* Only ARM/ARM64 processors may include cores of different types in the same package. */ + return 0; + #endif +} diff --git a/source/3rdparty/cpuinfo/src/arm/android/api.h b/source/3rdparty/cpuinfo/src/arm/android/api.h new file mode 100644 index 0000000..228632a --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/android/api.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include +#include + +enum cpuinfo_android_chipset_property { + cpuinfo_android_chipset_property_proc_cpuinfo_hardware = 0, + cpuinfo_android_chipset_property_ro_product_board, + cpuinfo_android_chipset_property_ro_board_platform, + cpuinfo_android_chipset_property_ro_mediatek_platform, + cpuinfo_android_chipset_property_ro_arch, + cpuinfo_android_chipset_property_ro_chipname, + cpuinfo_android_chipset_property_ro_hardware_chipname, + cpuinfo_android_chipset_property_max, +}; + +CPUINFO_INTERNAL void cpuinfo_arm_android_parse_properties( + struct cpuinfo_android_properties properties[restrict static 1]); diff --git a/source/3rdparty/cpuinfo/src/arm/android/properties.c b/source/3rdparty/cpuinfo/src/arm/android/properties.c new file mode 100644 index 0000000..5f93889 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/android/properties.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if CPUINFO_MOCK + #include + + static struct cpuinfo_mock_property* cpuinfo_mock_properties = NULL; + + void CPUINFO_ABI cpuinfo_mock_android_properties(struct cpuinfo_mock_property* properties) { + cpuinfo_log_info("Android properties mocking enabled"); + cpuinfo_mock_properties = properties; + } + + static int cpuinfo_android_property_get(const char* key, char* value) { + if (cpuinfo_mock_properties != NULL) { + for (const struct cpuinfo_mock_property* prop = cpuinfo_mock_properties; prop->key != NULL; prop++) { + if (strncmp(key, prop->key, CPUINFO_BUILD_PROP_NAME_MAX) == 0) { + strncpy(value, prop->value, CPUINFO_BUILD_PROP_VALUE_MAX); + return (int) strnlen(prop->value, CPUINFO_BUILD_PROP_VALUE_MAX); + } + } + } + *value = '\0'; + return 0; + } +#else + static inline int cpuinfo_android_property_get(const char* key, char* value) { + return __system_property_get(key, value); + } +#endif + +void cpuinfo_arm_android_parse_properties(struct cpuinfo_android_properties properties[restrict static 1]) { + const int ro_product_board_length = + cpuinfo_android_property_get("ro.product.board", properties->ro_product_board); + cpuinfo_log_debug("read ro.product.board = \"%.*s\"", ro_product_board_length, properties->ro_product_board); + + const int ro_board_platform_length = + cpuinfo_android_property_get("ro.board.platform", properties->ro_board_platform); + cpuinfo_log_debug("read ro.board.platform = \"%.*s\"", ro_board_platform_length, properties->ro_board_platform); + + const int ro_mediatek_platform_length = + cpuinfo_android_property_get("ro.mediatek.platform", properties->ro_mediatek_platform); + cpuinfo_log_debug("read ro.mediatek.platform = \"%.*s\"", + ro_mediatek_platform_length, properties->ro_mediatek_platform); + + const int ro_arch_length = + 
cpuinfo_android_property_get("ro.arch", properties->ro_arch); + cpuinfo_log_debug("read ro.arch = \"%.*s\"", ro_arch_length, properties->ro_arch); + + const int ro_chipname_length = + cpuinfo_android_property_get("ro.chipname", properties->ro_chipname); + cpuinfo_log_debug("read ro.chipname = \"%.*s\"", ro_chipname_length, properties->ro_chipname); + + const int ro_hardware_chipname_length = + cpuinfo_android_property_get("ro.hardware.chipname", properties->ro_hardware_chipname); + cpuinfo_log_debug("read ro.hardware.chipname = \"%.*s\"", ro_hardware_chipname_length, properties->ro_hardware_chipname); +} diff --git a/source/3rdparty/cpuinfo/src/arm/api.h b/source/3rdparty/cpuinfo/src/arm/api.h new file mode 100644 index 0000000..48b99dd --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/api.h @@ -0,0 +1,122 @@ +#pragma once + +#include +#include + +#include +#include + +enum cpuinfo_arm_chipset_vendor { + cpuinfo_arm_chipset_vendor_unknown = 0, + cpuinfo_arm_chipset_vendor_qualcomm, + cpuinfo_arm_chipset_vendor_mediatek, + cpuinfo_arm_chipset_vendor_samsung, + cpuinfo_arm_chipset_vendor_hisilicon, + cpuinfo_arm_chipset_vendor_actions, + cpuinfo_arm_chipset_vendor_allwinner, + cpuinfo_arm_chipset_vendor_amlogic, + cpuinfo_arm_chipset_vendor_broadcom, + cpuinfo_arm_chipset_vendor_lg, + cpuinfo_arm_chipset_vendor_leadcore, + cpuinfo_arm_chipset_vendor_marvell, + cpuinfo_arm_chipset_vendor_mstar, + cpuinfo_arm_chipset_vendor_novathor, + cpuinfo_arm_chipset_vendor_nvidia, + cpuinfo_arm_chipset_vendor_pinecone, + cpuinfo_arm_chipset_vendor_renesas, + cpuinfo_arm_chipset_vendor_rockchip, + cpuinfo_arm_chipset_vendor_spreadtrum, + cpuinfo_arm_chipset_vendor_telechips, + cpuinfo_arm_chipset_vendor_texas_instruments, + cpuinfo_arm_chipset_vendor_wondermedia, + cpuinfo_arm_chipset_vendor_max, +}; + +enum cpuinfo_arm_chipset_series { + cpuinfo_arm_chipset_series_unknown = 0, + cpuinfo_arm_chipset_series_qualcomm_qsd, + cpuinfo_arm_chipset_series_qualcomm_msm, + cpuinfo_arm_chipset_series_qualcomm_apq, + cpuinfo_arm_chipset_series_qualcomm_snapdragon, + cpuinfo_arm_chipset_series_mediatek_mt, + cpuinfo_arm_chipset_series_samsung_exynos, + cpuinfo_arm_chipset_series_hisilicon_k3v, + cpuinfo_arm_chipset_series_hisilicon_hi, + cpuinfo_arm_chipset_series_hisilicon_kirin, + cpuinfo_arm_chipset_series_actions_atm, + cpuinfo_arm_chipset_series_allwinner_a, + cpuinfo_arm_chipset_series_amlogic_aml, + cpuinfo_arm_chipset_series_amlogic_s, + cpuinfo_arm_chipset_series_broadcom_bcm, + cpuinfo_arm_chipset_series_lg_nuclun, + cpuinfo_arm_chipset_series_leadcore_lc, + cpuinfo_arm_chipset_series_marvell_pxa, + cpuinfo_arm_chipset_series_mstar_6a, + cpuinfo_arm_chipset_series_novathor_u, + cpuinfo_arm_chipset_series_nvidia_tegra_t, + cpuinfo_arm_chipset_series_nvidia_tegra_ap, + cpuinfo_arm_chipset_series_nvidia_tegra_sl, + cpuinfo_arm_chipset_series_pinecone_surge_s, + cpuinfo_arm_chipset_series_renesas_mp, + cpuinfo_arm_chipset_series_rockchip_rk, + cpuinfo_arm_chipset_series_spreadtrum_sc, + cpuinfo_arm_chipset_series_telechips_tcc, + cpuinfo_arm_chipset_series_texas_instruments_omap, + cpuinfo_arm_chipset_series_wondermedia_wm, + cpuinfo_arm_chipset_series_max, +}; + +#define CPUINFO_ARM_CHIPSET_SUFFIX_MAX 8 + +struct cpuinfo_arm_chipset { + enum cpuinfo_arm_chipset_vendor vendor; + enum cpuinfo_arm_chipset_series series; + uint32_t model; + char suffix[CPUINFO_ARM_CHIPSET_SUFFIX_MAX]; +}; + +#define CPUINFO_ARM_CHIPSET_NAME_MAX CPUINFO_PACKAGE_NAME_MAX + +#ifndef __cplusplus + CPUINFO_INTERNAL void 
cpuinfo_arm_chipset_to_string( + const struct cpuinfo_arm_chipset chipset[restrict static 1], + char name[restrict static CPUINFO_ARM_CHIPSET_NAME_MAX]); + + CPUINFO_INTERNAL void cpuinfo_arm_fixup_chipset( + struct cpuinfo_arm_chipset chipset[restrict static 1], uint32_t cores, uint32_t max_cpu_freq_max); + + CPUINFO_INTERNAL void cpuinfo_arm_decode_vendor_uarch( + uint32_t midr, + #if CPUINFO_ARCH_ARM + bool has_vfpv4, + #endif + enum cpuinfo_vendor vendor[restrict static 1], + enum cpuinfo_uarch uarch[restrict static 1]); + + CPUINFO_INTERNAL void cpuinfo_arm_decode_cache( + enum cpuinfo_uarch uarch, + uint32_t cluster_cores, + uint32_t midr, + const struct cpuinfo_arm_chipset chipset[restrict static 1], + uint32_t cluster_id, + uint32_t arch_version, + struct cpuinfo_cache l1i[restrict static 1], + struct cpuinfo_cache l1d[restrict static 1], + struct cpuinfo_cache l2[restrict static 1], + struct cpuinfo_cache l3[restrict static 1]); + + CPUINFO_INTERNAL uint32_t cpuinfo_arm_compute_max_cache_size( + const struct cpuinfo_processor processor[restrict static 1]); +#else /* defined(__cplusplus) */ + CPUINFO_INTERNAL void cpuinfo_arm_decode_cache( + enum cpuinfo_uarch uarch, + uint32_t cluster_cores, + uint32_t midr, + const struct cpuinfo_arm_chipset chipset[1], + uint32_t cluster_id, + uint32_t arch_version, + struct cpuinfo_cache l1i[1], + struct cpuinfo_cache l1d[1], + struct cpuinfo_cache l2[1], + struct cpuinfo_cache l3[1]); +#endif diff --git a/source/3rdparty/cpuinfo/src/arm/cache.c b/source/3rdparty/cpuinfo/src/arm/cache.c new file mode 100644 index 0000000..1a6dd38 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/cache.c @@ -0,0 +1,1737 @@ +#include + +#include +#include +#include +#include +#include + + +void cpuinfo_arm_decode_cache( + enum cpuinfo_uarch uarch, + uint32_t cluster_cores, + uint32_t midr, + const struct cpuinfo_arm_chipset chipset[restrict static 1], + uint32_t cluster_id, + uint32_t arch_version, + struct cpuinfo_cache l1i[restrict static 1], + struct cpuinfo_cache l1d[restrict static 1], + struct cpuinfo_cache l2[restrict static 1], + struct cpuinfo_cache l3[restrict static 1]) +{ + switch (uarch) { +#if CPUINFO_ARCH_ARM && !defined(__ARM_ARCH_7A__) && !defined(__ARM_ARCH_8A__) + case cpuinfo_uarch_xscale: + switch (midr_get_part(midr) >> 8) { + case 2: + /* + * PXA 210/25X/26X + * + * See "Computer Organization and Design, Revised Printing: The Hardware/Software Interface" + * by David A. Patterson, John L. 
Hennessy + */ + *l1i = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 32, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 64 + }; + break; + case 4: + /* PXA 27X */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 32, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 32, + .line_size = 32 + }; + break; + case 6: + /* + * PXA 3XX + * + * See http://download.intel.com/design/intelxscale/31628302.pdf + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l2 = (struct cpuinfo_cache) { + .size = 256 * 1024, + .associativity = 8, + .line_size = 32 + }; + break; + } + break; + case cpuinfo_uarch_arm11: + *l1i = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 32 + }; + break; +#endif /* CPUINFO_ARCH_ARM && !defined(__ARM_ARCH_7A__) && !defined(__ARM_ARCH_8A__) */ +#if CPUINFO_ARCH_ARM && !defined(__ARM_ARCH_8A__) + case cpuinfo_uarch_cortex_a5: + /* + * Cortex-A5 Technical Reference Manual: + * 7.1.1. Memory system + * The Cortex-A5 processor has separate instruction and data caches. + * The caches have the following features: + * - Data cache is 4-way set-associative. + * - Instruction cache is 2-way set-associative. + * - The cache line length is eight words. + * - You can configure the instruction and data caches independently during implementation + * to sizes of 4KB, 8KB, 16KB, 32KB, or 64KB. + * 1.1.3. System design components + * PrimeCell Level 2 Cache Controller (PL310) + * The addition of an on-chip secondary cache, also referred to as a Level 2 or L2 cache, is a + * recognized method of improving the performance of ARM-based systems when significant memory traffic + * is generated by the processor. The PrimeCell Level 2 Cache Controller reduces the number of external + * memory accesses and has been optimized for use with the Cortex-A5 processor. + * 8.1.7. Exclusive L2 cache + * The Cortex-A5 processor can be connected to an L2 cache that supports an exclusive cache mode. + * This mode must be activated both in the Cortex-A5 processor and in the L2 cache controller. + * + * +--------------------+-----------+-----------+----------+-----------+ + * | Processor model | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-----------+-----------+----------+-----------+ + * | Qualcomm MSM7225A | | | | | + * | Qualcomm MSM7625A | | | | | + * | Qualcomm MSM7227A | | | | | + * | Qualcomm MSM7627A | 32K | 32K | 256K | Wiki [1] | + * | Qualcomm MSM7225AB | | | | | + * | Qualcomm MSM7225AB | | | | | + * | Qualcomm QSD8250 | | | | | + * | Qualcomm QSD8650 | | | | | + * +--------------------+-----------+-----------+----------+-----------+ + * | Spreadtrum SC6821 | 32K | 32K | ? | | + * | Spreadtrum SC6825 | 32K | 32K | 256K | Wiki [2] | + * | Spreadtrum SC8810 | ? | ? | ? | | + * | Spreadtrum SC8825 | 32K | 32K | ? 
| | + * +--------------------+-----------+-----------+----------+-----------+ + * + * [1] https://en.wikipedia.org/wiki/List_of_Qualcomm_Snapdragon_systems-on-chip#Snapdragon_S1 + * [2] https://en.wikipedia.org/wiki/Spreadtrum + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l2 = (struct cpuinfo_cache) { + .size = 256 * 1024, + /* + * Follow NXP specification: "Eight-way set-associative 512 kB L2 cache with 32B line size" + * Reference: http://www.nxp.com/assets/documents/data/en/application-notes/AN4947.pdf + */ + .associativity = 8, + .line_size = 32 + }; + break; + case cpuinfo_uarch_cortex_a7: + /* + * Cortex-A7 MPCore Technical Reference Manual: + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. You can configure the + * instruction and data caches independently during implementation to sizes of 8KB, 16KB, 32KB, or 64KB. + * + * The L1 instruction memory system has the following features: + * - Instruction side cache line length of 32-bytes. + * - 2-way set-associative instruction cache. + * + * The L1 data memory system has the following features: + * - Data side cache line length of 64-bytes. + * - 4-way set-associative data cache. + * + * 7.1. About the L2 Memory system + * The L2 memory system consists of an: + * - Optional tightly-coupled L2 cache that includes: + * - Configurable L2 cache size of 128KB, 256KB, 512KB, and 1MB. + * - Fixed line length of 64 bytes + * - 8-way set-associative cache structure + * + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Allwinner A20 | 2 | 32K | 32K | 256K | [1] | + * | Allwinner A23 | 2 | 32K | 32K | 256K | [2] | + * | Allwinner A31 | 4 | 32K | 32K | 1M | [3] | + * | Allwinner A31s | 4 | 32K | 32K | 1M | [4] | + * | Allwinner A33 | 4 | 32K | 32K | 512K | [5] | + * | Allwinner A80 Octa | 4(+4) | 32K | 32K | 512K(+2M) | [6] | + * | Allwinner A81T | 8 | 32K | 32K | 1M | [7] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Broadcom BCM2836 | 4 | 32K | 32K | 512K | [8] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Kirin 920 | 4(+4) | ? | ? 
| 512K | [9] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] https://linux-sunxi.org/A20 + * [2] https://linux-sunxi.org/A23 + * [3] http://dl.linux-sunxi.org/A31/A3x_release_document/A31/IC/A31%20datasheet%20V1.3%2020131106.pdf + * [4] https://github.com/allwinner-zh/documents/blob/master/A31s/A31s_Datasheet_v1.5_20150510.pdf + * [5] http://dl.linux-sunxi.org/A33/A33_Datasheet_release1.0.pdf + * [6] https://linux-sunxi.org/images/1/10/A80_Datasheet_Revision_1.0_0404.pdf + * [7] http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf + * [8] https://www.raspberrypi.org/forums/viewtopic.php?t=98428 + * [9] http://www.gizmochina.com/2014/10/07/hisilicon-kirin-920-tear-down/ + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = 128 * 1024 * cluster_cores, + .associativity = 8, + .line_size = 64 + }; + break; + case cpuinfo_uarch_cortex_a8: + /* + * Cortex-A8 Technical Reference Manual: + * 7.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches in a Harvard arrangement. + * The L1 memory system provides the core with: + * - fixed line length of 64 bytes + * - support for 16KB or 32KB caches + * - 4-way set associative cache structure + * 8.1. About the L2 memory system + * The L2 memory system is tightly coupled to the L1 data cache and L1 instruction cache. + * The key features of the L2 memory system include: + * - configurable cache size of 0KB, 128KB, 256KB, 512KB, and 1MB + * - fixed line length of 64 bytes + * - 8-way set associative cache structure + * + * +----------------------+-----------+-----------+-----------+-----------+ + * | Processor model | L1D cache | L1I cache | L2 cache | Reference | + * +----------------------+-----------+-----------+-----------+-----------+ + * | Exynos 3 Single 3110 | 32K | 32K | 512K | [1] | + * +----------------------+-----------+-----------+-----------+-----------+ + * | TI DM 3730 | 32K | 32K | 256K | [2] | + * +----------------------+-----------+-----------+-----------+-----------+ + * + * [1] https://en.wikichip.org/w/images/0/04/Exynos_3110.pdf + * [2] https://www.ti.com/lit/ds/symlink/dm3725.pdf + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .associativity = 8, + .line_size = 64 + }; + switch (chipset->vendor) { + case cpuinfo_arm_chipset_vendor_samsung: + l2->size = 512 * 1024; + break; + default: + l2->size = 256 * 1024; + break; + } + + break; + case cpuinfo_uarch_cortex_a9: + /* + * ARM Cortex‑A9 Technical Reference Manual: + * 7.1.1 Memory system + * The Cortex‑A9 processor has separate instruction and data caches. + * The caches have the following features: + * - Both caches are 4-way set-associative. + * - The cache line length is eight words. + * - You can configure the instruction and data caches independently during implementation + * to sizes of 16KB, 32KB, or 64KB. + * 8.1.5 Exclusive L2 cache + * The Cortex‑A9 processor can be connected to an L2 cache that supports an exclusive cache mode. + * This mode must be activated both in the Cortex‑A9 processor and in the L2 cache controller. 
+ * + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Exynos 4 Dual 4210 | 2 | 32K | 32K | 1M | [1] | + * | Exynos 4 Dual 4212 | 2 | 32K | 32K | 1M | [2] | + * | Exynos 4 Quad 4412 | 4 | 32K | 32K | 1M | [3] | + * | Exynos 4 Quad 4415 | 4 | 32K | 32K | 1M | | + * | TI OMAP 4430 | 2 | 32K | 32K | 1M | [4] | + * | TI OMAP 4460 | 2 | 32K | 32K | 1M | [5] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] http://www.samsung.com/global/business/semiconductor/file/product/Exynos_4_Dual_45nm_User_Manaul_Public_REV1.00-0.pdf + * [2] http://www.samsung.com/global/business/semiconductor/file/product/Exynos_4_Dual_32nm_User_Manaul_Public_REV100-0.pdf + * [3] http://www.samsung.com/global/business/semiconductor/file/product/Exynos_4_Quad_User_Manaul_Public_REV1.00-0.pdf + * [4] https://www.hotchips.org/wp-content/uploads/hc_archives/hc21/2_mon/HC21.24.400.ClientProcessors-Epub/HC21.24.421.Witt-OMAP4430.pdf + * [5] http://www.anandtech.com/show/5310/samsung-galaxy-nexus-ice-cream-sandwich-review/9 + */ + + /* Use Exynos 4 specs */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l2 = (struct cpuinfo_cache) { + .size = 1024 * 1024, + /* OMAP4460 in Pandaboard ES has 16-way set-associative L2 cache */ + .associativity = 16, + .line_size = 32 + }; + break; + case cpuinfo_uarch_cortex_a15: + /* + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. + * The L1 instruction memory system has the following features: + * - 32KB 2-way set-associative instruction cache. + * - Fixed line length of 64 bytes. + * The L1 data memory system has the following features: + * - 32KB 2-way set-associative data cache. + * - Fixed line length of 64 bytes. + * 7.1. About the L2 memory system + * The features of the L2 memory system include: + * - Configurable L2 cache size of 512KB, 1MB, 2MB and 4MB. + * - Fixed line length of 64 bytes. + * - 16-way set-associative cache structure. + * + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Exynos 5 Dual 5250 | 2 | 32K | 32K | 1M | [1] | + * | Exynos 5 Hexa 5260 | 2(+4) | 32K | 32K | 1M(+512K) | [2] | + * | Exynos 5 Octa 5410 | 4(+4) | 32K | 32K | 2M(+512K) | [3] | + * | Exynos 5 Octa 5420 | 4(+4) | 32K | 32K | 2M(+512K) | [3] | + * | Exynos 5 Octa 5422 | 4(+4) | 32K | 32K | 2M(+512K) | [3] | + * | Exynos 5 Octa 5430 | 4(+4) | 32K | 32K | 2M(+512K) | [3] | + * | Exynos 5 Octa 5800 | 4(+4) | 32K | 32K | 2M(+512K) | [3] | + * | Kirin 920 | 4(+4) | ? | ? 
| 2M(+512K) | [4] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] http://www.arndaleboard.org/wiki/downloads/supports/Exynos_5_Dual_User_Manaul_Public_REV1.00.pdf + * [2] http://www.yicsystem.com/wp-content/uploads/2014/08/Espresso5260P-Guide-Book.pdf + * [3] http://www.anandtech.com/show/6768/samsung-details-exynos-5-octa-architecture-power-at-isscc-13 + * [4] http://www.gizmochina.com/2014/10/07/hisilicon-kirin-920-tear-down/ + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 512 * 1024, + .associativity = 16, + .line_size = 64 + }; + break; + case cpuinfo_uarch_cortex_a17: + /* + * ARM Cortex-A17 MPCore Processor Technical Reference Manual: + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. + * The size of the instruction cache is implemented as either 32KB or 64KB. + * The size of the data cache is 32KB. + * + * The L1 instruction cache has the following features: + * - Instruction side cache line length of 64-bytes. + * - 4-way set-associative instruction cache. + * + * The L1 data cache has the following features: + * - Data side cache line length of 64-bytes. + * - 4-way set-associative data cache. + * + * 7.1. About the L2 Memory system + * An integrated L2 cache: + * - The cache size is implemented as either 256KB, 512KB, 1MB, 2MB, 4MB or 8MB. + * - A fixed line length of 64 bytes. + * - 16-way set-associative cache structure. + * + * +------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +------------------+-------+-----------+-----------+-----------+-----------+ + * | MediaTek MT6595 | 4(+4) | 32K | 32K | 2M(+512K) | [1] | + * +------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] https://blog.osakana.net/archives/5268 + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 512 * 1024, + .associativity = 16, + .line_size = 64 + }; + break; +#endif /* CPUINFO_ARCH_ARM && !defined(__ARM_ARCH_8A__) */ + case cpuinfo_uarch_cortex_a35: + /* + * ARM Cortex‑A35 Processor Technical Reference Manual: + * 6.1. About the L1 memory system + * The L1 memory system includes several power-saving and performance-enhancing features. + * These include separate instruction and data caches, which can be configured + * independently during implementation to sizes of 8KB, 16KB, 32KB, or 64KB. + * + * L1 instruction-side memory system + * A dedicated instruction cache that: + * - is virtually indexed and physically tagged. + * - is 2-way set associative. + * - is configurable to be 8KB, 16KB, 32KB, or 64KB. + * - uses a cache line length of 64 bytes. + * + * L1 data-side memory system + * A dedicated data cache that: + * - is physically indexed and physically tagged. + * - is 4-way set associative. + * - is configurable to be 8KB, 16KB, 32KB, or 64KB. + * - uses a cache line length of 64 bytes. + * + * 7.1. About the L2 memory system + * The L2 cache is 8-way set associative. 
+ * Further features of the L2 cache are: + * - Configurable size of 128KB, 256KB, 512KB, and 1MB. + * - Fixed line length of 64 bytes. + * - Physically indexed and tagged. + * + * +-----------------+---------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +-----------------+---------+-----------+-----------+-----------+-----------+ + * | MediaTek MT6599 | 4(+4+2) | ? | ? | ? | | + * +-----------------+---------+-----------+-----------+-----------+-----------+ + */ + *l1i = (struct cpuinfo_cache) { + .size = 16 * 1024, /* assumption based on low-end Cortex-A53 */ + .associativity = 2, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 16 * 1024, /* assumption based on low-end Cortex-A53 */ + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = 256 * 1024, /* assumption based on low-end Cortex-A53 */ + .associativity = 8, + .line_size = 64 + }; + break; + case cpuinfo_uarch_cortex_a53: + /* + * ARM Cortex-A53 MPCore Processor Technical Reference Manual: + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. The implementer configures the + * instruction and data caches independently during implementation, to sizes of 8KB, 16KB, 32KB, or 64KB. + * + * The L1 Instruction memory system has the following key features: + * - Instruction side cache line length of 64 bytes. + * - 2-way set associative L1 Instruction cache. + * + * The L1 Data memory system has the following features: + * - Data side cache line length of 64 bytes. + * - 4-way set associative L1 Data cache. + * + * 7.1. About the L2 memory system + * The L2 memory system consists of an: + * - Optional tightly-coupled L2 cache that includes: + * - Configurable L2 cache size of 128KB, 256KB, 512KB, 1MB and 2MB. + * - Fixed line length of 64 bytes. + * - 16-way set-associative cache structure. 
+ * + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Broadcom BCM2837 | 4 | 16K | 16K | 512K | [1] | + * | Exynos 7420 | 4(+4) | 32K | 32K | 256K | [2, 3] | + * | Exynos 8890 | 4(+4) | 32K | 32K | 256K | [4] | + * | Rochchip RK3368 | 4+4 | 32K | 32K | 512K+256K | sysfs | + * | MediaTek MT8173C | 2(+2) | 32K | 32K | 512K(+1M) | sysfs | + * | Snapdragon 410 | 4 | 32K | 32K | 512K | [3] | + * | Snapdragon 630 | 4+4 | 32K | 32K | 1M+512K | sysfs | + * | Snapdragon 636 | 4(+4) | 32K+64K | 32K+64K | 1M+1M | sysfs | + * | Snapdragon 660 | 4(+4) | 32K+64K | 32K+64K | 1M+1M | sysfs | + * | Snapdragon 835 | 4(+4) | 32K+64K | 32K+64K | 1M(+2M) | sysfs | + * | Kirin 620 | 4+4 | 32K | 32K | 512K | [5] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] https://www.raspberrypi.org/forums/viewtopic.php?f=91&t=145766 + * [2] http://www.anandtech.com/show/9330/exynos-7420-deep-dive/2 + * [3] https://www.usenix.org/system/files/conference/usenixsecurity16/sec16_paper_lipp.pdf + * [4] http://www.boardset.com/products/products_v8890.php + * [5] http://mirror.lemaker.org/Hi6220V100_Multi-Mode_Application_Processor_Function_Description.pdf + */ + if (midr_is_qualcomm_cortex_a53_silver(midr)) { + /* Qualcomm-modified Cortex-A53 in Snapdragon 630/660/835 */ + + uint32_t l2_size = 512 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_qualcomm_msm: + if (chipset->model == 8998) { + /* Snapdragon 835 (MSM8998): 1 MB L2 (little cores only) */ + l2_size = 1024 * 1024; + } + break; + case cpuinfo_arm_chipset_series_qualcomm_snapdragon: + switch (chipset->model) { + case 630: + if (cluster_id == 0) { + /* Snapdragon 630: 1 MB L2 for the big cores */ + l2_size = 1024 * 1024; + } + break; + case 636: + /* Snapdragon 636: 1 MB L2 (little cores only) */ + l2_size = 1024 * 1024; + break; + case 660: + case 662: + /* Snapdragon 660: 1 MB L2 (little cores only) */ + l2_size = 1024 * 1024; + break; + } + break; + default: + break; + } + + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 16, + .line_size = 64 + }; + } else { + /* Standard Cortex-A53 */ + + /* Use conservative values by default */ + uint32_t l1_size = 16 * 1024; + uint32_t l2_size = 256 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_qualcomm_msm: + l1_size = 32 * 1024; + l2_size = 512 * 1024; + switch (chipset->model) { + case 8937: /* Snapdragon 430 */ + case 8940: /* Snapdragon 435 */ + case 8953: /* Snapdragon 625 or 626 (8953PRO) */ + if (cluster_id == 0) { + /* 1M L2 for big cluster */ + l2_size = 1024 * 1024; + } + break; + case 8952: /* Snapdragon 617 */ + if (cluster_id != 0) { + /* 256K L2 for LITTLE cluster */ + l2_size = 256 * 1024; + } + break; + default: + /* Silence compiler warning about unhandled enum values */ + break; + } + break; + case cpuinfo_arm_chipset_series_qualcomm_apq: + l1_size = 32 * 1024; + l2_size = 512 * 1024; + break; + case cpuinfo_arm_chipset_series_qualcomm_snapdragon: + l1_size = 32 * 1024; + l2_size = 512 * 1024; + if (chipset->model == 450 && cluster_id == 0) { + /* Snapdragon 450: 1M L2 for big cluster */ + l2_size = 1024 
* 1024; + } + break; + case cpuinfo_arm_chipset_series_hisilicon_hi: + l1_size = 32 * 1024; + l2_size = 512 * 1024; + break; + case cpuinfo_arm_chipset_series_hisilicon_kirin: + l1_size = 32 * 1024; + switch (chipset->model) { + case 970: /* Kirin 970 */ + l2_size = 1024 * 1024; + break; + default: + l2_size = 512 * 1024; + break; + } + break; + case cpuinfo_arm_chipset_series_mediatek_mt: + switch (chipset->model) { + case 8173: + l1_size = 32 * 1024; + l2_size = 512 * 1024; + break; + } + break; + case cpuinfo_arm_chipset_series_rockchip_rk: + l1_size = 32 * 1024; + switch (chipset->model) { + case 3368: + if (cluster_id == 0) { + /* RK3368: 512 KB L2 for the big cores */ + l2_size = 512 * 1024; + } + break; + } + break; + case cpuinfo_arm_chipset_series_broadcom_bcm: + switch (chipset->model) { + case 2837: /* BCM2837 */ + l2_size = 512 * 1024; + break; + } + break; + case cpuinfo_arm_chipset_series_samsung_exynos: + l1_size = 32 * 1024; + break; + default: + /* Silence compiler warning about unhandled enum values */ + break; + } + *l1i = (struct cpuinfo_cache) { + .size = l1_size, + .associativity = 2, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = l1_size, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 16, + .line_size = 64 + }; + } + break; + case cpuinfo_uarch_cortex_a55r0: + case cpuinfo_uarch_cortex_a55: + /* + * ARM Cortex-A55 Core Technical Reference Manual + * A6.1. About the L1 memory system + * The Cortex®-A55 core's L1 memory system enhances core performance and power efficiency. + * It consists of separate instruction and data caches. You can configure instruction and data caches + * independently during implementation to sizes of 16KB, 32KB, or 64KB. + * + * L1 instruction-side memory system + * The L1 instruction-side memory system provides an instruction stream to the DPU. Its key features are: + * - 64-byte instruction side cache line length. + * - 4-way set associative L1 instruction cache. + * + * L1 data-side memory system + * - 64-byte data side cache line length. + * - 4-way set associative L1 data cache. + * + * A7.1 About the L2 memory system + * The Cortex-A55 L2 memory system is required to interface the Cortex-A55 cores to the L3 memory system. + * The L2 memory subsystem consists of: + * - An optional 4-way, set-associative L2 cache with a configurable size of 64KB, 128KB or 256KB. Cache + * lines have a fixed length of 64 bytes. + * + * The main features of the L2 memory system are: + * - Strictly exclusive with L1 data cache. + * - Pseudo-inclusive with L1 instruction cache. + * - Private per-core unified L2 cache. + * + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | L3 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Snapdragon 845 | 4(+4) | 32K | 32K | 128K | 2M | [1], sysfs | + * | Exynos 9810 | 4(+4) | ? | ? 
| None | 512K | [2] | + * | Kirin 980 | 4(+4) | 32K | 32K | 128K | 4M | [3] | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * + * [1] https://www.anandtech.com/show/12114/qualcomm-announces-snapdragon-845-soc + * [2] https://www.anandtech.com/show/12478/exynos-9810-handson-awkward-first-results + * [3] https://en.wikichip.org/wiki/hisilicon/kirin/980 + */ + if (midr_is_qualcomm_cortex_a55_silver(midr)) { + /* Qualcomm-modified Cortex-A55 in Snapdragon 670 / 710 / 845 */ + uint32_t l3_size = 1024 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_qualcomm_snapdragon: + /* Snapdragon 845: 2M L3 cache */ + if (chipset->model == 845) { + l3_size = 2 * 1024 * 1024; + } + break; + default: + break; + } + + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l2 = (struct cpuinfo_cache) { + .size = 128 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l3 = (struct cpuinfo_cache) { + .size = l3_size, + .associativity = 16, + .line_size = 64, + }; + } else { + /* Standard Cortex-A55 */ + + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64, + }; + if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos) { + *l2 = (struct cpuinfo_cache) { + .size = 512 * 1024, + /* DynamIQ */ + .associativity = 16, + .line_size = 64, + }; + } else { + uint32_t l3_size = 1024 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_hisilicon_kirin: + /* Kirin 980: 4M L3 cache */ + if (chipset->model == 980) { + l3_size = 4 * 1024 * 1024; + } + break; + default: + break; + } + *l2 = (struct cpuinfo_cache) { + .size = 128 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l3 = (struct cpuinfo_cache) { + .size = l3_size, + /* DynamIQ */ + .associativity = 16, + .line_size = 64, + }; + } + } + break; + case cpuinfo_uarch_cortex_a57: + /* + * ARM Cortex-A57 MPCore Processor Technical Reference Manual: + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. + * + * The L1 instruction memory system has the following features: + * - 48KB 3-way set-associative instruction cache. + * - Fixed line length of 64 bytes. + * + * The L1 data memory system has the following features: + * - 32KB 2-way set-associative data cache. + * - Fixed line length of 64 bytes. + * + * 7.1 About the L2 memory system + * The features of the L2 memory system include: + * - Configurable L2 cache size of 512KB, 1MB, and 2MB. + * - Fixed line length of 64 bytes. + * - 16-way set-associative cache structure. + * - Inclusion property with L1 data caches. 
+ * + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Snapdragon 810 | 4(+4) | 32K | 48K | 2M | [1] | + * | Exynos 7420 | 4(+4) | 32K | 48K | 2M | [2] | + * | Jetson TX1 | 4 | 32K | 48K | 2M | [3] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] http://www.anandtech.com/show/9837/snapdragon-820-preview + * [2] http://www.anandtech.com/show/9330/exynos-7420-deep-dive/2 + * [3] https://devblogs.nvidia.com/parallelforall/jetson-tx2-delivers-twice-intelligence-edge/ + */ + *l1i = (struct cpuinfo_cache) { + .size = 48 * 1024, + .associativity = 3, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 512 * 1024, + .associativity = 16, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE + }; + break; + case cpuinfo_uarch_cortex_a65: + { + /* + * ARM Cortex‑A65 Core Technical Reference Manual + * A6.1. About the L1 memory system + * The L1 memory system enhances the performance and power efficiency in the Cortex‑A65 core. + * It consists of separate instruction and data caches. You can configure instruction and data caches + * independently during implementation to sizes of 32KB or 64KB. + * + * L1 instruction-side memory system + * The L1 instruction-side memory system provides an instruction stream to the DPU. Its key features are: + * - 64-byte instruction side cache line length. + * - 4-way set associative L1 instruction cache. + * + * L1 data-side memory system + * - 64-byte data side cache line length. + * - 4-way set associative L1 data cache. + * + * A7.1 About the L2 memory system + * The Cortex‑A65 L2 memory system is required to interface the Cortex‑A65 cores to the L3 memory system. + * The L2 memory subsystem consists of: + * - An optional 4-way, set-associative L2 cache with a configurable size of 64KB, 128KB, or 256KB. + * Cache lines have a fixed length of 64 bytes. + * + * The main features of the L2 memory system are: + * - Strictly exclusive with L1 data cache. + * - Pseudo-inclusive with L1 instruction cache. + * - Private per-core unified L2 cache. + */ + const uint32_t l1_size = 32 * 1024; + const uint32_t l2_size = 128 * 1024; + const uint32_t l3_size = 512 * 1024; + *l1i = (struct cpuinfo_cache) { + .size = l1_size, + .associativity = 4, + .line_size = 64, + }; + *l1d = (struct cpuinfo_cache) { + .size = l1_size, + .associativity = 4, + .line_size = 64, + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 4, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE + }; + *l3 = (struct cpuinfo_cache) { + .size = l3_size, + /* DynamIQ */ + .associativity = 16, + .line_size = 64, + }; + break; + } + case cpuinfo_uarch_cortex_a72: + { + /* + * ARM Cortex-A72 MPCore Processor Technical Reference Manual + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. + * + * The L1 instruction memory system has the following features: + * - 48KB 3-way set-associative instruction cache. + * - Fixed line length of 64 bytes. + * + * The L1 data memory system has the following features: + * - 32KB 2-way set-associative data cache. + * - Fixed cache line length of 64 bytes. 
+ * + * 7.1 About the L2 memory system + * The features of the L2 memory system include: + * - Configurable L2 cache size of 512KB, 1MB, 2MB and 4MB. + * - Fixed line length of 64 bytes. + * - Banked pipeline structures. + * - Inclusion property with L1 data caches. + * - 16-way set-associative cache structure. + * + * +---------------------+---------+-----------+-----------+------------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +---------------------+---------+-----------+-----------+------------+-----------+ + * | Snapdragon 650 | 2(+4) | 32K(+32K) | 48K(+32K) | 1M(+512K) | [1] | + * | Snapdragon 652 | 4(+4) | 32K(+32K) | 48K(+32K) | 1M(+512K) | [2] | + * | Snapdragon 653 | 4(+4) | 32K(+32K) | 48K(+32K) | 1M(+512K) | [3] | + * | HiSilicon Kirin 950 | 4(+4) | 32K+32K | 48K+32K | ? | | + * | HiSilicon Kirin 955 | 4(+4) | 32K+32K | 48K+32K | ? | | + * | MediaTek MT8173C | 2(+2) | 32K(+32K) | 48K(+32K) | 1M(+512K) | sysfs | + * | MediaTek Helio X20 | 2(+4+4) | ? | ? | ? | | + * | MediaTek Helio X23 | 2(+4+4) | ? | ? | ? | | + * | MediaTek Helio X25 | 2(+4+4) | ? | ? | ? | | + * | MediaTek Helio X27 | 2(+4+4) | ? | ? | ? | | + * | Broadcom BCM2711 | 4 | 32K | 48K | 1M | [4] | + * +---------------------+---------+-----------+-----------+------------+-----------+ + * + * [1] http://pdadb.net/index.php?m=processor&id=578&c=qualcomm_snapdragon_618_msm8956__snapdragon_650 + * [2] http://pdadb.net/index.php?m=processor&id=667&c=qualcomm_snapdragon_620_apq8076__snapdragon_652 + * [3] http://pdadb.net/index.php?m=processor&id=692&c=qualcomm_snapdragon_653_msm8976sg__msm8976_pro + * [4] https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2711/README.md + */ + uint32_t l2_size; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_hisilicon_kirin: + l2_size = 2 * 1024 * 1024; + break; + default: + l2_size = 1024 * 1024; + break; + } + + *l1i = (struct cpuinfo_cache) { + .size = 48 * 1024, + .associativity = 3, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 2, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 16, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE + }; + break; + } + case cpuinfo_uarch_cortex_a73: + { + /* + * ARM Cortex‑A73 MPCore Processor Technical Reference Manual + * 6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. + * The size of the instruction cache is 64KB. + * The size of the data cache is configurable to either 32KB or 64KB. + * + * The L1 instruction memory system has the following key features: + * - Virtually Indexed, Physically Tagged (VIPT), four-way set-associative instruction cache. + * - Fixed cache line length of 64 bytes. + * + * The L1 data memory system has the following features: + * - ...the data cache behaves like an eight-way set associative PIPT cache (for 32KB configurations) + * and a 16-way set associative PIPT cache (for 64KB configurations). + * - Fixed cache line length of 64 bytes. + * + * 7.1 About the L2 memory system + * The L2 memory system consists of: + * - A tightly-integrated L2 cache with: + * - A configurable size of 256KB, 512KB, 1MB, 2MB, 4MB, or 8MB. + * - A 16-way, set-associative structure. + * - A fixed line length of 64 bytes. + * + * The ARM Cortex A73 - Artemis Unveiled [1] + * "ARM still envisions that most vendors will choose to use configurations of 1 to + * 2MB in consumer products. 
The L2 cache is inclusive of the L1 cache. " + * + * +---------------------+---------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +---------------------+---------+-----------+-----------+-----------+-----------+ + * | HiSilicon Kirin 960 | 4(+4) | 64K+32K | 64K+32K | ? | [2] | + * | MediaTek Helio X30 | 2(+4+4) | ? | 64K+ ? | ? | | + * | Snapdragon 636 | 4(+4) | 64K(+32K) | 64K(+32K) | 1M(+1M) | sysfs | + * | Snapdragon 660 | 4(+4) | 64K+32K | 64K+32K | 1M(+1M) | [3] | + * | Snapdragon 835 | 4(+4) | 64K+32K | 64K+32K | 2M(+1M) | sysfs | + * +---------------------+---------+-----------+-----------+-----------+-----------+ + * + * [1] http://www.anandtech.com/show/10347/arm-cortex-a73-artemis-unveiled/2 + * [2] http://www.anandtech.com/show/11088/hisilicon-kirin-960-performance-and-power/3 + * [3] https://arstechnica.com/gadgets/2017/05/qualcomms-snapdragon-660-and-630-bring-more-high-end-features-to-midrange-chips/ + */ + uint32_t l1d_size = 32 * 1024; + uint32_t l2_size = 512 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_hisilicon_kirin: + l1d_size = 64 * 1024; + l2_size = 2 * 1024 * 1024; + break; + case cpuinfo_arm_chipset_series_mediatek_mt: + l1d_size = 64 * 1024; + l2_size = 1 * 1024 * 1024; /* TODO: verify assumption */ + break; + default: + switch (midr) { + case UINT32_C(0x51AF8001): /* Kryo 280 Gold */ + l1d_size = 64 * 1024; + l2_size = 2 * 1024 * 1024; + break; + case UINT32_C(0x51AF8002): /* Kryo 260 Gold */ + l1d_size = 64 * 1024; + l2_size = 1 * 1024 * 1024; + break; + } + } + + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = l1d_size, + .associativity = (l1d_size >> 12), + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 16, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE + }; + break; + } + case cpuinfo_uarch_cortex_a75: + { + /* + * ARM Cortex-A75 Core Technical Reference Manual + * A6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. Both have a fixed size of 64KB. + * + * A6.1.1 L1 instruction-side memory system + * The L1 instruction memory system has the following key features: + * - Virtually Indexed, Physically Tagged (VIPT), four-way set-associative instruction cache. + * - Fixed cache line length of 64 bytes. + * + * A6.1.2 L1 data-side memory system + * The L1 data memory system has the following features: + * - Physically Indexed, Physically Tagged (PIPT), 16-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * - Pseudo-random cache replacement policy. + * + * A7.1 About the L2 memory system + * The L2 memory subsystem consist of: + * - An 8-way set associative L2 cache with a configurable size of 256KB or 512KB. + * Cache lines have a fixed length of 64 bytes. 
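Editorial note: the (l1d_size >> 12) associativity in the Cortex-A73 case above is a compact encoding of the TRM statement quoted there, namely that the data cache behaves as an 8-way set-associative PIPT cache in 32KB configurations and 16-way in 64KB configurations; dividing the size by 4096 gives exactly those way counts. A minimal standalone sketch of that relation follows (the helper name is illustrative and not part of cpuinfo):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative helper (not part of cpuinfo): Cortex-A73 L1D associativity from its size.
     * size >> 12 equals size / 4096, so 32 KB yields 8 ways and 64 KB yields 16 ways,
     * matching the 8-way/16-way behaviour quoted from the TRM above. */
    static uint32_t cortex_a73_l1d_associativity(uint32_t l1d_size) {
        return l1d_size >> 12;
    }

    int main(void) {
        assert(cortex_a73_l1d_associativity(32 * 1024) == 8);
        assert(cortex_a73_l1d_associativity(64 * 1024) == 16);
        return 0;
    }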
+ * + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | L3 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Snapdragon 845 | 4(+4) | 64K | 64K | 256K | 2M | [1], sysfs | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * + * [1] https://www.anandtech.com/show/12114/qualcomm-announces-snapdragon-845-soc + */ + uint32_t l3_size = 1024 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_qualcomm_snapdragon: + /* Snapdragon 845: 2M L3 cache */ + if (chipset->model == 845) { + l3_size = 2 * 1024 * 1024; + } + break; + default: + break; + } + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 16, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = 256 * 1024, + .associativity = 8, + .line_size = 64 + }; + *l3 = (struct cpuinfo_cache) { + .size = l3_size, + .associativity = 16, + .line_size = 64 + }; + break; + } + case cpuinfo_uarch_cortex_a76: + { + /* + * ARM Cortex-A76 Core Technical Reference Manual + * A6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. Both have a fixed size of 64KB. + * + * A6.1.1 L1 instruction-side memory system + * The L1 instruction memory system has the following key features: + * - Virtually Indexed, Physically Tagged (VIPT), which behaves as a Physically Indexed, + * Physically Tagged (PIPT) 4-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * + * A6.1.2 L1 data-side memory system + * The L1 data memory system has the following features: + * - Virtually Indexed, Physically Tagged (VIPT), which behaves as a Physically Indexed, + * Physically Tagged (PIPT) 4-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * - Pseudo-LRU cache replacement policy. + * + * A7.1 About the L2 memory system + * The L2 memory subsystem consist of: + * - An 8-way set associative L2 cache with a configurable size of 128KB, 256KB or 512KB. + * Cache lines have a fixed length of 64 bytes. + * - Strictly inclusive with L1 data cache. Weakly inclusive with L1 instruction cache. + * - Dynamic biased replacement policy. + * - Modified Exclusive Shared Invalid (MESI) coherency. 
+ * + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | L3 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Kirin 980 | 4(+4) | 64K | 64K | 512K | 4M | [1], [2] | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * + * [1] https://www.anandtech.com/show/13298/hisilicon-announces-the-kirin-980-first-a76-g76-on-7nm + * [2] https://en.wikichip.org/wiki/hisilicon/kirin/980 + */ + uint32_t l2_size = 256 * 1024; + uint32_t l3_size = 1024 * 1024; + switch (chipset->series) { + case cpuinfo_arm_chipset_series_hisilicon_kirin: + /* Kirin 980: 512K L2 cache + 4M L3 cache */ + if (chipset->model == 980) { + l2_size = 512 * 1024; + l3_size = 4 * 1024 * 1024; + } + break; + default: + break; + } + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 8, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + *l3 = (struct cpuinfo_cache) { + .size = l3_size, + .associativity = 16, + .line_size = 64, + }; + break; + } + case cpuinfo_uarch_cortex_a77: + { + /* + * ARM Cortex-A77 Core Technical Reference Manual + * A6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. Both have a fixed size of 64KB. + * + * A6.1.1 L1 instruction-side memory system + * The L1 instruction memory system has the following key features: + * - Virtually Indexed, Physically Tagged (VIPT), which behaves as a Physically Indexed, + * Physically Tagged (PIPT) 4-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * + * A6.1.2 L1 data-side memory system + * The L1 data memory system has the following features: + * - Virtually Indexed, Physically Tagged (VIPT), which behaves as a Physically Indexed, + * Physically Tagged (PIPT) 4-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * - Pseudo-LRU cache replacement policy. + * + * A7.1 About the L2 memory system + * The L2 memory subsystem consist of: + * - An 8-way set associative L2 cache with a configurable size of 128KB, 256KB or 512KB. Cache lines + * have a fixed length of 64 bytes. + * - Strictly inclusive with L1 data cache. Weakly inclusive with L1 instruction cache. + */ + const uint32_t l2_size = 256 * 1024; + const uint32_t l3_size = 1024 * 1024; + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l2 = (struct cpuinfo_cache) { + .size = l2_size, + .associativity = 8, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + *l3 = (struct cpuinfo_cache) { + .size = l3_size, + .associativity = 16, + .line_size = 64, + }; + break; + } + case cpuinfo_uarch_neoverse_n1: + case cpuinfo_uarch_neoverse_v1: + case cpuinfo_uarch_neoverse_n2: + { + /* + * ARM Neoverse-n1 Core Technical Reference Manual + * A6.1. About the L1 memory system + * The L1 memory system consists of separate instruction and data caches. Both have a fixed size of 64KB. 
+ * + * A6.1.1 L1 instruction-side memory system + * The L1 instruction memory system has the following key features: + * - Virtually Indexed, Physically Tagged (VIPT), which behaves as a Physically Indexed, + * Physically Tagged (PIPT) 4-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * + * A6.1.2 L1 data-side memory system + * The L1 data memory system has the following features: + * - Virtually Indexed, Physically Tagged (VIPT), which behaves as a Physically Indexed, + * Physically Tagged (PIPT) 4-way set-associative L1 data cache. + * - Fixed cache line length of 64 bytes. + * - Pseudo-LRU cache replacement policy. + * + * A7.1 About the L2 memory system + * The L2 memory subsystem consist of: + * - An 8-way set associative L2 cache with a configurable size of 256KB, 512KB, or 1024KB. Cache lines + * have a fixed length of 64 bytes. + * - Strictly inclusive with L1 data cache. + * - When configured with instruction cache hardware coherency, strictly inclusive with L1 instruction cache. + * - When configured without instruction cache hardware coherency, weakly inclusive with L1 instruction cache. + */ + + const uint32_t min_l2_size_KB= 256; + const uint32_t min_l3_size_KB = 0; + + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64, + }; + *l2 = (struct cpuinfo_cache) { + .size = min_l2_size_KB * 1024, + .associativity = 8, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + *l3 = (struct cpuinfo_cache) { + .size = min_l3_size_KB * 1024, + .associativity = 16, + .line_size = 64, + }; + break; + } +#if CPUINFO_ARCH_ARM && !defined(__ARM_ARCH_8A__) + case cpuinfo_uarch_scorpion: + /* + * - "The CPU includes 32KB instruction and data caches as + * well as a complete memory-management unit (MMU) suitable + * for high-level operating systems. The CPU also has + * 256KB of SRAM that can be allocated in 64KB increments + * to level-two (L2) cache or tightly coupled memory (TCM)." [1] + * We interpret it as L2 cache being 4-way set-associative on single-core Scorpion. + * - L1 Data Cache = 32 KB. 32 B/line. [2] + * - L2 Cache = 256 KB. 128 B/line. [2] + * - 256 KB (single-core) or 512 KB (dual-core) L2 cache [3] + * - Single or dual-core configuration [3] + * - For L1 cache assume the same associativity as Krait + * + * [1] https://www.qualcomm.com/media/documents/files/linley-report-on-dual-core-snapdragon.pdf + * [2] http://www.7-cpu.com/cpu/Snapdragon.html + * [3] https://en.wikipedia.org/wiki/Scorpion_(CPU) + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 256 * 1024, + .associativity = 4, + .line_size = 128 + }; + break; + case cpuinfo_uarch_krait: + /* + * - L0 Data cache = 4 KB. 64 B/line, direct mapped [1] + * - L0 Instruction cache = 4 KB. [1] + * - L1 Data cache = 16 KB. 64 B/line, 4-way [1] + * - L1 Instruction cache = 16 KB, 4-way [1] + * - L2 Cache = 1 MB, 128 B/line, 8-way. Each core has fast access only to 512 KB of L2 cache. 
[1] + * - L2 = 1MB (dual core) or 2MB (quad core), 8-way set associative [2] + * + * [1] http://www.7-cpu.com/cpu/Krait.html + * [2] http://www.anandtech.com/show/4940/qualcomm-new-snapdragon-s4-msm8960-krait-architecture/2 + */ + *l1i = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 64 /* assume same as L1D */ + }; + *l1d = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 512 * 1024, + .associativity = 8, + .line_size = 128 + }; + break; +#endif /* CPUINFO_ARCH_ARM && !defined(__ARM_ARCH_8A__) */ + case cpuinfo_uarch_kryo: + /* + * +-----------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +-----------------+-------+-----------+-----------+-----------+-----------+ + * | Snapdragon 820 | 2+2 | 24K | 32K | 1M+512K | [1, 2] | + * | Snapdragon 821 | 2+2 | ? | ? | 1M+512K | [1] | + * +-----------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] http://www.anandtech.com/show/9837/snapdragon-820-preview/2 + * [2] https://www.inforcecomputing.com/public_docs/Inforce6601/Inforce_6601_Micro-SOM_FAQs_04-2016-1.pdf + */ + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 24 * 1024, + .associativity = 3, + .line_size = 64 + }; + if (midr_is_kryo_silver(midr)) { + /* Kryo "Silver" */ + *l2 = (struct cpuinfo_cache) { + .size = 512 * 1024, + .associativity = 8, + .line_size = 128 + }; + } else { + /* Kryo "Gold" */ + *l2 = (struct cpuinfo_cache) { + .size = 1024 * 1024, + .associativity = 8, + .line_size = 128 + }; + } + break; + case cpuinfo_uarch_denver: + case cpuinfo_uarch_denver2: + /* + * The Denver chip includes a 128KB, 4-way level 1 instruction cache, a 64KB, 4-way level 2 data cache, + * and a 2MB, 16-way level 2 cache, all of which can service both cores. [1] + * + * All the caches have 64-byte lines. [2] + * + * [1] http://www.pcworld.com/article/2463900/nvidia-reveals-pc-like-performance-for-denver-tegra-k1.html + * [2] http://linleygroup.com/newsletters/newsletter_detail.php?num=5205&year=2014 + */ + *l1i = (struct cpuinfo_cache) { + .size = 128 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = 2 * 1024 * 1024, + .associativity = 16, + .line_size = 64 + }; + break; + case cpuinfo_uarch_exynos_m1: + case cpuinfo_uarch_exynos_m2: + /* + * - "Moving past branch prediction we can see some elements of how the cache is set up for the L1 I$, + * namely 64 KB split into four sets with 128-byte line sizes for 128 cache lines per set" [1] + * - "For loads and stores, a 32 KB, 8-way set associative cache with 64 byte line size is used" [1] + * - "The L2 cache here is 2MB shared across all cores split into 16 sets. 
This memory is also split + * into 4 banks and has a 22 cycle latency" [1] + * + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * | Exynos 8 Octa 8890 | 4(+4) | 64K | 32K | 2M | [1] | + * | Exynos 8 Octa 8895 | 4(+4) | 64K | 32K | 2M | [2] | + * +--------------------+-------+-----------+-----------+-----------+-----------+ + * + * [1] http://www.anandtech.com/show/10590/hot-chips-2016-exynos-m1-architecture-disclosed + * [2] https://www.extremetech.com/mobile/244949-samsungs-exynos-8895-features-custom-cpu-cores-first-10nm-chip-market + */ + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4, + .line_size = 128 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 8, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = 2 * 1024 * 1024, + .associativity = 16, + .line_size = 64 + }; + break; + case cpuinfo_uarch_exynos_m3: + /* + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | L3 cache | Reference | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * | Exynos 9810 | 4(+4) | 64K | ? | 512K | 4M | [1] | + * +--------------------+-------+-----------+-----------+-----------+----------+------------+ + * + * [1] https://www.anandtech.com/show/12478/exynos-9810-handson-awkward-first-results + */ + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024 /* assume same as in Exynos M1/M2 cores */, + .associativity = 4 /* assume same as in Exynos M1/M2 cores */, + .line_size = 128 /* assume same as in Exynos M1/M2 cores */ + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 8 /* assume same as in Exynos M1/M2 cores */, + .line_size = 64 /* assume same as in Exynos M1/M2 cores */, + }; + *l2 = (struct cpuinfo_cache) { + .size = 512 * 1024, + .associativity = 16 /* assume same as in Exynos M1/M2 cores */, + .line_size = 64 /* assume same as in Exynos M1/M2 cores */, + }; + *l3 = (struct cpuinfo_cache) { + .size = 4 * 1024 * 1024, + .associativity = 16 /* assume DynamIQ cache */, + .line_size = 64 /* assume DynamIQ cache */, + }; + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case cpuinfo_uarch_thunderx: + /* + * "78K-Icache and 32K-D cache per core, 16 MB shared L2 cache" [1] + * + * [1] https://www.cavium.com/pdfFiles/ThunderX_CP_PB_Rev1.pdf + */ + *l1i = (struct cpuinfo_cache) { + .size = 78 * 1024, + .associativity = 4 /* assumption */, + .line_size = 64 /* assumption */ + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4 /* assumption */, + .line_size = 64 /* assumption */ + }; + *l2 = (struct cpuinfo_cache) { + .size = 16 * 1024 * 1024, + .associativity = 8 /* assumption */, + .line_size = 64 /* assumption */ + }; + break; + case cpuinfo_uarch_taishan_v110: + /* + * It features private 64 KiB L1 instruction and data caches as well as 512 KiB of private L2. 
[1] + * + * +------------------+-------+-----------+-----------+-----------+----------+-----------+ + * | Processor model | Cores | L1D cache | L1I cache | L2 cache | L3 cache | Reference | + * +------------------+-------+-----------+-----------+-----------+----------+-----------+ + * | Kunpeng 920-3226 | 32 | 64K | 64K | 512K | 32M | [2] | + * +------------------+-------+-----------+-----------+-----------+----------+-----------+ + * | Kunpeng 920-4826 | 48 | 64K | 64K | 512K | 48M | [3] | + * +------------------+-------+-----------+-----------+-----------+----------+-----------+ + * | Kunpeng 920-6426 | 64 | 64K | 64K | 512K | 64M | [4] | + * +------------------+-------+-----------+-----------+-----------+----------+-----------+ + * + * [1] https://en.wikichip.org/wiki/hisilicon/microarchitectures/taishan_v110 + * [2] https://en.wikichip.org/wiki/hisilicon/kunpeng/920-3226 + * [3] https://en.wikichip.org/wiki/hisilicon/kunpeng/920-4826 + * [4] https://en.wikichip.org/wiki/hisilicon/kunpeng/920-6426 + */ + *l1i = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4 /* assumption */, + .line_size = 128 /* assumption */, + }; + *l1d = (struct cpuinfo_cache) { + .size = 64 * 1024, + .associativity = 4 /* assumption */, + .line_size = 128 /* assumption */, + }; + *l2 = (struct cpuinfo_cache) { + .size = 512 * 1024, + .associativity = 8 /* assumption */, + .line_size = 128 /* assumption */, + .flags = CPUINFO_CACHE_INCLUSIVE /* assumption */, + }; + *l3 = (struct cpuinfo_cache) { + .size = cluster_cores * 1024 * 1024, + .associativity = 16 /* assumption */, + .line_size = 128 /* assumption */, + }; + break; +#endif + case cpuinfo_uarch_cortex_a12: + case cpuinfo_uarch_cortex_a32: + default: + cpuinfo_log_warning("target uarch not recognized; using generic cache parameters"); + /* Follow OpenBLAS */ + if (arch_version >= 8) { + *l1i = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l1d = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .line_size = 64 + }; + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 256 * 1024, + .associativity = 8, + .line_size = 64 + }; + } else { + *l1i = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 32 + }; + *l1d = (struct cpuinfo_cache) { + .size = 16 * 1024, + .associativity = 4, + .line_size = 32 + }; + if (arch_version >= 7) { + *l2 = (struct cpuinfo_cache) { + .size = cluster_cores * 128 * 1024, + .associativity = 8, + .line_size = 32 + }; + } + } + break; + } + l1i->sets = l1i->size / (l1i->associativity * l1i->line_size); + l1i->partitions = 1; + l1d->sets = l1d->size / (l1d->associativity * l1d->line_size); + l1d->partitions = 1; + if (l2->size != 0) { + l2->sets = l2->size / (l2->associativity * l2->line_size); + l2->partitions = 1; + if (l3->size != 0) { + l3->sets = l3->size / (l3->associativity * l3->line_size); + l3->partitions = 1; + } + } +} + +uint32_t cpuinfo_arm_compute_max_cache_size(const struct cpuinfo_processor* processor) { + /* + * There is no precise way to detect cache size on ARM/ARM64, and cache size reported by cpuinfo + * may underestimate the actual cache size. Thus, we use microarchitecture-specific maximum. 
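Editorial note: after the large switch, cpuinfo_arm_decode_cache above derives the number of sets from the chosen geometry as sets = size / (associativity * line_size) and fixes partitions at 1. A short worked example of that arithmetic, using an illustrative 32 KB, 4-way, 64-byte-line configuration that appears in several of the tables above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Illustrative geometry taken from the tables above: 32 KB, 4-way, 64-byte lines. */
        const uint32_t size = 32 * 1024;
        const uint32_t associativity = 4;
        const uint32_t line_size = 64;

        /* Same derivation as the end of cpuinfo_arm_decode_cache: sets = size / (ways * line size). */
        const uint32_t sets = size / (associativity * line_size);
        printf("sets = %u\n", (unsigned) sets); /* prints "sets = 128" */
        return 0;
    }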
+ */ + switch (processor->core->uarch) { + case cpuinfo_uarch_xscale: + case cpuinfo_uarch_arm11: + case cpuinfo_uarch_scorpion: + case cpuinfo_uarch_krait: + case cpuinfo_uarch_kryo: + case cpuinfo_uarch_exynos_m1: + case cpuinfo_uarch_exynos_m2: + case cpuinfo_uarch_exynos_m3: + /* cpuinfo-detected cache size always correct */ + return cpuinfo_compute_max_cache_size(processor); + case cpuinfo_uarch_cortex_a5: + /* Max observed (NXP Vybrid SoC) */ + return 512 * 1024; + case cpuinfo_uarch_cortex_a7: + /* + * Cortex-A7 MPCore Technical Reference Manual: + * 7.1. About the L2 Memory system + * The L2 memory system consists of an: + * - Optional tightly-coupled L2 cache that includes: + * - Configurable L2 cache size of 128KB, 256KB, 512KB, and 1MB. + */ + return 1024 * 1024; + case cpuinfo_uarch_cortex_a8: + /* + * Cortex-A8 Technical Reference Manual: + * 8.1. About the L2 memory system + * The key features of the L2 memory system include: + * - configurable cache size of 0KB, 128KB, 256KB, 512KB, and 1MB + */ + return 1024 * 1024; + case cpuinfo_uarch_cortex_a9: + /* Max observed (e.g. Exynos 4212) */ + return 1024 * 1024; + case cpuinfo_uarch_cortex_a12: + case cpuinfo_uarch_cortex_a17: + /* + * ARM Cortex-A17 MPCore Processor Technical Reference Manual: + * 7.1. About the L2 Memory system + * The key features of the L2 memory system include: + * - An integrated L2 cache: + * - The cache size is implemented as either 256KB, 512KB, 1MB, 2MB, 4MB or 8MB. + */ + return 8 * 1024 * 1024; + case cpuinfo_uarch_cortex_a15: + /* + * ARM Cortex-A15 MPCore Processor Technical Reference Manual: + * 7.1. About the L2 memory system + * The features of the L2 memory system include: + * - Configurable L2 cache size of 512KB, 1MB, 2MB and 4MB. + */ + return 4 * 1024 * 1024; + case cpuinfo_uarch_cortex_a35: + /* + * ARM Cortex‑A35 Processor Technical Reference Manual: + * 7.1 About the L2 memory system + * L2 cache + * - Further features of the L2 cache are: + * - Configurable size of 128KB, 256KB, 512KB, and 1MB. + */ + return 1024 * 1024; + case cpuinfo_uarch_cortex_a53: + /* + * ARM Cortex-A53 MPCore Processor Technical Reference Manual: + * 7.1. About the L2 memory system + * The L2 memory system consists of an: + * - Optional tightly-coupled L2 cache that includes: + * - Configurable L2 cache size of 128KB, 256KB, 512KB, 1MB and 2MB. + */ + return 2 * 1024 * 1024; + case cpuinfo_uarch_cortex_a57: + /* + * ARM Cortex-A57 MPCore Processor Technical Reference Manual: + * 7.1 About the L2 memory system + * The features of the L2 memory system include: + * - Configurable L2 cache size of 512KB, 1MB, and 2MB. + */ + return 2 * 1024 * 1024; + case cpuinfo_uarch_cortex_a72: + /* + * ARM Cortex-A72 MPCore Processor Technical Reference Manual: + * 7.1 About the L2 memory system + * The features of the L2 memory system include: + * - Configurable L2 cache size of 512KB, 1MB, 2MB and 4MB. + */ + return 4 * 1024 * 1024; + case cpuinfo_uarch_cortex_a73: + /* + * ARM Cortex‑A73 MPCore Processor Technical Reference Manual + * 7.1 About the L2 memory system + * The L2 memory system consists of: + * - A tightly-integrated L2 cache with: + * - A configurable size of 256KB, 512KB, 1MB, 2MB, 4MB, or 8MB. 
+ */ + return 8 * 1024 * 1024; + case cpuinfo_uarch_cortex_a55: + case cpuinfo_uarch_neoverse_n1: + case cpuinfo_uarch_neoverse_v1: + case cpuinfo_uarch_neoverse_n2: + case cpuinfo_uarch_cortex_a75: + case cpuinfo_uarch_cortex_a76: + case cpuinfo_uarch_exynos_m4: + default: + /* + * ARM DynamIQ Shared Unit Technical Reference Manual + * 1.3 Implementation options + * L3_CACHE_SIZE + * - 256KB + * - 512KB + * - 1024KB + * - 1536KB + * - 2048KB + * - 3072KB + * - 4096KB + */ + return 4 * 1024 * 1024; + } +} diff --git a/source/3rdparty/cpuinfo/src/arm/linux/aarch32-isa.c b/source/3rdparty/cpuinfo/src/arm/linux/aarch32-isa.c new file mode 100644 index 0000000..d6f6a21 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/aarch32-isa.c @@ -0,0 +1,271 @@ +#include + +#if CPUINFO_MOCK + #include +#endif +#include +#include +#include +#include + + +#if CPUINFO_MOCK + uint32_t cpuinfo_arm_fpsid = 0; + uint32_t cpuinfo_arm_mvfr0 = 0; + uint32_t cpuinfo_arm_wcid = 0; + + void cpuinfo_set_fpsid(uint32_t fpsid) { + cpuinfo_arm_fpsid = fpsid; + } + + void cpuinfo_set_wcid(uint32_t wcid) { + cpuinfo_arm_wcid = wcid; + } +#endif + + +void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo( + uint32_t features, + uint32_t features2, + uint32_t midr, + uint32_t architecture_version, + uint32_t architecture_flags, + const struct cpuinfo_arm_chipset chipset[restrict static 1], + struct cpuinfo_arm_isa isa[restrict static 1]) +{ + if (architecture_version >= 8) { + /* + * ARMv7 code running on ARMv8: IDIV, VFP, NEON are always supported, + * but may be not reported in /proc/cpuinfo features. + */ + isa->armv5e = true; + isa->armv6 = true; + isa->armv6k = true; + isa->armv7 = true; + isa->armv7mp = true; + isa->armv8 = true; + isa->thumb = true; + isa->thumb2 = true; + isa->idiv = true; + isa->vfpv3 = true; + isa->d32 = true; + isa->fp16 = true; + isa->fma = true; + isa->neon = true; + + /* + * NEON FP16 compute extension and VQRDMLAH/VQRDMLSH instructions are not indicated in /proc/cpuinfo. 
+ * Use a MIDR-based heuristic to whitelist processors known to support it: + * - Processors with Cortex-A55 cores + * - Processors with Cortex-A65 cores + * - Processors with Cortex-A75 cores + * - Processors with Cortex-A76 cores + * - Processors with Cortex-A77 cores + * - Processors with Exynos M4 cores + * - Processors with Exynos M5 cores + * - Neoverse N1 cores + * - Neoverse V1 cores + * - Neoverse N2 cores + */ + if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) { + /* Only little cores of Exynos 9810 support FP16 & RDM */ + cpuinfo_log_warning("FP16 arithmetics and RDM disabled: only little cores in Exynos 9810 support these extensions"); + } else { + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x4100D050): /* Cortex-A55 */ + case UINT32_C(0x4100D060): /* Cortex-A65 */ + case UINT32_C(0x4100D0B0): /* Cortex-A76 */ + case UINT32_C(0x4100D0C0): /* Neoverse N1 */ + case UINT32_C(0x4100D0D0): /* Cortex-A77 */ + case UINT32_C(0x4100D0E0): /* Cortex-A76AE */ + case UINT32_C(0x4100D400): /* Neoverse V1 */ + case UINT32_C(0x4100D490): /* Neoverse N2 */ + case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */ + case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */ + case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */ + case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */ + case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */ + case UINT32_C(0x53000030): /* Exynos M4 */ + case UINT32_C(0x53000040): /* Exynos M5 */ + isa->fp16arith = true; + isa->rdm = true; + break; + } + } + + /* + * NEON VDOT instructions are not indicated in /proc/cpuinfo. + * Use a MIDR-based heuristic to whitelist processors known to support it. + */ + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x4100D0B0): /* Cortex-A76 */ + case UINT32_C(0x4100D0D0): /* Cortex-A77 */ + case UINT32_C(0x4100D0E0): /* Cortex-A76AE */ + case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */ + case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */ + case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */ + case UINT32_C(0x53000030): /* Exynos-M4 */ + case UINT32_C(0x53000040): /* Exynos-M5 */ + isa->dot = true; + break; + case UINT32_C(0x4100D050): /* Cortex A55: revision 1 or later only */ + isa->dot = !!(midr_get_variant(midr) >= 1); + break; + case UINT32_C(0x4100D0A0): /* Cortex A75: revision 2 or later only */ + isa->dot = !!(midr_get_variant(midr) >= 2); + break; + } + } else { + /* ARMv7 or lower: use feature flags to detect optional features */ + + /* + * ARM11 (ARM 1136/1156/1176/11 MPCore) processors can report v7 architecture + * even though they support only ARMv6 instruction set. 
+ */ + if (architecture_version == 7 && midr_is_arm11(midr)) { + cpuinfo_log_warning("kernel-reported architecture ARMv7 ignored due to mismatch with processor microarchitecture (ARM11)"); + architecture_version = 6; + } + + if (architecture_version < 7) { + const uint32_t armv7_features_mask = CPUINFO_ARM_LINUX_FEATURE_VFPV3 | CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 | CPUINFO_ARM_LINUX_FEATURE_VFPD32 | + CPUINFO_ARM_LINUX_FEATURE_VFPV4 | CPUINFO_ARM_LINUX_FEATURE_NEON | CPUINFO_ARM_LINUX_FEATURE_IDIVT | CPUINFO_ARM_LINUX_FEATURE_IDIVA; + if (features & armv7_features_mask) { + architecture_version = 7; + } + } + if ((architecture_version >= 6) || (features & CPUINFO_ARM_LINUX_FEATURE_EDSP) || (architecture_flags & CPUINFO_ARM_LINUX_ARCH_E)) { + isa->armv5e = true; + } + if (architecture_version >= 6) { + isa->armv6 = true; + } + if (architecture_version >= 7) { + isa->armv6k = true; + isa->armv7 = true; + + /* + * ARMv7 MP extension (PLDW instruction) is not indicated in /proc/cpuinfo. + * Use heuristic list of supporting processors: + * - Processors supporting UDIV/SDIV instructions ("idiva" + "idivt" features in /proc/cpuinfo) + * - Cortex-A5 + * - Cortex-A9 + * - Dual-Core Scorpion + * - Krait (supports UDIV/SDIV, but kernels may not report it in /proc/cpuinfo) + * + * TODO: check single-core Qualcomm Scorpion. + */ + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x4100C050): /* Cortex-A5 */ + case UINT32_C(0x4100C090): /* Cortex-A9 */ + case UINT32_C(0x510002D0): /* Scorpion (dual-core) */ + case UINT32_C(0x510004D0): /* Krait (dual-core) */ + case UINT32_C(0x510006F0): /* Krait (quad-core) */ + isa->armv7mp = true; + break; + default: + /* In practice IDIV instruction implies ARMv7+MP ISA */ + isa->armv7mp = (features & CPUINFO_ARM_LINUX_FEATURE_IDIV) == CPUINFO_ARM_LINUX_FEATURE_IDIV; + break; + } + } + + if (features & CPUINFO_ARM_LINUX_FEATURE_IWMMXT) { + const uint32_t wcid = read_wcid(); + cpuinfo_log_debug("WCID = 0x%08"PRIx32, wcid); + const uint32_t coprocessor_type = (wcid >> 8) & UINT32_C(0xFF); + if (coprocessor_type >= 0x10) { + isa->wmmx = true; + if (coprocessor_type >= 0x20) { + isa->wmmx2 = true; + } + } else { + cpuinfo_log_warning("WMMX ISA disabled: OS reported iwmmxt feature, " + "but WCID coprocessor type 0x%"PRIx32" indicates no WMMX support", + coprocessor_type); + } + } + + if ((features & CPUINFO_ARM_LINUX_FEATURE_THUMB) || (architecture_flags & CPUINFO_ARM_LINUX_ARCH_T)) { + isa->thumb = true; + + /* + * There is no separate feature flag for Thumb 2. + * All ARMv7 processors and ARM 1156 support Thumb 2. 
+ */ + if (architecture_version >= 7 || midr_is_arm1156(midr)) { + isa->thumb2 = true; + } + } + if (features & CPUINFO_ARM_LINUX_FEATURE_THUMBEE) { + isa->thumbee = true; + } + if ((features & CPUINFO_ARM_LINUX_FEATURE_JAVA) || (architecture_flags & CPUINFO_ARM_LINUX_ARCH_J)) { + isa->jazelle = true; + } + + /* Qualcomm Krait may have buggy kernel configuration that doesn't report IDIV */ + if ((features & CPUINFO_ARM_LINUX_FEATURE_IDIV) == CPUINFO_ARM_LINUX_FEATURE_IDIV || midr_is_krait(midr)) { + isa->idiv = true; + } + + const uint32_t vfp_mask = \ + CPUINFO_ARM_LINUX_FEATURE_VFP | CPUINFO_ARM_LINUX_FEATURE_VFPV3 | CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 | \ + CPUINFO_ARM_LINUX_FEATURE_VFPD32 | CPUINFO_ARM_LINUX_FEATURE_VFPV4 | CPUINFO_ARM_LINUX_FEATURE_NEON; + if (features & vfp_mask) { + const uint32_t vfpv3_mask = CPUINFO_ARM_LINUX_FEATURE_VFPV3 | CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 | \ + CPUINFO_ARM_LINUX_FEATURE_VFPD32 | CPUINFO_ARM_LINUX_FEATURE_VFPV4 | CPUINFO_ARM_LINUX_FEATURE_NEON; + if ((architecture_version >= 7) || (features & vfpv3_mask)) { + isa->vfpv3 = true; + + const uint32_t d32_mask = CPUINFO_ARM_LINUX_FEATURE_VFPD32 | CPUINFO_ARM_LINUX_FEATURE_NEON; + if (features & d32_mask) { + isa->d32 = true; + } + } else { + #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH) && (__ARM_ARCH >= 7) + isa->vfpv3 = true; + #else + const uint32_t fpsid = read_fpsid(); + cpuinfo_log_debug("FPSID = 0x%08"PRIx32, fpsid); + const uint32_t subarchitecture = (fpsid >> 16) & UINT32_C(0x7F); + if (subarchitecture >= 0x01) { + isa->vfpv2 = true; + } + #endif + } + } + if (features & CPUINFO_ARM_LINUX_FEATURE_NEON) { + isa->neon = true; + } + + /* + * There is no separate feature flag for FP16 support. + * VFPv4 implies VFPv3-FP16 support (and in practice, NEON-HP as well). + * Additionally, ARM Cortex-A9 and Qualcomm Scorpion support FP16. 
+ */ + if ((features & CPUINFO_ARM_LINUX_FEATURE_VFPV4) || midr_is_cortex_a9(midr) || midr_is_scorpion(midr)) { + isa->fp16 = true; + } + + if (features & CPUINFO_ARM_LINUX_FEATURE_VFPV4) { + isa->fma = true; + } + } + + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_AES) { + isa->aes = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_PMULL) { + isa->pmull = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SHA1) { + isa->sha1 = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SHA2) { + isa->sha2 = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_CRC32) { + isa->crc32 = true; + } +} diff --git a/source/3rdparty/cpuinfo/src/arm/linux/aarch64-isa.c b/source/3rdparty/cpuinfo/src/arm/linux/aarch64-isa.c new file mode 100644 index 0000000..7b18095 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/aarch64-isa.c @@ -0,0 +1,139 @@ +#include + +#include +#include + + +void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo( + uint32_t features, + uint32_t features2, + uint32_t midr, + const struct cpuinfo_arm_chipset chipset[restrict static 1], + struct cpuinfo_arm_isa isa[restrict static 1]) +{ + if (features & CPUINFO_ARM_LINUX_FEATURE_AES) { + isa->aes = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_PMULL) { + isa->pmull = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_SHA1) { + isa->sha1 = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_SHA2) { + isa->sha2 = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_CRC32) { + isa->crc32 = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_ATOMICS) { + isa->atomics = true; + } + + /* + * Some phones ship with an old kernel configuration that doesn't report NEON FP16 compute extension and SQRDMLAH/SQRDMLSH/UQRDMLAH/UQRDMLSH instructions. + * Use a MIDR-based heuristic to whitelist processors known to support it: + * - Processors with Cortex-A55 cores + * - Processors with Cortex-A65 cores + * - Processors with Cortex-A75 cores + * - Processors with Cortex-A76 cores + * - Processors with Cortex-A77 cores + * - Processors with Exynos M4 cores + * - Processors with Exynos M5 cores + * - Neoverse N1 cores + * - Neoverse V1 cores + * - Neoverse N2 cores + */ + if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) { + /* Exynos 9810 reports that it supports FP16 compute, but in fact only little cores do */ + cpuinfo_log_warning("FP16 arithmetics and RDM disabled: only little cores in Exynos 9810 support these extensions"); + } else { + const uint32_t fp16arith_mask = CPUINFO_ARM_LINUX_FEATURE_FPHP | CPUINFO_ARM_LINUX_FEATURE_ASIMDHP; + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x4100D050): /* Cortex-A55 */ + case UINT32_C(0x4100D060): /* Cortex-A65 */ + case UINT32_C(0x4100D0B0): /* Cortex-A76 */ + case UINT32_C(0x4100D0C0): /* Neoverse N1 */ + case UINT32_C(0x4100D0D0): /* Cortex-A77 */ + case UINT32_C(0x4100D0E0): /* Cortex-A76AE */ + case UINT32_C(0x4100D400): /* Neoverse V1 */ + case UINT32_C(0x4100D490): /* Neoverse N2 */ + case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */ + case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */ + case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */ + case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */ + case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */ + case UINT32_C(0x53000030): /* Exynos M4 */ + case UINT32_C(0x53000040): /* Exynos M5 */ + isa->fp16arith = true; + isa->rdm = true; + break; + default: + if ((features & fp16arith_mask) == 
fp16arith_mask) { + isa->fp16arith = true; + } else if (features & CPUINFO_ARM_LINUX_FEATURE_FPHP) { + cpuinfo_log_warning("FP16 arithmetics disabled: detected support only for scalar operations"); + } else if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDHP) { + cpuinfo_log_warning("FP16 arithmetics disabled: detected support only for SIMD operations"); + } + if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM) { + isa->rdm = true; + } + break; + } + } + + /* + * Many phones ship with an old kernel configuration that doesn't report UDOT/SDOT instructions. + * Use a MIDR-based heuristic to whitelist processors known to support it. + */ + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x4100D060): /* Cortex-A65 */ + case UINT32_C(0x4100D0B0): /* Cortex-A76 */ + case UINT32_C(0x4100D0C0): /* Neoverse N1 */ + case UINT32_C(0x4100D0D0): /* Cortex-A77 */ + case UINT32_C(0x4100D0E0): /* Cortex-A76AE */ + case UINT32_C(0x4100D400): /* Neoverse V1 */ + case UINT32_C(0x4100D490): /* Neoverse N2 */ + case UINT32_C(0x4100D4A0): /* Neoverse E1 */ + case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */ + case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */ + case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */ + case UINT32_C(0x53000030): /* Exynos-M4 */ + case UINT32_C(0x53000040): /* Exynos-M5 */ + isa->dot = true; + break; + case UINT32_C(0x4100D050): /* Cortex A55: revision 1 or later only */ + isa->dot = !!(midr_get_variant(midr) >= 1); + break; + case UINT32_C(0x4100D0A0): /* Cortex A75: revision 2 or later only */ + isa->dot = !!(midr_get_variant(midr) >= 2); + break; + default: + if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDDP) { + isa->dot = true; + } + break; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_JSCVT) { + isa->jscvt = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_JSCVT) { + isa->jscvt = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_FCMA) { + isa->fcma = true; + } + if (features & CPUINFO_ARM_LINUX_FEATURE_SVE) { + isa->sve = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SVE2) { + isa->sve2 = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_BF16) { + isa->bf16 = true; + } + if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SVEBF16) { + isa->svebf16 = true; + } +} diff --git a/source/3rdparty/cpuinfo/src/arm/linux/api.h b/source/3rdparty/cpuinfo/src/arm/linux/api.h new file mode 100644 index 0000000..1c09f82 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/api.h @@ -0,0 +1,384 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +/* No hard limit in the kernel, maximum length observed on non-rogue kernels is 64 */ +#define CPUINFO_HARDWARE_VALUE_MAX 64 +/* No hard limit in the kernel, maximum length on Raspberry Pi is 8. 
Add 1 symbol to detect overly large revision strings */ +#define CPUINFO_REVISION_VALUE_MAX 9 + +#ifdef __ANDROID__ + /* As per include/sys/system_properties.h in Android NDK */ + #define CPUINFO_BUILD_PROP_NAME_MAX 32 + #define CPUINFO_BUILD_PROP_VALUE_MAX 92 + + struct cpuinfo_android_properties { + char proc_cpuinfo_hardware[CPUINFO_HARDWARE_VALUE_MAX]; + char ro_product_board[CPUINFO_BUILD_PROP_VALUE_MAX]; + char ro_board_platform[CPUINFO_BUILD_PROP_VALUE_MAX]; + char ro_mediatek_platform[CPUINFO_BUILD_PROP_VALUE_MAX]; + char ro_arch[CPUINFO_BUILD_PROP_VALUE_MAX]; + char ro_chipname[CPUINFO_BUILD_PROP_VALUE_MAX]; + char ro_hardware_chipname[CPUINFO_BUILD_PROP_VALUE_MAX]; + }; +#endif + +#define CPUINFO_ARM_LINUX_ARCH_T UINT32_C(0x00000001) +#define CPUINFO_ARM_LINUX_ARCH_E UINT32_C(0x00000002) +#define CPUINFO_ARM_LINUX_ARCH_J UINT32_C(0x00000004) + +#define CPUINFO_ARM_LINUX_ARCH_TE UINT32_C(0x00000003) +#define CPUINFO_ARM_LINUX_ARCH_TEJ UINT32_C(0x00000007) + +struct cpuinfo_arm_linux_proc_cpuinfo_cache { + uint32_t i_size; + uint32_t i_assoc; + uint32_t i_line_length; + uint32_t i_sets; + uint32_t d_size; + uint32_t d_assoc; + uint32_t d_line_length; + uint32_t d_sets; +}; + +#if CPUINFO_ARCH_ARM + /* arch/arm/include/uapi/asm/hwcap.h */ + + #define CPUINFO_ARM_LINUX_FEATURE_SWP UINT32_C(0x00000001) + #define CPUINFO_ARM_LINUX_FEATURE_HALF UINT32_C(0x00000002) + #define CPUINFO_ARM_LINUX_FEATURE_THUMB UINT32_C(0x00000004) + #define CPUINFO_ARM_LINUX_FEATURE_26BIT UINT32_C(0x00000008) + #define CPUINFO_ARM_LINUX_FEATURE_FASTMULT UINT32_C(0x00000010) + #define CPUINFO_ARM_LINUX_FEATURE_FPA UINT32_C(0x00000020) + #define CPUINFO_ARM_LINUX_FEATURE_VFP UINT32_C(0x00000040) + #define CPUINFO_ARM_LINUX_FEATURE_EDSP UINT32_C(0x00000080) + #define CPUINFO_ARM_LINUX_FEATURE_JAVA UINT32_C(0x00000100) + #define CPUINFO_ARM_LINUX_FEATURE_IWMMXT UINT32_C(0x00000200) + #define CPUINFO_ARM_LINUX_FEATURE_CRUNCH UINT32_C(0x00000400) + #define CPUINFO_ARM_LINUX_FEATURE_THUMBEE UINT32_C(0x00000800) + #define CPUINFO_ARM_LINUX_FEATURE_NEON UINT32_C(0x00001000) + #define CPUINFO_ARM_LINUX_FEATURE_VFPV3 UINT32_C(0x00002000) + #define CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 UINT32_C(0x00004000) /* Also set for VFPv4 with 16 double-precision registers */ + #define CPUINFO_ARM_LINUX_FEATURE_TLS UINT32_C(0x00008000) + #define CPUINFO_ARM_LINUX_FEATURE_VFPV4 UINT32_C(0x00010000) + #define CPUINFO_ARM_LINUX_FEATURE_IDIVA UINT32_C(0x00020000) + #define CPUINFO_ARM_LINUX_FEATURE_IDIVT UINT32_C(0x00040000) + #define CPUINFO_ARM_LINUX_FEATURE_IDIV UINT32_C(0x00060000) + #define CPUINFO_ARM_LINUX_FEATURE_VFPD32 UINT32_C(0x00080000) + #define CPUINFO_ARM_LINUX_FEATURE_LPAE UINT32_C(0x00100000) + #define CPUINFO_ARM_LINUX_FEATURE_EVTSTRM UINT32_C(0x00200000) + + #define CPUINFO_ARM_LINUX_FEATURE2_AES UINT32_C(0x00000001) + #define CPUINFO_ARM_LINUX_FEATURE2_PMULL UINT32_C(0x00000002) + #define CPUINFO_ARM_LINUX_FEATURE2_SHA1 UINT32_C(0x00000004) + #define CPUINFO_ARM_LINUX_FEATURE2_SHA2 UINT32_C(0x00000008) + #define CPUINFO_ARM_LINUX_FEATURE2_CRC32 UINT32_C(0x00000010) +#elif CPUINFO_ARCH_ARM64 + /* arch/arm64/include/uapi/asm/hwcap.h */ + #define CPUINFO_ARM_LINUX_FEATURE_FP UINT32_C(0x00000001) + #define CPUINFO_ARM_LINUX_FEATURE_ASIMD UINT32_C(0x00000002) + #define CPUINFO_ARM_LINUX_FEATURE_EVTSTRM UINT32_C(0x00000004) + #define CPUINFO_ARM_LINUX_FEATURE_AES UINT32_C(0x00000008) + #define CPUINFO_ARM_LINUX_FEATURE_PMULL UINT32_C(0x00000010) + #define CPUINFO_ARM_LINUX_FEATURE_SHA1 UINT32_C(0x00000020) + #define 
CPUINFO_ARM_LINUX_FEATURE_SHA2 UINT32_C(0x00000040) + #define CPUINFO_ARM_LINUX_FEATURE_CRC32 UINT32_C(0x00000080) + #define CPUINFO_ARM_LINUX_FEATURE_ATOMICS UINT32_C(0x00000100) + #define CPUINFO_ARM_LINUX_FEATURE_FPHP UINT32_C(0x00000200) + #define CPUINFO_ARM_LINUX_FEATURE_ASIMDHP UINT32_C(0x00000400) + #define CPUINFO_ARM_LINUX_FEATURE_CPUID UINT32_C(0x00000800) + #define CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM UINT32_C(0x00001000) + #define CPUINFO_ARM_LINUX_FEATURE_JSCVT UINT32_C(0x00002000) + #define CPUINFO_ARM_LINUX_FEATURE_FCMA UINT32_C(0x00004000) + #define CPUINFO_ARM_LINUX_FEATURE_LRCPC UINT32_C(0x00008000) + #define CPUINFO_ARM_LINUX_FEATURE_DCPOP UINT32_C(0x00010000) + #define CPUINFO_ARM_LINUX_FEATURE_SHA3 UINT32_C(0x00020000) + #define CPUINFO_ARM_LINUX_FEATURE_SM3 UINT32_C(0x00040000) + #define CPUINFO_ARM_LINUX_FEATURE_SM4 UINT32_C(0x00080000) + #define CPUINFO_ARM_LINUX_FEATURE_ASIMDDP UINT32_C(0x00100000) + #define CPUINFO_ARM_LINUX_FEATURE_SHA512 UINT32_C(0x00200000) + #define CPUINFO_ARM_LINUX_FEATURE_SVE UINT32_C(0x00400000) + #define CPUINFO_ARM_LINUX_FEATURE_ASIMDFHM UINT32_C(0x00800000) + #define CPUINFO_ARM_LINUX_FEATURE_DIT UINT32_C(0x01000000) + #define CPUINFO_ARM_LINUX_FEATURE_USCAT UINT32_C(0x02000000) + #define CPUINFO_ARM_LINUX_FEATURE_ILRCPC UINT32_C(0x04000000) + #define CPUINFO_ARM_LINUX_FEATURE_FLAGM UINT32_C(0x08000000) + #define CPUINFO_ARM_LINUX_FEATURE_SSBS UINT32_C(0x10000000) + #define CPUINFO_ARM_LINUX_FEATURE_SB UINT32_C(0x20000000) + #define CPUINFO_ARM_LINUX_FEATURE_PACA UINT32_C(0x40000000) + #define CPUINFO_ARM_LINUX_FEATURE_PACG UINT32_C(0x80000000) + + #define CPUINFO_ARM_LINUX_FEATURE2_DCPODP UINT32_C(0x00000001) + #define CPUINFO_ARM_LINUX_FEATURE2_SVE2 UINT32_C(0x00000002) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEAES UINT32_C(0x00000004) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEPMULL UINT32_C(0x00000008) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEBITPERM UINT32_C(0x00000010) + #define CPUINFO_ARM_LINUX_FEATURE2_SVESHA3 UINT32_C(0x00000020) + #define CPUINFO_ARM_LINUX_FEATURE2_SVESM4 UINT32_C(0x00000040) + #define CPUINFO_ARM_LINUX_FEATURE2_FLAGM2 UINT32_C(0x00000080) + #define CPUINFO_ARM_LINUX_FEATURE2_FRINT UINT32_C(0x00000100) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEI8MM UINT32_C(0x00000200) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEF32MM UINT32_C(0x00000400) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEF64MM UINT32_C(0x00000800) + #define CPUINFO_ARM_LINUX_FEATURE2_SVEBF16 UINT32_C(0x00001000) + #define CPUINFO_ARM_LINUX_FEATURE2_I8MM UINT32_C(0x00002000) + #define CPUINFO_ARM_LINUX_FEATURE2_BF16 UINT32_C(0x00004000) + #define CPUINFO_ARM_LINUX_FEATURE2_DGH UINT32_C(0x00008000) + #define CPUINFO_ARM_LINUX_FEATURE2_RNG UINT32_C(0x00010000) + #define CPUINFO_ARM_LINUX_FEATURE2_BTI UINT32_C(0x00020000) +#endif + +#define CPUINFO_ARM_LINUX_VALID_ARCHITECTURE UINT32_C(0x00010000) +#define CPUINFO_ARM_LINUX_VALID_IMPLEMENTER UINT32_C(0x00020000) +#define CPUINFO_ARM_LINUX_VALID_VARIANT UINT32_C(0x00040000) +#define CPUINFO_ARM_LINUX_VALID_PART UINT32_C(0x00080000) +#define CPUINFO_ARM_LINUX_VALID_REVISION UINT32_C(0x00100000) +#define CPUINFO_ARM_LINUX_VALID_PROCESSOR UINT32_C(0x00200000) +#define CPUINFO_ARM_LINUX_VALID_FEATURES UINT32_C(0x00400000) +#if CPUINFO_ARCH_ARM + #define CPUINFO_ARM_LINUX_VALID_ICACHE_SIZE UINT32_C(0x01000000) + #define CPUINFO_ARM_LINUX_VALID_ICACHE_SETS UINT32_C(0x02000000) + #define CPUINFO_ARM_LINUX_VALID_ICACHE_WAYS UINT32_C(0x04000000) + #define CPUINFO_ARM_LINUX_VALID_ICACHE_LINE UINT32_C(0x08000000) + #define 
CPUINFO_ARM_LINUX_VALID_DCACHE_SIZE UINT32_C(0x10000000) + #define CPUINFO_ARM_LINUX_VALID_DCACHE_SETS UINT32_C(0x20000000) + #define CPUINFO_ARM_LINUX_VALID_DCACHE_WAYS UINT32_C(0x40000000) + #define CPUINFO_ARM_LINUX_VALID_DCACHE_LINE UINT32_C(0x80000000) +#endif + +#define CPUINFO_ARM_LINUX_VALID_INFO UINT32_C(0x007F0000) +#define CPUINFO_ARM_LINUX_VALID_MIDR UINT32_C(0x003F0000) +#if CPUINFO_ARCH_ARM + #define CPUINFO_ARM_LINUX_VALID_ICACHE UINT32_C(0x0F000000) + #define CPUINFO_ARM_LINUX_VALID_DCACHE UINT32_C(0xF0000000) + #define CPUINFO_ARM_LINUX_VALID_CACHE_LINE UINT32_C(0x88000000) +#endif + +struct cpuinfo_arm_linux_processor { + uint32_t architecture_version; +#if CPUINFO_ARCH_ARM + uint32_t architecture_flags; + struct cpuinfo_arm_linux_proc_cpuinfo_cache proc_cpuinfo_cache; +#endif + uint32_t features; + uint32_t features2; + /** + * Main ID Register value. + */ + uint32_t midr; + enum cpuinfo_vendor vendor; + enum cpuinfo_uarch uarch; + uint32_t uarch_index; + /** + * ID of the physical package which includes this logical processor. + * The value is parsed from /sys/devices/system/cpu/cpu/topology/physical_package_id + */ + uint32_t package_id; + /** + * Minimum processor ID on the package which includes this logical processor. + * This value can serve as an ID for the cluster of logical processors: it is the + * same for all logical processors on the same package. + */ + uint32_t package_leader_id; + /** + * Number of logical processors in the package. + */ + uint32_t package_processor_count; + /** + * Maximum frequency, in kHZ. + * The value is parsed from /sys/devices/system/cpu/cpu/cpufreq/cpuinfo_max_freq + * If failed to read or parse the file, the value is 0. + */ + uint32_t max_frequency; + /** + * Minimum frequency, in kHZ. + * The value is parsed from /sys/devices/system/cpu/cpu/cpufreq/cpuinfo_min_freq + * If failed to read or parse the file, the value is 0. 
+ */ + uint32_t min_frequency; + /** Linux processor ID */ + uint32_t system_processor_id; + uint32_t flags; +}; + +struct cpuinfo_arm_linux_cluster { + uint32_t processor_id_min; + uint32_t processor_id_max; +}; + +/* Returns true if the two processors do belong to the same cluster */ +static inline bool cpuinfo_arm_linux_processor_equals( + struct cpuinfo_arm_linux_processor processor_i[restrict static 1], + struct cpuinfo_arm_linux_processor processor_j[restrict static 1]) +{ + const uint32_t joint_flags = processor_i->flags & processor_j->flags; + + bool same_max_frequency = false; + if (joint_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (processor_i->max_frequency != processor_j->max_frequency) { + return false; + } else { + same_max_frequency = true; + } + } + + bool same_min_frequency = false; + if (joint_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + if (processor_i->min_frequency != processor_j->min_frequency) { + return false; + } else { + same_min_frequency = true; + } + } + + if ((joint_flags & CPUINFO_ARM_LINUX_VALID_MIDR) == CPUINFO_ARM_LINUX_VALID_MIDR) { + if (processor_i->midr == processor_j->midr) { + if (midr_is_cortex_a53(processor_i->midr)) { + return same_min_frequency & same_max_frequency; + } else { + return true; + } + } + } + + return same_max_frequency && same_min_frequency; +} + +/* Returns true if the two processors certainly don't belong to the same cluster */ +static inline bool cpuinfo_arm_linux_processor_not_equals( + struct cpuinfo_arm_linux_processor processor_i[restrict static 1], + struct cpuinfo_arm_linux_processor processor_j[restrict static 1]) +{ + const uint32_t joint_flags = processor_i->flags & processor_j->flags; + + if (joint_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (processor_i->max_frequency != processor_j->max_frequency) { + return true; + } + } + + if (joint_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + if (processor_i->min_frequency != processor_j->min_frequency) { + return true; + } + } + + if ((joint_flags & CPUINFO_ARM_LINUX_VALID_MIDR) == CPUINFO_ARM_LINUX_VALID_MIDR) { + if (processor_i->midr != processor_j->midr) { + return true; + } + } + + return false; +} + +CPUINFO_INTERNAL bool cpuinfo_arm_linux_parse_proc_cpuinfo( + char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX], + char revision[restrict static CPUINFO_REVISION_VALUE_MAX], + uint32_t max_processors_count, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors_count]); + +#if CPUINFO_ARCH_ARM + CPUINFO_INTERNAL bool cpuinfo_arm_linux_hwcap_from_getauxval( + uint32_t hwcap[restrict static 1], + uint32_t hwcap2[restrict static 1]); + CPUINFO_INTERNAL bool cpuinfo_arm_linux_hwcap_from_procfs( + uint32_t hwcap[restrict static 1], + uint32_t hwcap2[restrict static 1]); + + CPUINFO_INTERNAL void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo( + uint32_t features, + uint32_t features2, + uint32_t midr, + uint32_t architecture_version, + uint32_t architecture_flags, + const struct cpuinfo_arm_chipset chipset[restrict static 1], + struct cpuinfo_arm_isa isa[restrict static 1]); +#elif CPUINFO_ARCH_ARM64 + CPUINFO_INTERNAL void cpuinfo_arm_linux_hwcap_from_getauxval( + uint32_t hwcap[restrict static 1], + uint32_t hwcap2[restrict static 1]); + + CPUINFO_INTERNAL void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo( + uint32_t features, + uint32_t features2, + uint32_t midr, + const struct cpuinfo_arm_chipset chipset[restrict static 1], + struct cpuinfo_arm_isa isa[restrict static 1]); +#endif + +#ifdef __ANDROID__ + CPUINFO_INTERNAL struct 
cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset( + const struct cpuinfo_android_properties properties[restrict static 1], + uint32_t cores, + uint32_t max_cpu_freq_max); +#else + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_linux_decode_chipset( + const char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX], + const char revision[restrict static CPUINFO_REVISION_VALUE_MAX], + uint32_t cores, + uint32_t max_cpu_freq_max); +#endif + +CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_hardware( + const char proc_cpuinfo_hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX], + uint32_t cores, uint32_t max_cpu_freq_max, bool is_tegra); + +#ifdef __ANDROID__ + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset_from_ro_product_board( + const char ro_product_board[restrict static CPUINFO_BUILD_PROP_VALUE_MAX], + uint32_t cores, uint32_t max_cpu_freq_max); + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset_from_ro_board_platform( + const char ro_board_platform[restrict static CPUINFO_BUILD_PROP_VALUE_MAX], + uint32_t cores, uint32_t max_cpu_freq_max); + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset_from_ro_mediatek_platform( + const char ro_mediatek_platform[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]); + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset_from_ro_arch( + const char ro_arch[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]); + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset_from_ro_chipname( + const char ro_chipname[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]); + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_android_decode_chipset_from_ro_hardware_chipname( + const char ro_hardware_chipname[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]); +#else + CPUINFO_INTERNAL struct cpuinfo_arm_chipset + cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_revision( + const char proc_cpuinfo_revision[restrict static CPUINFO_REVISION_VALUE_MAX]); +#endif + +CPUINFO_INTERNAL bool cpuinfo_arm_linux_detect_core_clusters_by_heuristic( + uint32_t usable_processors, + uint32_t max_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]); + +CPUINFO_INTERNAL void cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan( + uint32_t max_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]); + +CPUINFO_INTERNAL void cpuinfo_arm_linux_count_cluster_processors( + uint32_t max_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]); + +CPUINFO_INTERNAL uint32_t cpuinfo_arm_linux_detect_cluster_midr( + const struct cpuinfo_arm_chipset chipset[restrict static 1], + uint32_t max_processors, + uint32_t usable_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]); + +extern CPUINFO_INTERNAL const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map; +extern CPUINFO_INTERNAL uint32_t cpuinfo_linux_cpu_to_uarch_index_map_entries; diff --git a/source/3rdparty/cpuinfo/src/arm/linux/chipset.c b/source/3rdparty/cpuinfo/src/arm/linux/chipset.c new file mode 100644 index 0000000..f369fba --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/chipset.c @@ -0,0 +1,3937 @@ +#include +#include +#include +#include + +#include +#ifdef __ANDROID__ + #include +#endif +#include +#include + + +static inline bool is_ascii_whitespace(char c) { + switch 
(c) { + case ' ': + case '\t': + case '\r': + case '\n': + return true; + default: + return false; + } +} + +static inline bool is_ascii_alphabetic(char c) { + const char lower_c = c | '\x20'; + return (uint8_t) (lower_c - 'a') <= (uint8_t) ('z' - 'a'); +} + +static inline bool is_ascii_alphabetic_uppercase(char c) { + return (uint8_t) (c - 'A') <= (uint8_t) ('Z' - 'A'); +} + +static inline bool is_ascii_numeric(char c) { + return (uint8_t) (c - '0') < 10; +} + +static inline uint16_t load_u16le(const void* ptr) { +#if defined(__ARM_ARCH_7A__) || defined(__aarch64__) + return *((const uint16_t*) ptr); +#else + const uint8_t* byte_ptr = (const uint8_t*) ptr; + return ((uint16_t) byte_ptr[1] << 8) | (uint16_t) byte_ptr[0]; +#endif +} + +static inline uint32_t load_u24le(const void* ptr) { +#if defined(__ARM_ARCH_7A__) || defined(__aarch64__) + return ((uint32_t) ((const uint8_t*) ptr)[2] << 16) | ((uint32_t) *((const uint16_t*) ptr)); +#else + const uint8_t* byte_ptr = (const uint8_t*) ptr; + return ((uint32_t) byte_ptr[2] << 16) | ((uint32_t) byte_ptr[1] << 8) | (uint32_t) byte_ptr[0]; +#endif +} + +static inline uint32_t load_u32le(const void* ptr) { +#if defined(__ARM_ARCH_7A__) || defined(__aarch64__) + return *((const uint32_t*) ptr); +#else + return ((uint32_t) ((const uint8_t*) ptr)[3] << 24) | load_u24le(ptr); +#endif +} + +/* + * Map from ARM chipset series ID to ARM chipset vendor ID. + * This map is used to avoid storing vendor IDs in tables. + */ +static enum cpuinfo_arm_chipset_vendor chipset_series_vendor[cpuinfo_arm_chipset_series_max] = { + [cpuinfo_arm_chipset_series_unknown] = cpuinfo_arm_chipset_vendor_unknown, + [cpuinfo_arm_chipset_series_qualcomm_qsd] = cpuinfo_arm_chipset_vendor_qualcomm, + [cpuinfo_arm_chipset_series_qualcomm_msm] = cpuinfo_arm_chipset_vendor_qualcomm, + [cpuinfo_arm_chipset_series_qualcomm_apq] = cpuinfo_arm_chipset_vendor_qualcomm, + [cpuinfo_arm_chipset_series_qualcomm_snapdragon] = cpuinfo_arm_chipset_vendor_qualcomm, + [cpuinfo_arm_chipset_series_mediatek_mt] = cpuinfo_arm_chipset_vendor_mediatek, + [cpuinfo_arm_chipset_series_samsung_exynos] = cpuinfo_arm_chipset_vendor_samsung, + [cpuinfo_arm_chipset_series_hisilicon_k3v] = cpuinfo_arm_chipset_vendor_hisilicon, + [cpuinfo_arm_chipset_series_hisilicon_hi] = cpuinfo_arm_chipset_vendor_hisilicon, + [cpuinfo_arm_chipset_series_hisilicon_kirin] = cpuinfo_arm_chipset_vendor_hisilicon, + [cpuinfo_arm_chipset_series_actions_atm] = cpuinfo_arm_chipset_vendor_actions, + [cpuinfo_arm_chipset_series_allwinner_a] = cpuinfo_arm_chipset_vendor_allwinner, + [cpuinfo_arm_chipset_series_amlogic_aml] = cpuinfo_arm_chipset_vendor_amlogic, + [cpuinfo_arm_chipset_series_amlogic_s] = cpuinfo_arm_chipset_vendor_amlogic, + [cpuinfo_arm_chipset_series_broadcom_bcm] = cpuinfo_arm_chipset_vendor_broadcom, + [cpuinfo_arm_chipset_series_lg_nuclun] = cpuinfo_arm_chipset_vendor_lg, + [cpuinfo_arm_chipset_series_leadcore_lc] = cpuinfo_arm_chipset_vendor_leadcore, + [cpuinfo_arm_chipset_series_marvell_pxa] = cpuinfo_arm_chipset_vendor_marvell, + [cpuinfo_arm_chipset_series_mstar_6a] = cpuinfo_arm_chipset_vendor_mstar, + [cpuinfo_arm_chipset_series_novathor_u] = cpuinfo_arm_chipset_vendor_novathor, + [cpuinfo_arm_chipset_series_nvidia_tegra_t] = cpuinfo_arm_chipset_vendor_nvidia, + [cpuinfo_arm_chipset_series_nvidia_tegra_ap] = cpuinfo_arm_chipset_vendor_nvidia, + [cpuinfo_arm_chipset_series_nvidia_tegra_sl] = cpuinfo_arm_chipset_vendor_nvidia, + [cpuinfo_arm_chipset_series_pinecone_surge_s] = 
cpuinfo_arm_chipset_vendor_pinecone, + [cpuinfo_arm_chipset_series_renesas_mp] = cpuinfo_arm_chipset_vendor_renesas, + [cpuinfo_arm_chipset_series_rockchip_rk] = cpuinfo_arm_chipset_vendor_rockchip, + [cpuinfo_arm_chipset_series_spreadtrum_sc] = cpuinfo_arm_chipset_vendor_spreadtrum, + [cpuinfo_arm_chipset_series_telechips_tcc] = cpuinfo_arm_chipset_vendor_telechips, + [cpuinfo_arm_chipset_series_texas_instruments_omap] = cpuinfo_arm_chipset_vendor_texas_instruments, + [cpuinfo_arm_chipset_series_wondermedia_wm] = cpuinfo_arm_chipset_vendor_wondermedia, +}; + +/** + * Tries to match /(MSM|APQ)\d{4}([A-Z\-]*)/ signature (case-insensitive) for Qualcomm MSM and APQ chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board, ro.board.platform + * or ro.chipname) to match. + * @param end - end of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board, ro.board.platform or + * ro.chipname) to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_msm_apq( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect at least 7 symbols: 3 symbols "MSM" or "APQ" + 4 digits */ + if (start + 7 > end) { + return false; + } + + /* Check that string starts with "MSM" or "APQ", case-insensitive. + * The first three characters are loaded as 24-bit little endian word, binary ORed with 0x20 to convert to lower + * case, and compared to "MSM" and "APQ" strings as integers. + */ + const uint32_t series_signature = UINT32_C(0x00202020) | load_u24le(start); + enum cpuinfo_arm_chipset_series series; + switch (series_signature) { + case UINT32_C(0x6D736D): /* "msm" = reverse("msm") */ + series = cpuinfo_arm_chipset_series_qualcomm_msm; + break; + case UINT32_C(0x717061): /* "qpa" = reverse("apq") */ + series = cpuinfo_arm_chipset_series_qualcomm_apq; + break; + default: + return false; + } + + /* Sometimes there is a space ' ' following the MSM/APQ series */ + const char* pos = start + 3; + if (*pos == ' ') { + pos++; + + /* Expect at least 4 more symbols (4-digit model number) */ + if (pos + 4 > end) { + return false; + } + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 0; i < 4; i++) { + const uint32_t digit = (uint32_t) (uint8_t) (*pos++) - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Suffix is optional, so if we got to this point, parsing is successful. Commit parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_qualcomm, + .series = series, + .model = model, + }; + + /* Parse as many suffix characters as match the pattern [A-Za-z\-] */ + for (uint32_t i = 0; i < CPUINFO_ARM_CHIPSET_SUFFIX_MAX; i++) { + if (pos + i == end) { + break; + } + + const char c = pos[i]; + if (is_ascii_alphabetic(c)) { + /* Matched a letter [A-Za-z] */ + chipset->suffix[i] = c & '\xDF'; + } else if (c == '-') { + /* Matched a dash '-' */ + chipset->suffix[i] = c; + } else { + /* Neither of [A-Za-z\-] */ + break; + } + } + return true; +} + +/** + * Tries to match /SDM\d{3}$/ signature for Qualcomm Snapdragon chipsets. + * If match successful, extracts model information into \p chipset argument. 
+ * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_sdm( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 6 symbols: 3 symbols "SDM" + 3 digits */ + if (start + 6 != end) { + return false; + } + + /* Check that string starts with "SDM". + * The first three characters are loaded and compared as 24-bit little endian word. + */ + const uint32_t expected_sdm = load_u24le(start); + if (expected_sdm != UINT32_C(0x004D4453) /* "MDS" = reverse("SDM") */) { + return false; + } + + /* Validate and parse 3-digit model number */ + uint32_t model = 0; + for (uint32_t i = 3; i < 6; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_qualcomm, + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = model, + }; + return true; +} + +/** + * Tries to match /SM\d{4}$/ signature for Qualcomm Snapdragon chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_sm( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 6 symbols: 2 symbols "SM" + 4 digits */ + if (start + 6 != end) { + return false; + } + + /* Check that string starts with "SM". + * The first three characters are loaded and compared as 16-bit little endian word. + */ + const uint32_t expected_sm = load_u16le(start); + if (expected_sm != UINT16_C(0x4D53) /* "MS" = reverse("SM") */) { + return false; + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 2; i < 6; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset. 
*/ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_qualcomm, + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = model, + }; + return true; +} + + +struct special_map_entry { + const char* platform; + uint16_t model; + uint8_t series; + char suffix; +}; + +static const struct special_map_entry qualcomm_hardware_map_entries[] = { + { + /* "Kona" -> Qualcomm Kona */ + .platform = "Kona", + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = 865, + }, + { + /* "Bengal" -> Qualcomm Bengal */ + .platform = "Bengal", + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = 662, + }, + { + /* "Bengalp" -> Qualcomm Bengalp */ + .platform = "Bengalp", + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = 662, + }, + { + /* "Lito" -> Qualcomm Lito */ + .platform = "Lito", + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = 765, + .suffix = 'G' + }, + { + /* "Lagoon" -> Qualcomm Lagoon */ + .platform = "Lagoon", + .series = cpuinfo_arm_chipset_series_qualcomm_snapdragon, + .model = 0, + }, +}; + + +int strcicmp(char const *a, char const *b) +{ + for (;; a++, b++) { + int d = tolower((unsigned char)*a) - tolower((unsigned char)*b); + if (d != 0 || !*a) + return d; + } +} + +static bool match_qualcomm_special( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + for (size_t i = 0; i < CPUINFO_COUNT_OF(qualcomm_hardware_map_entries); i++) { + int length = end - start; + if (strcicmp(qualcomm_hardware_map_entries[i].platform, start) == 0 && + qualcomm_hardware_map_entries[i].platform[length] == 0) + { + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = chipset_series_vendor[qualcomm_hardware_map_entries[i].series], + .series = (enum cpuinfo_arm_chipset_series) qualcomm_hardware_map_entries[i].series, + .model = qualcomm_hardware_map_entries[i].model, + .suffix = { + [0] = qualcomm_hardware_map_entries[i].suffix, + }, + }; + return true; + } + } + return false; + +} + +/** + * Tries to match /Samsung Exynos\d{4}$/ signature (case-insensitive) for Samsung Exynos chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_samsung_exynos( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* + * Expect at 18-19 symbols: + * - "Samsung" (7 symbols) + space + "Exynos" (6 symbols) + optional space 4-digit model number + */ + const size_t length = end - start; + switch (length) { + case 18: + case 19: + break; + default: + return false; + } + + /* + * Check that the string starts with "samsung exynos", case-insensitive. + * Blocks of 4 characters are loaded and compared as little-endian 32-bit word. + * Case-insensitive characters are binary ORed with 0x20 to convert them to lowercase. 
+ */ + const uint32_t expected_sams = UINT32_C(0x20202000) | load_u32le(start); + if (expected_sams != UINT32_C(0x736D6153) /* "smaS" = reverse("Sams") */) { + return false; + } + const uint32_t expected_ung = UINT32_C(0x00202020) | load_u32le(start + 4); + if (expected_ung != UINT32_C(0x20676E75) /* " ung" = reverse("ung ") */) { + return false; + } + const uint32_t expected_exyn = UINT32_C(0x20202000) | load_u32le(start + 8); + if (expected_exyn != UINT32_C(0x6E797845) /* "nyxE" = reverse("Exyn") */) { + return false; + } + const uint16_t expected_os = UINT16_C(0x2020) | load_u16le(start + 12); + if (expected_os != UINT16_C(0x736F) /* "so" = reverse("os") */) { + return false; + } + + const char* pos = start + 14; + + /* There can be a space ' ' following the "Exynos" string */ + if (*pos == ' ') { + pos++; + + /* If optional space if present, we expect exactly 19 characters */ + if (length != 19) { + return false; + } + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 0; i < 4; i++) { + const uint32_t digit = (uint32_t) (uint8_t) (*pos++) - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_samsung, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = model, + }; + return true; +} + +/** + * Tries to match /exynos\d{4}$/ signature for Samsung Exynos chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (ro.board.platform or ro.chipname) to match. + * @param end - end of the platform identifier (ro.board.platform or ro.chipname) to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_exynos( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 10 symbols: "exynos" (6 symbols) + 4-digit model number */ + if (start + 10 != end) { + return false; + } + + /* Load first 4 bytes as little endian 32-bit word */ + const uint32_t expected_exyn = load_u32le(start); + if (expected_exyn != UINT32_C(0x6E797865) /* "nyxe" = reverse("exyn") */ ) { + return false; + } + + /* Load next 2 bytes as little endian 16-bit word */ + const uint16_t expected_os = load_u16le(start + 4); + if (expected_os != UINT16_C(0x736F) /* "so" = reverse("os") */ ) { + return false; + } + + /* Check and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 6; i < 10; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_samsung, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = model, + }; + return true; +} + +/** + * Tries to match /universal\d{4}$/ signature for Samsung Exynos chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board or ro.chipname) + * to match. + * @param end - end of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board or ro.chipname) + * to match. 
+ * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_universal( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 13 symbols: "universal" (9 symbols) + 4-digit model number */ + if (start + 13 != end) { + return false; + } + + /* + * Check that the string starts with "universal". + * Blocks of 4 characters are loaded and compared as little-endian 32-bit word. + * Case-insensitive characters are binary ORed with 0x20 to convert them to lowercase. + */ + const uint8_t expected_u = UINT8_C(0x20) | (uint8_t) start[0]; + if (expected_u != UINT8_C(0x75) /* "u" */) { + return false; + } + const uint32_t expected_nive = UINT32_C(0x20202020) | load_u32le(start + 1); + if (expected_nive != UINT32_C(0x6576696E) /* "evin" = reverse("nive") */ ) { + return false; + } + const uint32_t expected_ersa = UINT32_C(0x20202020) | load_u32le(start + 5); + if (expected_ersa != UINT32_C(0x6C617372) /* "lasr" = reverse("rsal") */) { + return false; + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 9; i < 13; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_samsung, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = model, + }; + return true; +} + +/** + * Compares, case insensitively, a string to known values "SMDK4210" and "SMDK4x12" for Samsung Exynos chipsets. + * If platform identifier matches one of the SMDK* values, extracts model information into \p chipset argument. + * For "SMDK4x12" match, decodes the chipset name using number of cores. + * + * @param start - start of the platform identifier (/proc/cpuinfo Hardware string or ro.product.board) to match. + * @param end - end of the platform identifier (/proc/cpuinfo Hardware string or ro.product.board) to match. + * @param cores - number of cores in the chipset. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_and_parse_smdk( + const char* start, const char* end, uint32_t cores, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 8 symbols: "SMDK" (4 symbols) + 4-digit model number */ + if (start + 8 != end) { + return false; + } + + /* + * Check that string starts with "SMDK" (case-insensitive). + * The first four characters are loaded as a 32-bit little endian word and converted to lowercase. + */ + const uint32_t expected_smdk = UINT32_C(0x20202020) | load_u32le(start); + if (expected_smdk != UINT32_C(0x6B646D73) /* "kdms" = reverse("smdk") */) { + return false; + } + + /* + * Check that string ends with "4210" or "4x12". + * The last four characters are loaded and compared as a 32-bit little endian word.
+ */ + uint32_t model = 0; + const uint32_t expected_model = load_u32le(start + 4); + switch (expected_model) { + case UINT32_C(0x30313234): /* "0124" = reverse("4210") */ + model = 4210; + break; + case UINT32_C(0x32317834): /* "21x4" = reverse("4x12") */ + switch (cores) { + case 2: + model = 4212; + break; + case 4: + model = 4412; + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core Exynos 4x12 chipset", cores); + } + } + + if (model == 0) { + return false; + } + + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_samsung, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = model, + }; + return true; +} + +/** + * Tries to match /MTK?\d{4}[A-Z/]*$/ signature for MediaTek MT chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board, ro.board.platform, + * ro.mediatek.platform, or ro.chipname) to match. + * @param end - end of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board, ro.board.platform, + * ro.mediatek.platform, or ro.chipname) to match. + * @param match_end - indicates if the function should attempt to match through the end of the string and fail if there + * are unparsed characters in the end, or match only MTK signature, model number, and some of the + * suffix characters (the ones that pass validation). + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_mt( + const char* start, const char* end, bool match_end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect at least 6 symbols: "MT" (2 symbols) + 4-digit model number */ + if (start + 6 > end) { + return false; + } + + /* + * Check that string starts with "MT" (case-insensitive). + * The first two characters are loaded as 16-bit little endian word and converted to lowercase. + */ + const uint16_t mt = UINT16_C(0x2020) | load_u16le(start); + if (mt != UINT16_C(0x746D) /* "tm" */) { + return false; + } + + + /* Some images report "MTK" rather than "MT" */ + const char* pos = start + 2; + if (((uint8_t) *pos | UINT8_C(0x20)) == (uint8_t) 'k') { + pos++; + + /* Expect 4 more symbols after "MTK" (4-digit model number) */ + if (pos + 4 > end) { + return false; + } + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 0; i < 4; i++) { + const uint32_t digit = (uint32_t) (uint8_t) (*pos++) - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Record parsed chipset. This implicitly zeroes-out suffix, which will be parsed later. 
*/ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_mediatek, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = model, + }; + + if (match_end) { + /* Check that the potential suffix does not exceed maximum length */ + const size_t suffix_length = end - pos; + if (suffix_length > CPUINFO_ARM_CHIPSET_SUFFIX_MAX) { + return false; + } + + /* Validate suffix characters and copy them to chipset structure */ + for (size_t i = 0; i < suffix_length; i++) { + const char c = (*pos++); + if (is_ascii_alphabetic(c)) { + /* Matched a letter [A-Za-z], convert to uppercase */ + chipset->suffix[i] = c & '\xDF'; + } else if (c == '/') { + /* Matched a slash '/' */ + chipset->suffix[i] = c; + } else { + /* Invalid suffix character (neither of [A-Za-z/]) */ + return false; + } + } + } else { + /* Validate and parse as many suffix characters as we can */ + for (size_t i = 0; i < CPUINFO_ARM_CHIPSET_SUFFIX_MAX; i++) { + if (pos + i == end) { + break; + } + + const char c = pos[i]; + if (is_ascii_alphabetic(c)) { + /* Matched a letter [A-Za-z], convert to uppercase */ + chipset->suffix[i] = c & '\xDF'; + } else if (c == '/') { + /* Matched a slash '/' */ + chipset->suffix[i] = c; + } else { + /* Invalid suffix character (neither of [A-Za-z/]). This marks the end of the suffix. */ + break; + } + } + } + /* All suffix characters successfully validated and copied to chipset data */ + return true; +} + +/** + * Tries to match /[Kk]irin\s?\d{3}$/ signature for HiSilicon Kirin chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_kirin( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect 8-9 symbols: "Kirin" (5 symbols) + optional whitespace (1 symbol) + 3-digit model number */ + const size_t length = end - start; + switch (length) { + case 8: + case 9: + break; + default: + return false; + } + + /* Check that the string starts with "Kirin" or "kirin". */ + if (((uint8_t) start[0] | UINT8_C(0x20)) != (uint8_t) 'k') { + return false; + } + /* Symbols 1-5 are loaded and compared as little-endian 32-bit word. */ + const uint32_t irin = load_u32le(start + 1); + if (irin != UINT32_C(0x6E697269) /* "niri" = reverse("irin") */) { + return false; + } + + /* Check for optional whitespace after "Kirin" */ + if (is_ascii_whitespace(start[5])) { + /* When whitespace is present after "Kirin", expect 9 symbols total */ + if (length != 9) { + return false; + } + } + + /* Validate and parse 3-digit model number */ + uint32_t model = 0; + for (int32_t i = 0; i < 3; i++) { + const uint32_t digit = (uint32_t) (uint8_t) end[i - 3] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* + * Thats it, return parsed chipset. + * Technically, Kirin 910T has a suffix, but it never appears in the form of "910T" string. + * Instead, Kirin 910T devices report "hi6620oem" string (handled outside of this function). 
+ */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_hisilicon, + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = model, + }; + return true; +} + +/** + * Tries to match /rk\d{4}[a-z]?$/ signature for Rockchip RK chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (/proc/cpuinfo Hardware string or ro.board.platform) to match. + * @param end - end of the platform identifier (/proc/cpuinfo Hardware string or ro.board.platform) to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_rk( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect 6-7 symbols: "RK" (2 symbols) + 4-digit model number + optional 1-letter suffix */ + const size_t length = end - start; + switch (length) { + case 6: + case 7: + break; + default: + return false; + } + + /* + * Check that string starts with "RK" (case-insensitive). + * The first two characters are loaded as 16-bit little endian word and converted to lowercase. + */ + const uint16_t expected_rk = UINT16_C(0x2020) | load_u16le(start); + if (expected_rk != UINT16_C(0x6B72) /* "kr" = reverse("rk") */) { + return false; + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 2; i < 6; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Parse optional suffix */ + char suffix = 0; + if (length == 7) { + /* Parse the suffix letter */ + const char c = start[6]; + if (is_ascii_alphabetic(c)) { + /* Convert to upper case */ + suffix = c & '\xDF'; + } else { + /* Invalid suffix character */ + return false; + } + } + + /* Return parsed chipset */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_rockchip, + .series = cpuinfo_arm_chipset_series_rockchip_rk, + .model = model, + .suffix = { + [0] = suffix, + }, + }; + return true; +} + +/** + * Tries to match, case-insentitively, /s[cp]\d{4}[a-z]*|scx15$/ signature for Spreadtrum SC chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board, + * ro.board.platform, or ro.chipname) to match. + * @param end - end of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board, + * ro.board.platform, or ro.chipname) to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_sc( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect at least 5 symbols: "scx15" */ + if (start + 5 > end) { + return false; + } + + /* + * Check that string starts with "S[CP]" (case-insensitive). + * The first two characters are loaded as 16-bit little endian word and converted to lowercase. 
+ */ + const uint16_t expected_sc_or_sp = UINT16_C(0x2020) | load_u16le(start); + switch (expected_sc_or_sp) { + case UINT16_C(0x6373): /* "cs" = reverse("sc") */ + case UINT16_C(0x7073): /* "ps" = reverse("sp") */ + break; + default: + return false; + } + + /* Special case: "scx" prefix (SC7715 reported as "scx15") */ + if ((start[2] | '\x20') == 'x') { + /* Expect exactly 5 characters: "scx15" */ + if (start + 5 != end) { + return false; + } + + /* Check that string ends with "15" */ + const uint16_t expected_15 = load_u16le(start + 3); + if (expected_15 != UINT16_C(0x3531) /* "51" = reverse("15") */ ) { + return false; + } + + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_spreadtrum, + .series = cpuinfo_arm_chipset_series_spreadtrum_sc, + .model = 7715, + }; + return true; + } + + /* Expect at least 6 symbols: "S[CP]" (2 symbols) + 4-digit model number */ + if (start + 6 > end) { + return false; + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 2; i < 6; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Write parsed chipset */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_spreadtrum, + .series = cpuinfo_arm_chipset_series_spreadtrum_sc, + .model = model, + }; + + /* Validate and copy suffix letters. If suffix is too long, truncate at CPUINFO_ARM_CHIPSET_SUFFIX_MAX letters. */ + const char* suffix = start + 6; + for (size_t i = 0; i < CPUINFO_ARM_CHIPSET_SUFFIX_MAX; i++) { + if (suffix + i == end) { + break; + } + + const char c = suffix[i]; + if (!is_ascii_alphabetic(c)) { + /* Invalid suffix character */ + return false; + } + /* Convert suffix letter to uppercase */ + chipset->suffix[i] = c & '\xDF'; + } + return true; +} + +/** + * Tries to match /lc\d{4}[a-z]?$/ signature for Leadcore LC chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the platform identifier (ro.product.board or ro.board.platform) to match. + * @param end - end of the platform identifier (ro.product.board or ro.board.platform) to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_lc( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect at 6-7 symbols: "lc" (2 symbols) + 4-digit model number + optional 1-letter suffix */ + const size_t length = end - start; + switch (length) { + case 6: + case 7: + break; + default: + return false; + } + + /* Check that string starts with "lc". 
The first two characters are loaded as a 16-bit little endian word */
+	const uint16_t expected_lc = load_u16le(start);
+	if (expected_lc != UINT16_C(0x636C) /* "cl" = reverse("lc") */) {
+		return false;
+	}
+
+	/* Validate and parse 4-digit model number */
+	uint32_t model = 0;
+	for (uint32_t i = 2; i < 6; i++) {
+		const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0';
+		if (digit >= 10) {
+			/* Not really a digit */
+			return false;
+		}
+		model = model * 10 + digit;
+	}
+
+	/* Parse optional suffix letter */
+	char suffix = 0;
+	if (length == 7) {
+		const char c = start[6];
+		if (is_ascii_alphabetic(c)) {
+			/* Convert to uppercase */
+			suffix = c & '\xDF';
+		} else {
+			/* Invalid suffix character */
+			return false;
+		}
+	}
+
+	/* Return parsed chipset */
+	*chipset = (struct cpuinfo_arm_chipset) {
+		.vendor = cpuinfo_arm_chipset_vendor_leadcore,
+		.series = cpuinfo_arm_chipset_series_leadcore_lc,
+		.model = model,
+		.suffix = {
+			[0] = suffix,
+		},
+	};
+	return true;
+}
+
+/**
+ * Tries to match /PXA(\d{3,4}|1L88)$/ signature for Marvell PXA chipsets.
+ * If match successful, extracts model information into \p chipset argument.
+ *
+ * @param start - start of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board or ro.chipname)
+ *                to match.
+ * @param end - end of the platform identifier (/proc/cpuinfo Hardware string, ro.product.board or ro.chipname) to
+ *              match.
+ * @param[out] chipset - location where chipset information will be stored upon a successful match.
+ *
+ * @returns true if signature matched, false otherwise.
+ */
+static bool match_pxa(
+	const char* start, const char* end,
+	struct cpuinfo_arm_chipset chipset[restrict static 1])
+{
+	/* Expect 6-7 symbols: "PXA" (3 symbols) + 3-4 digit model number */
+	const size_t length = end - start;
+	switch (length) {
+		case 6:
+		case 7:
+			break;
+		default:
+			return false;
+	}
+
+	/* Check that the string starts with "PXA". The two characters after 'P' are loaded and compared as a little-endian 16-bit word. */
+	if (start[0] != 'P') {
+		return false;
+	}
+	const uint16_t expected_xa = load_u16le(start + 1);
+	if (expected_xa != UINT16_C(0x4158) /* "AX" = reverse("XA") */) {
+		return false;
+	}
+
+	uint32_t model = 0;
+
+	/* Check for a very common typo: "PXA1L88" for "PXA1088" */
+	if (length == 7) {
+		/* Load 4 model "number" symbols as a little endian 32-bit word and compare to "1L88" */
+		const uint32_t expected_1L88 = load_u32le(start + 3);
+		if (expected_1L88 == UINT32_C(0x38384C31) /* "88L1" = reverse("1L88") */) {
+			model = 1088;
+			goto write_chipset;
+		}
+	}
+
+	/* Check and parse 3-4 digit model number */
+	for (uint32_t i = 3; i < length; i++) {
+		const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0';
+		if (digit >= 10) {
+			/* Not really a digit */
+			return false;
+		}
+		model = model * 10 + digit;
+	}
+
+	/* Return parsed chipset. */
+write_chipset:
+	*chipset = (struct cpuinfo_arm_chipset) {
+		.vendor = cpuinfo_arm_chipset_vendor_marvell,
+		.series = cpuinfo_arm_chipset_series_marvell_pxa,
+		.model = model,
+	};
+	return true;
+}
+
+/**
+ * Tries to match /BCM\d{4}$/ signature for Broadcom BCM chipsets.
+ * If match successful, extracts model information into \p chipset argument.
+ *
+ * @param start - start of the /proc/cpuinfo Hardware string to match.
+ * @param end - end of the /proc/cpuinfo Hardware string to match.
+ * @param[out] chipset - location where chipset information will be stored upon a successful match.
+ *
+ * @returns true if signature matched, false otherwise.
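+ *
+ * For example, a Hardware string of exactly "BCM2835" (an illustrative value) parses to model 2835; strings
+ * with a 5-digit model number or a trailing suffix letter are too long and do not match.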
+ */ +static bool match_bcm( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 7 symbols: "BCM" (3 symbols) + 4-digit model number */ + if (start + 7 != end) { + return false; + } + + /* Check that the string starts with "BCM". + * The first three characters are loaded and compared as a 24-bit little endian word. + */ + const uint32_t expected_bcm = load_u24le(start); + if (expected_bcm != UINT32_C(0x004D4342) /* "MCB" = reverse("BCM") */) { + return false; + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 3; i < 7; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_broadcom, + .series = cpuinfo_arm_chipset_series_broadcom_bcm, + .model = model, + }; + return true; +} + +/** + * Tries to match /OMAP\d{4}$/ signature for Texas Instruments OMAP chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_omap( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 8 symbols: "OMAP" (4 symbols) + 4-digit model number */ + if (start + 8 != end) { + return false; + } + + /* Check that the string starts with "OMAP". Symbols 0-4 are loaded and compared as little-endian 32-bit word. */ + const uint32_t expected_omap = load_u32le(start); + if (expected_omap != UINT32_C(0x50414D4F) /* "PAMO" = reverse("OMAP") */) { + return false; + } + + /* Validate and parse 4-digit model number */ + uint32_t model = 0; + for (uint32_t i = 4; i < 8; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Return parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_texas_instruments, + .series = cpuinfo_arm_chipset_series_texas_instruments_omap, + .model = model, + }; + return true; +} + +/** + * Compares platform identifier string to known values for Broadcom chipsets. + * If the string matches one of the known values, the function decodes Broadcom chipset from frequency and number of + * cores into \p chipset argument. + * + * @param start - start of the platform identifier (ro.product.board or ro.board.platform) to match. + * @param end - end of the platform identifier (ro.product.board or ro.board.platform) to match. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * @param[out] chipset - location where chipset information will be stored upon a successful match and decoding. + * + * @returns true if signature matched (even if exact model can't be decoded), false otherwise. 
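+ *
+ * For example, a 2-core "hawaii" platform at 1200000 KHz decodes to BCM21664T, while the single-core
+ * variant decodes to BCM21663.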
+ */ +static bool match_and_parse_broadcom( + const char* start, const char* end, uint32_t cores, uint32_t max_cpu_freq_max, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect 4-6 symbols: "java" (4 symbols), "rhea" (4 symbols), "capri" (5 symbols), or "hawaii" (6 symbols) */ + const size_t length = end - start; + switch (length) { + case 4: + case 5: + case 6: + break; + default: + return false; + } + + /* + * Compare the platform identifier to known values for Broadcom chipsets: + * - "rhea" + * - "java" + * - "capri" + * - "hawaii" + * Upon a successful match, decode chipset name from frequency and number of cores. + */ + uint32_t model = 0; + char suffix = 0; + const uint32_t expected_platform = load_u32le(start); + switch (expected_platform) { + case UINT32_C(0x61656872): /* "aehr" = reverse("rhea") */ + if (length == 4) { + /* + * Detected "rhea" platform: + * - 1 core @ 849999 KHz -> BCM21654 + * - 1 core @ 999999 KHz -> BCM21654G + */ + if (cores == 1) { + model = 21654; + if (max_cpu_freq_max >= 999999) { + suffix = 'G'; + } + } + } + break; + case UINT32_C(0x6176616A): /* "avaj" = reverse("java") */ + if (length == 4) { + /* + * Detected "java" platform: + * - 4 cores -> BCM23550 + */ + if (cores == 4) { + model = 23550; + } + } + break; + case UINT32_C(0x61776168): /* "awah" = reverse("hawa") */ + if (length == 6) { + /* Check that string equals "hawaii" */ + const uint16_t expected_ii = load_u16le(start + 4); + if (expected_ii == UINT16_C(0x6969) /* "ii" */ ) { + /* + * Detected "hawaii" platform: + * - 1 core -> BCM21663 + * - 2 cores @ 999999 KHz -> BCM21664 + * - 2 cores @ 1200000 KHz -> BCM21664T + */ + switch (cores) { + case 1: + model = 21663; + break; + case 2: + model = 21664; + if (max_cpu_freq_max >= 1200000) { + suffix = 'T'; + } + break; + } + } + } + break; + case UINT32_C(0x72706163): /* "rpac" = reverse("capr") */ + if (length == 5) { + /* Check that string equals "capri" */ + if (start[4] == 'i') { + /* + * Detected "capri" platform: + * - 2 cores -> BCM28155 + */ + if (cores == 2) { + model = 28155; + } + } + } + break; + } + + if (model != 0) { + /* Chipset was successfully decoded */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_broadcom, + .series = cpuinfo_arm_chipset_series_broadcom_bcm, + .model = model, + .suffix = { + [0] = suffix, + }, + }; + } + return model != 0; +} + +struct sunxi_map_entry { + uint8_t sunxi; + uint8_t cores; + uint8_t model; + char suffix; +}; + +static const struct sunxi_map_entry sunxi_map_entries[] = { +#if CPUINFO_ARCH_ARM + { + /* ("sun4i", 1) -> "A10" */ + .sunxi = 4, + .cores = 1, + .model = 10, + }, + { + /* ("sun5i", 1) -> "A13" */ + .sunxi = 5, + .cores = 1, + .model = 13, + }, + { + /* ("sun6i", 4) -> "A31" */ + .sunxi = 6, + .cores = 4, + .model = 31, + }, + { + /* ("sun7i", 2) -> "A20" */ + .sunxi = 7, + .cores = 2, + .model = 20, + + }, + { + /* ("sun8i", 2) -> "A23" */ + .sunxi = 8, + .cores = 2, + .model = 23, + }, + { + /* ("sun8i", 4) -> "A33" */ + .sunxi = 8, + .cores = 4, + .model = 33, + }, + { + /* ("sun8i", 8) -> "A83T" */ + .sunxi = 8, + .cores = 8, + .model = 83, + .suffix = 'T', + }, + { + /* ("sun9i", 8) -> "A80" */ + .sunxi = 9, + .cores = 8, + .model = 80, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* ("sun50i", 4) -> "A64" */ + .sunxi = 50, + .cores = 4, + .model = 64, + }, +}; + +/** + * Tries to match /proc/cpuinfo Hardware string to Allwinner /sun\d+i/ signature. 
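+ * The platform number alone does not identify the chipset (e.g. "sun8i" covers A23, A33, and A83T in the
+ * sunxi_map_entries table above), so the number of cores is used to disambiguate.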
+ * If the string matches signature, the function decodes Allwinner chipset from the number in the signature and the + * number of cores, and stores it in \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param cores - number of cores in the chipset. + * @param[out] chipset - location where chipset information will be stored upon a successful match and decoding. + * + * @returns true if signature matched (even if exact model can't be decoded), false otherwise. + */ +static bool match_and_parse_sunxi( + const char* start, const char* end, uint32_t cores, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect at least 5 symbols: "sun" (3 symbols) + platform id (1-2 digits) + "i" (1 symbol) */ + if (start + 5 > end) { + return false; + } + + /* Compare the first 3 characters to "sun" */ + if (start[0] != 's') { + return false; + } + const uint16_t expected_un = load_u16le(start + 1); + if (expected_un != UINT16_C(0x6E75) /* "nu" = reverse("un") */) { + return false; + } + + /* Check and parse the first (required) digit of the sunXi platform id */ + uint32_t sunxi_platform = 0; + { + const uint32_t digit = (uint32_t) (uint8_t) start[3] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + sunxi_platform = digit; + } + + /* Parse optional second digit of the sunXi platform id */ + const char* pos = start + 4; + { + const uint32_t digit = (uint32_t) (uint8_t) (*pos) - '0'; + if (digit < 10) { + sunxi_platform = sunxi_platform * 10 + digit; + if (++pos == end) { + /* Expected one more character, final 'i' letter */ + return false; + } + } + } + + /* Validate the final 'i' letter */ + if (*pos != 'i') { + return false; + } + + /* Compare sunXi platform id and number of cores to tabulated values to decode chipset name */ + uint32_t model = 0; + char suffix = 0; + for (size_t i = 0; i < CPUINFO_COUNT_OF(sunxi_map_entries); i++) { + if (sunxi_platform == sunxi_map_entries[i].sunxi && cores == sunxi_map_entries[i].cores) { + model = sunxi_map_entries[i].model; + suffix = sunxi_map_entries[i].suffix; + break; + } + } + + if (model == 0) { + cpuinfo_log_info("unrecognized %"PRIu32"-core Allwinner sun%"PRIu32" platform", cores, sunxi_platform); + } + /* Create chipset name from decoded data */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_allwinner, + .series = cpuinfo_arm_chipset_series_allwinner_a, + .model = model, + .suffix = { + [0] = suffix, + }, + }; + return true; +} + +/** + * Compares /proc/cpuinfo Hardware string to "WMT" signature. + * If the string matches signature, the function decodes WonderMedia chipset from frequency and number of cores into + * \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * @param[out] chipset - location where chipset information will be stored upon a successful match and decoding. + * + * @returns true if signature matched (even if exact model can't be decoded), false otherwise. 
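+ *
+ * For example, a single-core "WMT" system at 1200000 KHz decodes to WM8850, while unlisted frequency/core
+ * combinations keep the WM series with model number 0.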
+ */ +static bool match_and_parse_wmt( + const char* start, const char* end, uint32_t cores, uint32_t max_cpu_freq_max, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expected 3 symbols: "WMT" */ + if (start + 3 != end) { + return false; + } + + /* Compare string to "WMT" */ + if (start[0] != 'W') { + return false; + } + const uint16_t expected_mt = load_u16le(start + 1); + if (expected_mt != UINT16_C(0x544D) /* "TM" = reverse("MT") */) { + return false; + } + + /* Decode chipset name from frequency and number of cores */ + uint32_t model = 0; + switch (cores) { + case 1: + switch (max_cpu_freq_max) { + case 1008000: + /* 1 core @ 1008000 KHz -> WM8950 */ + model = 8950; + break; + case 1200000: + /* 1 core @ 1200000 KHz -> WM8850 */ + model = 8850; + break; + } + break; + case 2: + if (max_cpu_freq_max == 1500000) { + /* 2 cores @ 1500000 KHz -> WM8880 */ + model = 8880; + } + break; + } + + if (model == 0) { + cpuinfo_log_info("unrecognized WonderMedia platform with %"PRIu32" cores at %"PRIu32" KHz", + cores, max_cpu_freq_max); + } + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_wondermedia, + .series = cpuinfo_arm_chipset_series_wondermedia_wm, + .model = model, + }; + return true; +} + +struct huawei_map_entry { + uint32_t platform; + uint32_t model; +}; + +static const struct huawei_map_entry huawei_platform_map[] = { + { + /* "ALP" -> Kirin 970 */ + .platform = UINT32_C(0x00504C41), /* "\0PLA" = reverse("ALP\0") */ + .model = 970, + }, + { + /* "BAC" -> Kirin 659 */ + .platform = UINT32_C(0x00434142), /* "\0CAB" = reverse("BAC\0") */ + .model = 659, + }, + { + /* "BLA" -> Kirin 970 */ + .platform = UINT32_C(0x00414C42), /* "\0ALB" = reverse("BLA\0") */ + .model = 970, + }, + { + /* "BKL" -> Kirin 970 */ + .platform = UINT32_C(0x004C4B42), /* "\0LKB" = reverse("BKL\0") */ + .model = 970, + }, + { + /* "CLT" -> Kirin 970 */ + .platform = UINT32_C(0x00544C43), /* "\0TLC" = reverse("CLT\0") */ + .model = 970, + }, + { + /* "COL" -> Kirin 970 */ + .platform = UINT32_C(0x004C4F43), /* "\0LOC" = reverse("COL\0") */ + .model = 970, + }, + { + /* "COR" -> Kirin 970 */ + .platform = UINT32_C(0x00524F43), /* "\0ROC" = reverse("COR\0") */ + .model = 970, + }, + { + /* "DUK" -> Kirin 960 */ + .platform = UINT32_C(0x004B5544), /* "\0KUD" = reverse("DUK\0") */ + .model = 960, + }, + { + /* "EML" -> Kirin 970 */ + .platform = UINT32_C(0x004C4D45), /* "\0LME" = reverse("EML\0") */ + .model = 970, + }, + { + /* "EVA" -> Kirin 955 */ + .platform = UINT32_C(0x00415645), /* "\0AVE" = reverse("EVA\0") */ + .model = 955, + }, + { + /* "FRD" -> Kirin 950 */ + .platform = UINT32_C(0x00445246), /* "\0DRF" = reverse("FRD\0") */ + .model = 950, + }, + { + /* "INE" -> Kirin 710 */ + .platform = UINT32_C(0x00454E49), /* "\0ENI" = reverse("INE\0") */ + .model = 710, + }, + { + /* "KNT" -> Kirin 950 */ + .platform = UINT32_C(0x00544E4B), /* "\0TNK" = reverse("KNT\0") */ + .model = 950, + }, + { + /* "LON" -> Kirin 960 */ + .platform = UINT32_C(0x004E4F4C), /* "\0NOL" = reverse("LON\0") */ + .model = 960, + }, + { + /* "LYA" -> Kirin 980 */ + .platform = UINT32_C(0x0041594C), /* "\0AYL" = reverse("LYA\0") */ + .model = 980, + }, + { + /* "MCN" -> Kirin 980 */ + .platform = UINT32_C(0x004E434D), /* "\0NCM" = reverse("MCN\0") */ + .model = 980, + }, + { + /* "MHA" -> Kirin 960 */ + .platform = UINT32_C(0x0041484D), /* "\0AHM" = reverse("MHA\0") */ + .model = 960, + }, + { + /* "NEO" -> Kirin 970 */ + .platform = UINT32_C(0x004F454E), /* "\0OEN" = reverse("NEO\0") */ 
+ .model = 970, + }, + { + /* "NXT" -> Kirin 950 */ + .platform = UINT32_C(0x0054584E), /* "\0TXN" = reverse("NXT\0") */ + .model = 950, + }, + { + /* "PAN" -> Kirin 980 */ + .platform = UINT32_C(0x004E4150), /* "\0NAP" = reverse("PAN\0") */ + .model = 980, + }, + { + /* "PAR" -> Kirin 970 */ + .platform = UINT32_C(0x00524150), /* "\0RAP" = reverse("PAR\0") */ + .model = 970, + }, + { + /* "RVL" -> Kirin 970 */ + .platform = UINT32_C(0x004C5652), /* "\0LVR" = reverse("RVL\0") */ + .model = 970, + }, + { + /* "STF" -> Kirin 960 */ + .platform = UINT32_C(0x00465453), /* "\0FTS" = reverse("STF\0") */ + .model = 960, + }, + { + /* "SUE" -> Kirin 980 */ + .platform = UINT32_C(0x00455553), /* "\0EUS" = reverse("SUE\0") */ + .model = 980, + }, + { + /* "VIE" -> Kirin 955 */ + .platform = UINT32_C(0x00454956), /* "\0EIV" = reverse("VIE\0") */ + .model = 955, + }, + { + /* "VKY" -> Kirin 960 */ + .platform = UINT32_C(0x00594B56), /* "\0YKV" = reverse("VKY\0") */ + .model = 960, + }, + { + /* "VTR" -> Kirin 960 */ + .platform = UINT32_C(0x00525456), /* "\0RTV" = reverse("VTR\0") */ + .model = 960, + }, +}; + +/** + * Tries to match ro.product.board string to Huawei /([A-Z]{3})(\-[A-Z]?L\d{2})$/ signature where \1 is one of the + * known values for Huawei devices, which do not report chipset name elsewhere. + * If the string matches signature, the function decodes chipset (always HiSilicon Kirin for matched devices) from + * the Huawei platform ID in the signature and stores it in \p chipset argument. + * + * @param start - start of the ro.product.board string to match. + * @param end - end of the ro.product.board string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match and decoding. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_and_parse_huawei( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* + * Expect length of either 3, 7 or 8, exactly: + * - 3-letter platform identifier (see huawei_platform_map) + * - 3-letter platform identifier + '-' + 'L' + two digits + * - 3-letter platform identifier + '-' + capital letter + 'L' + two digits + */ + const size_t length = end - start; + switch (length) { + case 3: + case 7: + case 8: + break; + default: + return false; + } + + /* + * Try to find the first three-letter substring in among the tabulated entries for Huawei devices. + * The first three letters are loaded and compared as a little-endian 24-bit word. + */ + uint32_t model = 0; + const uint32_t target_platform_id = load_u24le(start); + for (uint32_t i = 0; i < CPUINFO_COUNT_OF(huawei_platform_map); i++) { + if (huawei_platform_map[i].platform == target_platform_id) { + model = huawei_platform_map[i].model; + break; + } + } + + if (model == 0) { + /* Platform does not match the tabulated Huawei entries */ + return false; + } + + if (length > 3) { + /* + * Check that: + * - The symbol after platform id is a dash + * - The symbol after it is an uppercase letter. For 7-symbol strings, the symbol is just 'L'. 
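+		 *
+		 * For example, "ALP-L29" and "ALP-AL29" both match the "ALP" entry above (Kirin 970), while a value
+		 * such as "ALP-X29" is rejected because the letter before the two trailing digits must be 'L'.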
+ */ + if (start[3] != '-' || !is_ascii_alphabetic_uppercase(start[4])) { + return false; + } + + /* Check that the last 3 entries are /L\d\d/ */ + if (end[-3] != 'L' || !is_ascii_numeric(end[-2]) || !is_ascii_numeric(end[-1])) { + return false; + } + } + + /* All checks succeeded, commit chipset name */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_hisilicon, + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = model, + }; + return true; +} + +/** + * Tries to match /tcc\d{3}x$/ signature for Telechips TCCXXXx chipsets. + * If match successful, extracts model information into \p chipset argument. + * + * @param start - start of the /proc/cpuinfo Hardware string to match. + * @param end - end of the /proc/cpuinfo Hardware string to match. + * @param[out] chipset - location where chipset information will be stored upon a successful match. + * + * @returns true if signature matched, false otherwise. + */ +static bool match_tcc( + const char* start, const char* end, + struct cpuinfo_arm_chipset chipset[restrict static 1]) +{ + /* Expect exactly 7 symbols: "tcc" (3 symbols) + 3-digit model number + fixed "x" suffix */ + if (start + 7 != end) { + return false; + } + + /* Quick check for the first character */ + if (start[0] != 't') { + return false; + } + + /* Load the next 2 bytes as little endian 16-bit word */ + const uint16_t expected_cc = load_u16le(start + 1); + if (expected_cc != UINT16_C(0x6363) /* "cc" */ ) { + return false; + } + + /* Check and parse 3-digit model number */ + uint32_t model = 0; + for (uint32_t i = 3; i < 6; i++) { + const uint32_t digit = (uint32_t) (uint8_t) start[i] - '0'; + if (digit >= 10) { + /* Not really a digit */ + return false; + } + model = model * 10 + digit; + } + + /* Check the fixed 'x' suffix in the end */ + if (start[6] != 'x') { + return false; + } + + /* Commit parsed chipset. */ + *chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_telechips, + .series = cpuinfo_arm_chipset_series_telechips_tcc, + .model = model, + .suffix = { + [0] = 'X' + }, + }; + return true; +} + +/* + * Compares ro.board.platform string to Nvidia Tegra signatures ("tegra" and "tegra3") + * This check has effect on how /proc/cpuinfo Hardware string is interpreted. + * + * @param start - start of the ro.board.platform string to check. + * @param end - end of the ro.board.platform string to check. 
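+ * Note that only the exact values "tegra" and "tegra3" are accepted; e.g. "tegra2" fails the trailing
+ * character check below.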
+ * + * @returns true if the string matches an Nvidia Tegra signature, and false otherwise + */ +static bool is_tegra(const char* start, const char* end) { + /* Expect 5 ("tegra") or 6 ("tegra3") symbols */ + const size_t length = end - start; + switch (length) { + case 5: + case 6: + break; + default: + return false; + } + + /* Check that the first 5 characters match "tegra" */ + if (start[0] != 't') { + return false; + } + const uint32_t expected_egra = load_u32le(start + 1); + if (expected_egra != UINT32_C(0x61726765) /* "arge" = reverse("egra") */) { + return false; + } + + /* Check if the string is either "tegra" (length = 5) or "tegra3" (length != 5) and last character is '3' */ + return (length == 5 || start[5] == '3'); +} + +static const struct special_map_entry special_hardware_map_entries[] = { +#if CPUINFO_ARCH_ARM + { + /* "k3v2oem1" -> HiSilicon K3V2 */ + .platform = "k3v2oem1", + .series = cpuinfo_arm_chipset_series_hisilicon_k3v, + .model = 2, + }, + { + /* "hi6620oem" -> HiSilicon Kirin 910T */ + .platform = "hi6620oem", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 910, + .suffix = 'T' + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "hi6250" -> HiSilicon Kirin 650 */ + .platform = "hi6250", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 650, + }, + { + /* "hi6210sft" -> HiSilicon Kirin 620 */ + .platform = "hi6210sft", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 620, + }, + { + /* "hi3751" -> HiSilicon Hi3751 */ + .platform = "hi3751", + .series = cpuinfo_arm_chipset_series_hisilicon_hi, + .model = 3751, + }, +#if CPUINFO_ARCH_ARM + { + /* "hi3630" -> HiSilicon Kirin 920 */ + .platform = "hi3630", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 920, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "hi3635" -> HiSilicon Kirin 930 */ + .platform = "hi3635", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 930, + }, +#if CPUINFO_ARCH_ARM + { + /* "gs702a" -> Actions ATM7029 (Cortex-A5 + GC1000) */ + .platform = "gs702a", + .series = cpuinfo_arm_chipset_series_actions_atm, + .model = 7029, + }, + { + /* "gs702c" -> Actions ATM7029B (Cortex-A5 + SGX540) */ + .platform = "gs702c", + .series = cpuinfo_arm_chipset_series_actions_atm, + .model = 7029, + .suffix = 'B', + }, + { + /* "gs703d" -> Actions ATM7039S */ + .platform = "gs703d", + .series = cpuinfo_arm_chipset_series_actions_atm, + .model = 7039, + .suffix = 'S', + }, + { + /* "gs705a" -> Actions ATM7059A */ + .platform = "gs705a", + .series = cpuinfo_arm_chipset_series_actions_atm, + .model = 7059, + .suffix = 'A', + }, + { + /* "Amlogic Meson8" -> Amlogic S812 */ + .platform = "Amlogic Meson8", + .series = cpuinfo_arm_chipset_series_amlogic_s, + .model = 812, + }, + { + /* "Amlogic Meson8B" -> Amlogic S805 */ + .platform = "Amlogic Meson8B", + .series = cpuinfo_arm_chipset_series_amlogic_s, + .model = 805, + }, + { + /* "mapphone_CDMA" -> Texas Instruments OMAP4430 */ + .platform = "mapphone_CDMA", + .series = cpuinfo_arm_chipset_series_texas_instruments_omap, + .model = 4430, + }, + { + /* "Superior" -> Texas Instruments OMAP4470 */ + .platform = "Superior", + .series = cpuinfo_arm_chipset_series_texas_instruments_omap, + .model = 4470, + }, + { + /* "Tuna" (Samsung Galaxy Nexus) -> Texas Instruments OMAP4460 */ + .platform = "Tuna", + .series = cpuinfo_arm_chipset_series_texas_instruments_omap, + .model = 4460, + }, + { + /* "Manta" (Samsung Nexus 10) -> Samsung Exynos 5250 */ + .platform = "Manta", + .series = 
cpuinfo_arm_chipset_series_samsung_exynos, + .model = 5250, + }, + { + /* "Odin" -> LG Nuclun 7111 */ + .platform = "Odin", + .series = cpuinfo_arm_chipset_series_lg_nuclun, + .model = 7111, + }, + { + /* "Madison" -> MStar 6A338 */ + .platform = "Madison", + .series = cpuinfo_arm_chipset_series_mstar_6a, + .model = 338, + }, +#endif /* CPUINFO_ARCH_ARM */ +}; + +static const struct special_map_entry tegra_hardware_map_entries[] = { +#if CPUINFO_ARCH_ARM + { + /* "cardhu" (Nvidia Cardhu developer tablet) -> Tegra T30 */ + .platform = "cardhu", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "kai" -> Tegra T30L */ + .platform = "kai", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "p3" (Samsung Galaxy Tab 8.9) -> Tegra T20 */ + .platform = "p3", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 20, + }, + { + /* "n1" (Samsung Galaxy R / Samsung Captivate Glide) -> Tegra AP20H */ + .platform = "n1", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 20, + .suffix = 'H', + }, + { + /* "SHW-M380S" (Samsung Galaxy Tab 10.1) -> Tegra T20 */ + .platform = "SHW-M380S", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 20, + }, + { + /* "m470" (Hisense Sero 7 Pro) -> Tegra T30L */ + .platform = "m470", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "endeavoru" (HTC One X) -> Tegra AP33 */ + .platform = "endeavoru", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 33, + }, + { + /* "evitareul" (HTC One X+) -> Tegra T33 */ + .platform = "evitareul", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 33, + }, + { + /* "enrc2b" (HTC One X+) -> Tegra T33 */ + .platform = "enrc2b", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 33, + }, + { + /* "mozart" (Asus Transformer Pad TF701T) -> Tegra T114 */ + .platform = "mozart", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "tegratab" (Tegra Note 7) -> Tegra T114 */ + .platform = "tegratab", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "tn8" (Nvidia Shield Tablet K1) -> Tegra T124 */ + .platform = "tn8", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 124, + }, + { + /* "roth" (Nvidia Shield Portable) -> Tegra T114 */ + .platform = "roth", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "pisces" (Xiaomi Mi 3) -> Tegra T114 */ + .platform = "pisces", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "mocha" (Xiaomi Mi Pad) -> Tegra T124 */ + .platform = "mocha", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 124, + }, + { + /* "stingray" (Motorola XOOM) -> Tegra AP20H */ + .platform = "stingray", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 20, + .suffix = 'H', + }, + { + /* "Ceres" (Wiko Highway 4G) -> Tegra SL460N */ + .platform = "Ceres", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_sl, + .model = 460, + .suffix = 'N', + }, + { + /* "MT799" (nabi 2 Tablet) -> Tegra T30 */ + .platform = "MT799", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "t8400n" (nabi DreamTab HD8) -> Tegra T114 */ + .platform = "t8400n", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "chagall" (Fujitsu Stylistic M532) -> Tegra T30 */ + .platform = "chagall", + 
.series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "ventana" (Asus Transformer TF101) -> Tegra T20 */ + .platform = "ventana", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 20, + }, + { + /* "bobsleigh" (Fujitsu Arrows Tab F-05E) -> Tegra T33 */ + .platform = "bobsleigh", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 33, + }, + { + /* "tegra_fjdev101" (Fujitsu Arrows X F-10D) -> Tegra AP33 */ + .platform = "tegra_fjdev101", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 33, + }, + { + /* "tegra_fjdev103" (Fujitsu Arrows V F-04E) -> Tegra T33 */ + .platform = "tegra_fjdev103", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 33, + }, + { + /* "nbx03" (Sony Tablet S) -> Tegra T20 */ + .platform = "nbx03", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 20, + }, + { + /* "txs03" (Sony Xperia Tablet S) -> Tegra T30L */ + .platform = "txs03", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "x3" (LG Optimus 4X HD P880) -> Tegra AP33 */ + .platform = "x3", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 33, + }, + { + /* "vu10" (LG Optimus Vu P895) -> Tegra AP33 */ + .platform = "vu10", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 33, + }, + { + /* "BIRCH" (HP Slate 7 Plus) -> Tegra T30L */ + .platform = "BIRCH", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "macallan" (HP Slate 8 Pro) -> Tegra T114 */ + .platform = "macallan", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "maya" (HP SlateBook 10 x2) -> Tegra T114 */ + .platform = "maya", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "antares" (Toshiba AT100) -> Tegra T20 */ + .platform = "antares", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 20, + }, + { + /* "tostab12AL" (Toshiba AT300SE "Excite 10 SE") -> Tegra T30L */ + .platform = "tostab12AL", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "tostab12BL" (Toshiba AT10-A "Excite Pure") -> Tegra T30L */ + .platform = "tostab12BL", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "sphinx" (Toshiba AT270 "Excite 7.7") -> Tegra T30 */ + .platform = "sphinx", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "tostab11BS" (Toshiba AT570 "Regza 7.7") -> Tegra T30 */ + .platform = "tostab11BS", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "tostab12BA" (Toshiba AT10-LE-A "Excite Pro") -> Tegra T114 */ + .platform = "tostab12BA", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "vangogh" (Acer Iconia Tab A100) -> Tegra T20 */ + .platform = "vangogh", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 20, + }, + { + /* "a110" (Acer Iconia Tab A110) -> Tegra T30L */ + .platform = "a110", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "picasso_e" (Acer Iconia Tab A200) -> Tegra AP20H */ + .platform = "picasso_e", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 20, + .suffix = 'H', + }, + { + /* "picasso_e2" (Acer Iconia Tab A210) -> Tegra T30L */ + .platform = "picasso_e2", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 
30, + .suffix = 'L', + }, + { + /* "picasso" (Acer Iconia Tab A500) -> Tegra AP20H */ + .platform = "picasso", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_ap, + .model = 20, + .suffix = 'H', + }, + { + /* "picasso_m" (Acer Iconia Tab A510) -> Tegra T30 */ + .platform = "picasso_m", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "picasso_mf" (Acer Iconia Tab A700) -> Tegra T30 */ + .platform = "picasso_mf", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "avalon" (Toshiba AT300 "Excite 10") -> Tegra T30L */ + .platform = "avalon", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "NS_14T004" (iRiver NS-14T004) -> Tegra T30L */ + .platform = "NS_14T004", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, + { + /* "WIKIPAD" (Wikipad) -> Tegra T30 */ + .platform = "WIKIPAD", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + }, + { + /* "kb" (Pegatron Q00Q) -> Tegra T114 */ + .platform = "kb", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "foster_e" (Nvidia Shield TV, Flash) -> Tegra T210 */ + .platform = "foster_e", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 210, + }, + { + /* "foster_e_hdd" (Nvidia Shield TV, HDD) -> Tegra T210 */ + .platform = "foster_e_hdd", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 210, + }, + { + /* "darcy" (Nvidia Shield TV 2017) -> Tegra T210 */ + .platform = "darcy", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 210, + }, +}; + +/* + * Decodes chipset name from /proc/cpuinfo Hardware string. + * For some chipsets, the function relies frequency and on number of cores for chipset detection. + * + * @param[in] platform - /proc/cpuinfo Hardware string. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` vendor + * and series identifiers. + */ +struct cpuinfo_arm_chipset cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_hardware( + const char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX], + uint32_t cores, uint32_t max_cpu_freq_max, bool is_tegra) +{ + struct cpuinfo_arm_chipset chipset; + const size_t hardware_length = strnlen(hardware, CPUINFO_HARDWARE_VALUE_MAX); + const char* hardware_end = hardware + hardware_length; + + if (is_tegra) { + /* + * Nvidia Tegra-specific path: compare /proc/cpuinfo Hardware string to + * tabulated Hardware values for popular chipsets/devices with Tegra chipsets. + * This path is only used when ro.board.platform indicates a Tegra chipset + * (albeit does not indicate which exactly Tegra chipset). 
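+		 * For example, a Hardware string of "cardhu" under a "tegra3" ro.board.platform resolves to
+		 * Nvidia Tegra T30 through the table below.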
+ */ + for (size_t i = 0; i < CPUINFO_COUNT_OF(tegra_hardware_map_entries); i++) { + if (strncmp(tegra_hardware_map_entries[i].platform, hardware, hardware_length) == 0 && + tegra_hardware_map_entries[i].platform[hardware_length] == 0) + { + cpuinfo_log_debug( + "found /proc/cpuinfo Hardware string \"%.*s\" in Nvidia Tegra chipset table", + (int) hardware_length, hardware); + /* Create chipset name from entry */ + return (struct cpuinfo_arm_chipset) { + .vendor = chipset_series_vendor[tegra_hardware_map_entries[i].series], + .series = (enum cpuinfo_arm_chipset_series) tegra_hardware_map_entries[i].series, + .model = tegra_hardware_map_entries[i].model, + .suffix = { + [0] = tegra_hardware_map_entries[i].suffix, + }, + }; + } + } + } else { + /* Generic path: consider all other vendors */ + + bool word_start = true; + for (const char* pos = hardware; pos != hardware_end; pos++) { + const char c = *pos; + switch (c) { + case ' ': + case '\t': + case ',': + word_start = true; + break; + default: + if (word_start && is_ascii_alphabetic(c)) { + /* Check Qualcomm MSM/APQ signature */ + if (match_msm_apq(pos, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm MSM/APQ signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check SDMxxx (Qualcomm Snapdragon) signature */ + if (match_sdm(pos, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm SDM signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check SMxxxx (Qualcomm Snapdragon) signature */ + if (match_sm(pos, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm SM signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check MediaTek MT signature */ + if (match_mt(pos, hardware_end, true, &chipset)) { + cpuinfo_log_debug( + "matched MediaTek MT signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check HiSilicon Kirin signature */ + if (match_kirin(pos, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched HiSilicon Kirin signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check Rockchip RK signature */ + if (match_rk(pos, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Rockchip RK signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + if (match_qualcomm_special(pos, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + } + word_start = false; + break; + } + } + + /* Check Samsung Exynos signature */ + if (match_samsung_exynos(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Samsung Exynos signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check universalXXXX (Samsung Exynos) signature */ + if (match_universal(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched UNIVERSAL (Samsung Exynos) signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Match /SMDK(4410|4x12)$/ */ + if (match_and_parse_smdk(hardware, hardware_end, cores, &chipset)) { + cpuinfo_log_debug( + "matched SMDK (Samsung 
Exynos) signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + #endif + + /* Check Spreadtrum SC signature */ + if (match_sc(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Spreadtrum SC signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Check Marvell PXA signature */ + if (match_pxa(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Marvell PXA signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + #endif + + /* Match /sun\d+i/ signature and map to Allwinner chipset name */ + if (match_and_parse_sunxi(hardware, hardware_end, cores, &chipset)) { + cpuinfo_log_debug( + "matched sunxi (Allwinner Ax) signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check Broadcom BCM signature */ + if (match_bcm(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Broadcom BCM signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Check Texas Instruments OMAP signature */ + if (match_omap(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Texas Instruments OMAP signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Check WonderMedia WMT signature and decode chipset from frequency and number of cores */ + if (match_and_parse_wmt(hardware, hardware_end, cores, max_cpu_freq_max, &chipset)) { + cpuinfo_log_debug( + "matched WonderMedia WMT signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + #endif + + /* Check Telechips TCC signature */ + if (match_tcc(hardware, hardware_end, &chipset)) { + cpuinfo_log_debug( + "matched Telechips TCC signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) hardware_length, hardware); + return chipset; + } + + /* Compare to tabulated Hardware values for popular chipsets/devices which can't be otherwise detected */ + for (size_t i = 0; i < CPUINFO_COUNT_OF(special_hardware_map_entries); i++) { + if (strncmp(special_hardware_map_entries[i].platform, hardware, hardware_length) == 0 && + special_hardware_map_entries[i].platform[hardware_length] == 0) + { + cpuinfo_log_debug( + "found /proc/cpuinfo Hardware string \"%.*s\" in special chipset table", + (int) hardware_length, hardware); + /* Create chipset name from entry */ + return (struct cpuinfo_arm_chipset) { + .vendor = chipset_series_vendor[special_hardware_map_entries[i].series], + .series = (enum cpuinfo_arm_chipset_series) special_hardware_map_entries[i].series, + .model = special_hardware_map_entries[i].model, + .suffix = { + [0] = special_hardware_map_entries[i].suffix, + }, + }; + } + } + } + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; +} + +#ifdef __ANDROID__ + static const struct special_map_entry special_board_map_entries[] = { + { + /* "hi6250" -> HiSilicon Kirin 650 */ + .platform = "hi6250", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 650, + }, + { + /* "hi6210sft" -> HiSilicon Kirin 620 */ + .platform = "hi6210sft", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 620, + }, +#if CPUINFO_ARCH_ARM + { + /* "hi3630" -> 
HiSilicon Kirin 920 */ + .platform = "hi3630", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 920, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "hi3635" -> HiSilicon Kirin 930 */ + .platform = "hi3635", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 930, + }, + { + /* "hi3650" -> HiSilicon Kirin 950 */ + .platform = "hi3650", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 950, + }, + { + /* "hi3660" -> HiSilicon Kirin 960 */ + .platform = "hi3660", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 960, + }, +#if CPUINFO_ARCH_ARM + { + /* "mp523x" -> Renesas MP5232 */ + .platform = "mp523x", + .series = cpuinfo_arm_chipset_series_renesas_mp, + .model = 5232, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "BEETHOVEN" (Huawei MadiaPad M3) -> HiSilicon Kirin 950 */ + .platform = "BEETHOVEN", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 950, + }, +#if CPUINFO_ARCH_ARM + { + /* "hws7701u" (Huawei MediaPad 7 Youth) -> Rockchip RK3168 */ + .platform = "hws7701u", + .series = cpuinfo_arm_chipset_series_rockchip_rk, + .model = 3168, + }, + { + /* "g2mv" (LG G2 mini LTE) -> Nvidia Tegra SL460N */ + .platform = "g2mv", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_sl, + .model = 460, + .suffix = 'N', + }, + { + /* "K00F" (Asus MeMO Pad 10) -> Rockchip RK3188 */ + .platform = "K00F", + .series = cpuinfo_arm_chipset_series_rockchip_rk, + .model = 3188, + }, + { + /* "T7H" (HP Slate 7) -> Rockchip RK3066 */ + .platform = "T7H", + .series = cpuinfo_arm_chipset_series_rockchip_rk, + .model = 3066, + }, + { + /* "tuna" (Samsung Galaxy Nexus) -> Texas Instruments OMAP4460 */ + .platform = "tuna", + .series = cpuinfo_arm_chipset_series_texas_instruments_omap, + .model = 4460, + }, + { + /* "grouper" (Asus Nexus 7 2012) -> Nvidia Tegra T30L */ + .platform = "grouper", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 30, + .suffix = 'L', + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "flounder" (HTC Nexus 9) -> Nvidia Tegra T132 */ + .platform = "flounder", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 132, + }, + { + /* "dragon" (Google Pixel C) -> Nvidia Tegra T210 */ + .platform = "dragon", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 210, + }, + { + /* "sailfish" (Google Pixel) -> Qualcomm MSM8996PRO */ + .platform = "sailfish", + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = 8996, + .suffix = 'P', + }, + { + /* "marlin" (Google Pixel XL) -> Qualcomm MSM8996PRO */ + .platform = "marlin", + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = 8996, + .suffix = 'P', + }, + }; + + /* + * Decodes chipset name from ro.product.board Android system property. + * For some chipsets, the function relies frequency and on number of cores for chipset detection. + * + * @param[in] platform - ro.product.board value. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` vendor + * and series identifiers. 
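+	 *
+	 * For example, the ro.product.board value "marlin" is resolved through the special board table above to
+	 * Qualcomm MSM8996PRO.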
+ */ + struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_product_board( + const char ro_product_board[restrict static CPUINFO_BUILD_PROP_VALUE_MAX], + uint32_t cores, uint32_t max_cpu_freq_max) + { + struct cpuinfo_arm_chipset chipset; + const char* board = ro_product_board; + const size_t board_length = strnlen(ro_product_board, CPUINFO_BUILD_PROP_VALUE_MAX); + const char* board_end = ro_product_board + board_length; + + /* Check Qualcomm MSM/APQ signature */ + if (match_msm_apq(board, board_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm MSM/APQ signature in ro.product.board string \"%.*s\"", (int) board_length, board); + return chipset; + } + + /* Check universaXXXX (Samsung Exynos) signature */ + if (match_universal(board, board_end, &chipset)) { + cpuinfo_log_debug( + "matched UNIVERSAL (Samsung Exynos) signature in ro.product.board string \"%.*s\"", + (int) board_length, board); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Check SMDK (Samsung Exynos) signature */ + if (match_and_parse_smdk(board, board_end, cores, &chipset)) { + cpuinfo_log_debug( + "matched SMDK (Samsung Exynos) signature in ro.product.board string \"%.*s\"", + (int) board_length, board); + return chipset; + } + #endif + + /* Check MediaTek MT signature */ + if (match_mt(board, board_end, true, &chipset)) { + cpuinfo_log_debug( + "matched MediaTek MT signature in ro.product.board string \"%.*s\"", + (int) board_length, board); + return chipset; + } + + /* Check Spreadtrum SC signature */ + if (match_sc(board, board_end, &chipset)) { + cpuinfo_log_debug( + "matched Spreadtrum SC signature in ro.product.board string \"%.*s\"", + (int) board_length, board); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Check Marvell PXA signature */ + if (match_pxa(board, board_end, &chipset)) { + cpuinfo_log_debug( + "matched Marvell PXA signature in ro.product.board string \"%.*s\"", + (int) board_length, board); + return chipset; + } + + /* Check Leadcore LCxxxx signature */ + if (match_lc(board, board_end, &chipset)) { + cpuinfo_log_debug( + "matched Leadcore LC signature in ro.product.board string \"%.*s\"", + (int) board_length, board); + return chipset; + } + + /* + * Compare to tabulated ro.product.board values for Broadcom chipsets and decode chipset from frequency and + * number of cores. 
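+		 * For example, a 4-core "java" board decodes to BCM23550.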
+ */ + if (match_and_parse_broadcom(board, board_end, cores, max_cpu_freq_max, &chipset)) { + cpuinfo_log_debug( + "found ro.product.board string \"%.*s\" in Broadcom chipset table", + (int) board_length, board); + return chipset; + } + #endif + + /* Compare to tabulated ro.product.board values for Huawei devices which don't report chipset elsewhere */ + if (match_and_parse_huawei(board, board_end, &chipset)) { + cpuinfo_log_debug( + "found ro.product.board string \"%.*s\" in Huawei chipset table", + (int) board_length, board); + return chipset; + } + + /* Compare to tabulated ro.product.board values for popular chipsets/devices which can't be otherwise detected */ + for (size_t i = 0; i < CPUINFO_COUNT_OF(special_board_map_entries); i++) { + if (strncmp(special_board_map_entries[i].platform, board, board_length) == 0 && + special_board_map_entries[i].platform[board_length] == 0) + { + cpuinfo_log_debug( + "found ro.product.board string \"%.*s\" in special chipset table", + (int) board_length, board); + /* Create chipset name from entry */ + return (struct cpuinfo_arm_chipset) { + .vendor = chipset_series_vendor[special_board_map_entries[i].series], + .series = (enum cpuinfo_arm_chipset_series) special_board_map_entries[i].series, + .model = special_board_map_entries[i].model, + .suffix = { + [0] = special_board_map_entries[i].suffix, + /* The suffix of MSM8996PRO is truncated at the first letter, reconstruct it here. */ + [1] = special_board_map_entries[i].suffix == 'P' ? 'R' : 0, + [2] = special_board_map_entries[i].suffix == 'P' ? 'O' : 0, + }, + }; + } + } + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; + } + + struct amlogic_map_entry { + char ro_board_platform[6]; + uint16_t model; + uint8_t series; + char suffix[3]; + }; + + static const struct amlogic_map_entry amlogic_map_entries[] = { +#if CPUINFO_ARCH_ARM + { + /* "meson3" -> Amlogic AML8726-M */ + .ro_board_platform = "meson3", + .series = cpuinfo_arm_chipset_series_amlogic_aml, + .model = 8726, + .suffix = "-M", + }, + { + /* "meson6" -> Amlogic AML8726-MX */ + .ro_board_platform = "meson6", + .series = cpuinfo_arm_chipset_series_amlogic_aml, + .model = 8726, + .suffix = "-MX", + }, + { + /* "meson8" -> Amlogic S805 */ + .ro_board_platform = "meson8", + .series = cpuinfo_arm_chipset_series_amlogic_s, + .model = 805, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "gxbaby" -> Amlogic S905 */ + .ro_board_platform = "gxbaby", + .series = cpuinfo_arm_chipset_series_amlogic_s, + .model = 905, + }, + { + /* "gxl" -> Amlogic S905X */ + .ro_board_platform = "gxl", + .series = cpuinfo_arm_chipset_series_amlogic_s, + .model = 905, + .suffix = "X", + }, + { + /* "gxm" -> Amlogic S912 */ + .ro_board_platform = "gxm", + .series = cpuinfo_arm_chipset_series_amlogic_s, + .model = 912, + }, + }; + + static const struct special_map_entry special_platform_map_entries[] = { +#if CPUINFO_ARCH_ARM + { + /* "hi6620oem" -> HiSilicon Kirin 910T */ + .platform = "hi6620oem", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 910, + .suffix = 'T', + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "hi6250" -> HiSilicon Kirin 650 */ + .platform = "hi6250", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 650, + }, + { + /* "hi6210sft" -> HiSilicon Kirin 620 */ + .platform = "hi6210sft", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 620, + }, +#if CPUINFO_ARCH_ARM + { + /* "hi3630" -> HiSilicon Kirin 920 */ + 
.platform = "hi3630", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 920, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "hi3635" -> HiSilicon Kirin 930 */ + .platform = "hi3635", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 930, + }, + { + /* "hi3650" -> HiSilicon Kirin 950 */ + .platform = "hi3650", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 950, + }, + { + /* "hi3660" -> HiSilicon Kirin 960 */ + .platform = "hi3660", + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = 960, + }, +#if CPUINFO_ARCH_ARM + { + /* "k3v2oem1" -> HiSilicon K3V2 */ + .platform = "k3v2oem1", + .series = cpuinfo_arm_chipset_series_hisilicon_k3v, + .model = 2, + }, + { + /* "k3v200" -> HiSilicon K3V2 */ + .platform = "k3v200", + .series = cpuinfo_arm_chipset_series_hisilicon_k3v, + .model = 2, + }, + { + /* "montblanc" -> NovaThor U8500 */ + .platform = "montblanc", + .series = cpuinfo_arm_chipset_series_novathor_u, + .model = 8500, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "song" -> Pinecone Surge S1 */ + .platform = "song", + .series = cpuinfo_arm_chipset_series_pinecone_surge_s, + .model = 1, + }, +#if CPUINFO_ARCH_ARM + { + /* "rk322x" -> RockChip RK3229 */ + .platform = "rk322x", + .series = cpuinfo_arm_chipset_series_rockchip_rk, + .model = 3229, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* "tegra132" -> Nvidia Tegra T132 */ + .platform = "tegra132", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 132, + }, + { + /* "tegra210_dragon" -> Nvidia Tegra T210 */ + .platform = "tegra210_dragon", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 210, + }, +#if CPUINFO_ARCH_ARM + { + /* "tegra4" -> Nvidia Tegra T114 */ + .platform = "tegra4", + .series = cpuinfo_arm_chipset_series_nvidia_tegra_t, + .model = 114, + }, + { + /* "s5pc110" -> Samsung Exynos 3110 */ + .platform = "s5pc110", + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = 3110, + }, +#endif /* CPUINFO_ARCH_ARM */ + }; + + /* + * Decodes chipset name from ro.board.platform Android system property. + * For some chipsets, the function relies frequency and on number of cores for chipset detection. + * + * @param[in] platform - ro.board.platform value. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` vendor + * and series identifiers. 
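+	 *
+	 * For example, the ro.board.platform value "tegra132" is resolved through the special platform table above
+	 * to Nvidia Tegra T132.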
+ */ + struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_board_platform( + const char platform[restrict static CPUINFO_BUILD_PROP_VALUE_MAX], + uint32_t cores, uint32_t max_cpu_freq_max) + { + struct cpuinfo_arm_chipset chipset; + const size_t platform_length = strnlen(platform, CPUINFO_BUILD_PROP_VALUE_MAX); + const char* platform_end = platform + platform_length; + + /* Check Qualcomm MSM/APQ signature */ + if (match_msm_apq(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm MSM/APQ signature in ro.board.platform string \"%.*s\"", + (int) platform_length, platform); + return chipset; + } + + /* Check exynosXXXX (Samsung Exynos) signature */ + if (match_exynos(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "matched exynosXXXX (Samsung Exynos) signature in ro.board.platform string \"%.*s\"", + (int) platform_length, platform); + return chipset; + } + + /* Check MediaTek MT signature */ + if (match_mt(platform, platform_end, true, &chipset)) { + cpuinfo_log_debug( + "matched MediaTek MT signature in ro.board.platform string \"%.*s\"", (int) platform_length, platform); + return chipset; + } + + /* Check HiSilicon Kirin signature */ + if (match_kirin(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "matched HiSilicon Kirin signature in ro.board.platform string \"%.*s\"", (int) platform_length, platform); + return chipset; + } + + /* Check Spreadtrum SC signature */ + if (match_sc(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "matched Spreadtrum SC signature in ro.board.platform string \"%.*s\"", (int) platform_length, platform); + return chipset; + } + + /* Check Rockchip RK signature */ + if (match_rk(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "matched Rockchip RK signature in ro.board.platform string \"%.*s\"", (int) platform_length, platform); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Check Leadcore LCxxxx signature */ + if (match_lc(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "matched Leadcore LC signature in ro.board.platform string \"%.*s\"", (int) platform_length, platform); + return chipset; + } + #endif + + /* Compare to tabulated ro.board.platform values for Huawei devices which don't report chipset elsewhere */ + if (match_and_parse_huawei(platform, platform_end, &chipset)) { + cpuinfo_log_debug( + "found ro.board.platform string \"%.*s\" in Huawei chipset table", + (int) platform_length, platform); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* + * Compare to known ro.board.platform values for Broadcom devices and + * detect chipset from frequency and number of cores + */ + if (match_and_parse_broadcom(platform, platform_end, cores, max_cpu_freq_max, &chipset)) { + cpuinfo_log_debug( + "found ro.board.platform string \"%.*s\" in Broadcom chipset table", + (int) platform_length, platform); + return chipset; + } + + /* + * Compare to ro.board.platform value ("omap4") for OMAP4xxx chipsets. + * Upon successful match, detect OMAP4430 from frequency and number of cores. 
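+		 * Only the 2-core configuration at 1008000 KHz is recognized here; other "omap4" systems fall through
+		 * to the Amlogic and special platform tables below.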
+ */ + if (platform_length == 5 && cores == 2 && max_cpu_freq_max == 1008000 && memcmp(platform, "omap4", 5) == 0) { + cpuinfo_log_debug( + "matched Texas Instruments OMAP4 signature in ro.board.platform string \"%.*s\"", + (int) platform_length, platform); + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_texas_instruments, + .series = cpuinfo_arm_chipset_series_texas_instruments_omap, + .model = 4430, + }; + } + #endif + + /* + * Compare to tabulated ro.board.platform values for Amlogic chipsets/devices which can't be otherwise detected. + * The tabulated Amlogic ro.board.platform values have not more than 6 characters. + */ + if (platform_length <= 6) { + for (size_t i = 0; i < CPUINFO_COUNT_OF(amlogic_map_entries); i++) { + if (strncmp(amlogic_map_entries[i].ro_board_platform, platform, 6) == 0) { + cpuinfo_log_debug( + "found ro.board.platform string \"%.*s\" in Amlogic chipset table", + (int) platform_length, platform); + /* Create chipset name from entry */ + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_amlogic, + .series = (enum cpuinfo_arm_chipset_series) amlogic_map_entries[i].series, + .model = amlogic_map_entries[i].model, + .suffix = { + [0] = amlogic_map_entries[i].suffix[0], + [1] = amlogic_map_entries[i].suffix[1], + [2] = amlogic_map_entries[i].suffix[2], + }, + }; + } + } + } + + /* Compare to tabulated ro.board.platform values for popular chipsets/devices which can't be otherwise detected */ + for (size_t i = 0; i < CPUINFO_COUNT_OF(special_platform_map_entries); i++) { + if (strncmp(special_platform_map_entries[i].platform, platform, platform_length) == 0 && + special_platform_map_entries[i].platform[platform_length] == 0) + { + /* Create chipset name from entry */ + cpuinfo_log_debug( + "found ro.board.platform string \"%.*s\" in special chipset table", (int) platform_length, platform); + return (struct cpuinfo_arm_chipset) { + .vendor = chipset_series_vendor[special_platform_map_entries[i].series], + .series = (enum cpuinfo_arm_chipset_series) special_platform_map_entries[i].series, + .model = special_platform_map_entries[i].model, + .suffix = { + [0] = special_platform_map_entries[i].suffix, + }, + }; + } + } + + /* None of the ro.board.platform signatures matched, indicate unknown chipset */ + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; + } + + /* + * Decodes chipset name from ro.mediatek.platform Android system property. + * + * @param[in] platform - ro.mediatek.platform value. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` + * vendor and series identifiers. + */ + struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_mediatek_platform( + const char platform[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]) + { + struct cpuinfo_arm_chipset chipset; + const char* platform_end = platform + strnlen(platform, CPUINFO_BUILD_PROP_VALUE_MAX); + + /* Check MediaTek MT signature */ + if (match_mt(platform, platform_end, false, &chipset)) { + return chipset; + } + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; + } + + + /* + * Decodes chipset name from ro.arch Android system property. + * + * The ro.arch property is matched only against Samsung Exynos signature. 
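/*
 * Illustrative sketch (not the library's code): the table lookups above match
 * the property value with strncmp() and then require a terminating NUL in the
 * table entry at the same position, so that a short value such as "tegra"
 * cannot prefix-match an entry like "tegra132". The struct and table below are
 * simplified stand-ins for the special_platform_map_entries table.
 */
#include <stdio.h>
#include <string.h>

struct platform_entry {
	const char* platform; /* expected ro.board.platform value */
	unsigned model;       /* chipset model number */
};

static const struct platform_entry entries[] = {
	{ "tegra132", 132 },
	{ "tegra210_dragon", 210 },
	{ "song", 1 },
};

/* Returns the model for an exact match of value (length characters), or 0 */
static unsigned lookup_platform(const char* value, size_t length) {
	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		if (strncmp(entries[i].platform, value, length) == 0 &&
		    entries[i].platform[length] == 0)
		{
			return entries[i].model;
		}
	}
	return 0;
}

int main(void) {
	printf("%u\n", lookup_platform("tegra132", 8)); /* 132: exact match */
	printf("%u\n", lookup_platform("tegra", 5));    /* 0: prefix-only match is rejected */
	return 0;
}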
Systems with other chipset rarely + * configure ro.arch Android system property, and can be decoded through other properties, but some Exynos + * chipsets are identified only in ro.arch. + * + * @param[in] arch - ro.arch value. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` + * vendor and series identifiers. + */ + struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_arch( + const char arch[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]) + { + struct cpuinfo_arm_chipset chipset; + const char* arch_end = arch + strnlen(arch, CPUINFO_BUILD_PROP_VALUE_MAX); + + /* Check Samsung exynosXXXX signature */ + if (match_exynos(arch, arch_end, &chipset)) { + return chipset; + } + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; + } + + /* + * Decodes chipset name from ro.chipname or ro.hardware.chipname Android system property. + * + * @param[in] chipname - ro.chipname or ro.hardware.chipname value. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` vendor + * and series identifiers. + */ + + struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_chipname( + const char chipname[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]) + { + struct cpuinfo_arm_chipset chipset; + const size_t chipname_length = strnlen(chipname, CPUINFO_BUILD_PROP_VALUE_MAX); + const char* chipname_end = chipname + chipname_length; + + /* Check Qualcomm MSM/APQ signatures */ + if (match_msm_apq(chipname, chipname_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm MSM/APQ signature in ro.chipname string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + /* Check SMxxxx (Qualcomm Snapdragon) signature */ + if (match_sm(chipname, chipname_end, &chipset)) { + cpuinfo_log_debug( + "matched Qualcomm SM signature in /proc/cpuinfo Hardware string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + /* Check exynosXXXX (Samsung Exynos) signature */ + if (match_exynos(chipname, chipname_end, &chipset)) { + cpuinfo_log_debug( + "matched exynosXXXX (Samsung Exynos) signature in ro.chipname string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + /* Check universalXXXX (Samsung Exynos) signature */ + if (match_universal(chipname, chipname_end, &chipset)) { + cpuinfo_log_debug( + "matched UNIVERSAL (Samsung Exynos) signature in ro.chipname Hardware string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + /* Check MediaTek MT signature */ + if (match_mt(chipname, chipname_end, true, &chipset)) { + cpuinfo_log_debug( + "matched MediaTek MT signature in ro.chipname string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + /* Check Spreadtrum SC signature */ + if (match_sc(chipname, chipname_end, &chipset)) { + cpuinfo_log_debug( + "matched Spreadtrum SC signature in ro.chipname string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + #if CPUINFO_ARCH_ARM + /* Check Marvell PXA signature */ + if (match_pxa(chipname, chipname_end, &chipset)) { + cpuinfo_log_debug( + "matched Marvell PXA signature in ro.chipname string \"%.*s\"", + (int) chipname_length, chipname); + return chipset; + } + + /* Compare to ro.chipname value ("mp523x") for Renesas MP5232 which can't be otherwise detected */ + if (chipname_length == 6 && memcmp(chipname, "mp523x", 6) == 0) { + 
cpuinfo_log_debug( + "matched Renesas MP5232 signature in ro.chipname string \"%.*s\"", + (int) chipname_length, chipname); + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_renesas, + .series = cpuinfo_arm_chipset_series_renesas_mp, + .model = 5232, + }; + } + #endif + + return (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; + } +#endif /* __ANDROID__ */ + +/* + * Fix common bugs, typos, and renames in chipset name. + * + * @param[in,out] chipset - chipset name to fix. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + */ +void cpuinfo_arm_fixup_chipset( + struct cpuinfo_arm_chipset chipset[restrict static 1], uint32_t cores, uint32_t max_cpu_freq_max) +{ + switch (chipset->series) { + case cpuinfo_arm_chipset_series_qualcomm_msm: + /* Check if there is suffix */ + if (chipset->suffix[0] == 0) { + /* No suffix, but the model may be misreported */ + switch (chipset->model) { + case 8216: + /* MSM8216 was renamed to MSM8916 */ + cpuinfo_log_info("reinterpreted MSM8216 chipset as MSM8916"); + chipset->model = 8916; + break; + case 8916: + /* Common bug: MSM8939 (Octa-core) reported as MSM8916 (Quad-core) */ + switch (cores) { + case 4: + break; + case 8: + cpuinfo_log_info("reinterpreted MSM8916 chipset with 8 cores as MSM8939"); + chipset->model = 8939; + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core MSM%"PRIu32" chipset", + cores, chipset->model); + chipset->model = 0; + } + break; + case 8937: + /* Common bug: MSM8917 (Quad-core) reported as MSM8937 (Octa-core) */ + switch (cores) { + case 4: + cpuinfo_log_info("reinterpreted MSM8937 chipset with 4 cores as MSM8917"); + chipset->model = 8917; + break; + case 8: + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core MSM%"PRIu32" chipset", + cores, chipset->model); + chipset->model = 0; + } + break; + case 8960: + /* Common bug: APQ8064 (Quad-core) reported as MSM8960 (Dual-core) */ + switch (cores) { + case 2: + break; + case 4: + cpuinfo_log_info("reinterpreted MSM8960 chipset with 4 cores as APQ8064"); + chipset->series = cpuinfo_arm_chipset_series_qualcomm_apq; + chipset->model = 8064; + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core MSM%"PRIu32" chipset", + cores, chipset->model); + chipset->model = 0; + } + break; + case 8996: + /* Common bug: MSM8994 (Octa-core) reported as MSM8996 (Quad-core) */ + switch (cores) { + case 4: + break; + case 8: + cpuinfo_log_info("reinterpreted MSM8996 chipset with 8 cores as MSM8994"); + chipset->model = 8994; + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core MSM%"PRIu32" chipset", + cores, chipset->model); + chipset->model = 0; + } + break; +#if CPUINFO_ARCH_ARM + case 8610: + /* Common bug: MSM8612 (Quad-core) reported as MSM8610 (Dual-core) */ + switch (cores) { + case 2: + break; + case 4: + cpuinfo_log_info("reinterpreted MSM8610 chipset with 4 cores as MSM8612"); + chipset->model = 8612; + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core MSM%"PRIu32" chipset", + cores, chipset->model); + chipset->model = 0; + } + break; +#endif /* CPUINFO_ARCH_ARM */ + } + } else { + /* Suffix may need correction */ + const uint32_t suffix_word = load_u32le(chipset->suffix); + if (suffix_word == UINT32_C(0x004D534D) /* "\0MSM" = reverse("MSM\0") 
*/) { + /* + * Common bug: model name repeated twice, e.g. "MSM8916MSM8916" + * In this case, model matching code parses the second "MSM" as a suffix + */ + chipset->suffix[0] = 0; + chipset->suffix[1] = 0; + chipset->suffix[2] = 0; + } else { + switch (chipset->model) { + case 8976: + /* MSM8976SG -> MSM8976PRO */ + if (suffix_word == UINT32_C(0x00004753) /* "\0\0GS" = reverse("SG\0\0") */ ) { + chipset->suffix[0] = 'P'; + chipset->suffix[1] = 'R'; + chipset->suffix[2] = 'O'; + } + break; + case 8996: + /* MSM8996PRO -> MSM8996PRO-AB or MSM8996PRO-AC */ + if (suffix_word == UINT32_C(0x004F5250) /* "\0ORP" = reverse("PRO\0") */ ) { + chipset->suffix[3] = '-'; + chipset->suffix[4] = 'A'; + chipset->suffix[5] = 'B' + (char) (max_cpu_freq_max >= 2188800); + } + break; + } + } + } + break; + case cpuinfo_arm_chipset_series_qualcomm_apq: + { + /* Suffix may need correction */ + const uint32_t expected_apq = load_u32le(chipset->suffix); + if (expected_apq == UINT32_C(0x00515041) /* "\0QPA" = reverse("APQ\0") */) { + /* + * Common bug: model name repeated twice, e.g. "APQ8016APQ8016" + * In this case, model matching code parses the second "APQ" as a suffix + */ + chipset->suffix[0] = 0; + chipset->suffix[1] = 0; + chipset->suffix[2] = 0; + } + break; + } + case cpuinfo_arm_chipset_series_samsung_exynos: + switch (chipset->model) { +#if CPUINFO_ARCH_ARM + case 4410: + /* Exynos 4410 was renamed to Exynos 4412 */ + chipset->model = 4412; + break; + case 5420: + /* Common bug: Exynos 5260 (Hexa-core) reported as Exynos 5420 (Quad-core) */ + switch (cores) { + case 4: + break; + case 6: + cpuinfo_log_info("reinterpreted Exynos 5420 chipset with 6 cores as Exynos 5260"); + chipset->model = 5260; + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core Exynos 5420 chipset", cores); + chipset->model = 0; + } + break; +#endif /* CPUINFO_ARCH_ARM */ + case 7580: + /* Common bug: Exynos 7578 (Quad-core) reported as Exynos 7580 (Octa-core) */ + switch (cores) { + case 4: + cpuinfo_log_info("reinterpreted Exynos 7580 chipset with 4 cores as Exynos 7578"); + chipset->model = 7578; + break; + case 8: + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core Exynos 7580 chipset", cores); + chipset->model = 0; + } + break; + } + break; + case cpuinfo_arm_chipset_series_mediatek_mt: + if (chipset->model == 6752) { + /* Common bug: MT6732 (Quad-core) reported as MT6752 (Octa-core) */ + switch (cores) { + case 4: + cpuinfo_log_info("reinterpreted MT6752 chipset with 4 cores as MT6732"); + chipset->model = 6732; + break; + case 8: + break; + default: + cpuinfo_log_warning("system reported invalid %"PRIu32"-core MT6752 chipset", cores); + chipset->model = 0; + } + } + if (chipset->suffix[0] == 'T') { + /* Normalization: "TURBO" and "TRUBO" (apparently a typo) -> "T" */ + const uint32_t suffix_word = load_u32le(chipset->suffix + 1); + switch (suffix_word) { + case UINT32_C(0x4F425255): /* "OBRU" = reverse("URBO") */ + case UINT32_C(0x4F425552): /* "OBUR" = reverse("RUBO") */ + if (chipset->suffix[5] == 0) { + chipset->suffix[1] = 0; + chipset->suffix[2] = 0; + chipset->suffix[3] = 0; + chipset->suffix[4] = 0; + } + break; + } + } + break; + case cpuinfo_arm_chipset_series_rockchip_rk: + if (chipset->model == 3288) { + /* Common bug: Rockchip RK3399 (Hexa-core) always reported as RK3288 (Quad-core) */ + switch (cores) { + case 4: + break; + case 6: + cpuinfo_log_info("reinterpreted RK3288 chipset with 6 cores as RK3399"); + chipset->model = 3399; + break; + default: + 
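/*
 * Illustrative sketch (not the library's code): the suffix checks above pack
 * the first four suffix characters into one little-endian 32-bit word, so a
 * constant such as UINT32_C(0x004D534D) equals the bytes 'M','S','M','\0' in
 * memory. load_u32le() itself is defined elsewhere in the library; the helper
 * below is a stand-in that assumes an unaligned little-endian 32-bit load.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load_u32le_sketch(const void* ptr) {
	uint32_t word;
	memcpy(&word, ptr, sizeof(word)); /* memcpy avoids unaligned-access issues */
	/* A big-endian host would additionally need a byte swap here. */
	return word;
}

int main(void) {
	const char suffix[4] = { 'M', 'S', 'M', '\0' };
	/* Prints "match" on little-endian hosts, which ARM Android devices are */
	if (load_u32le_sketch(suffix) == UINT32_C(0x004D534D)) {
		puts("match");
	}
	return 0;
}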
cpuinfo_log_warning("system reported invalid %"PRIu32"-core RK3288 chipset", cores); + chipset->model = 0; + } + } + break; + default: + break; + } +} + +/* Map from ARM chipset vendor ID to its string representation */ +static const char* chipset_vendor_string[cpuinfo_arm_chipset_vendor_max] = { + [cpuinfo_arm_chipset_vendor_unknown] = "Unknown", + [cpuinfo_arm_chipset_vendor_qualcomm] = "Qualcomm", + [cpuinfo_arm_chipset_vendor_mediatek] = "MediaTek", + [cpuinfo_arm_chipset_vendor_samsung] = "Samsung", + [cpuinfo_arm_chipset_vendor_hisilicon] = "HiSilicon", + [cpuinfo_arm_chipset_vendor_actions] = "Actions", + [cpuinfo_arm_chipset_vendor_allwinner] = "Allwinner", + [cpuinfo_arm_chipset_vendor_amlogic] = "Amlogic", + [cpuinfo_arm_chipset_vendor_broadcom] = "Broadcom", + [cpuinfo_arm_chipset_vendor_lg] = "LG", + [cpuinfo_arm_chipset_vendor_leadcore] = "Leadcore", + [cpuinfo_arm_chipset_vendor_marvell] = "Marvell", + [cpuinfo_arm_chipset_vendor_mstar] = "MStar", + [cpuinfo_arm_chipset_vendor_novathor] = "NovaThor", + [cpuinfo_arm_chipset_vendor_nvidia] = "Nvidia", + [cpuinfo_arm_chipset_vendor_pinecone] = "Pinecone", + [cpuinfo_arm_chipset_vendor_renesas] = "Renesas", + [cpuinfo_arm_chipset_vendor_rockchip] = "Rockchip", + [cpuinfo_arm_chipset_vendor_spreadtrum] = "Spreadtrum", + [cpuinfo_arm_chipset_vendor_telechips] = "Telechips", + [cpuinfo_arm_chipset_vendor_texas_instruments] = "Texas Instruments", + [cpuinfo_arm_chipset_vendor_wondermedia] = "WonderMedia", +}; + +/* Map from ARM chipset series ID to its string representation */ +static const char* chipset_series_string[cpuinfo_arm_chipset_series_max] = { + [cpuinfo_arm_chipset_series_unknown] = NULL, + [cpuinfo_arm_chipset_series_qualcomm_qsd] = "QSD", + [cpuinfo_arm_chipset_series_qualcomm_msm] = "MSM", + [cpuinfo_arm_chipset_series_qualcomm_apq] = "APQ", + [cpuinfo_arm_chipset_series_qualcomm_snapdragon] = "Snapdragon ", + [cpuinfo_arm_chipset_series_mediatek_mt] = "MT", + [cpuinfo_arm_chipset_series_samsung_exynos] = "Exynos ", + [cpuinfo_arm_chipset_series_hisilicon_k3v] = "K3V", + [cpuinfo_arm_chipset_series_hisilicon_hi] = "Hi", + [cpuinfo_arm_chipset_series_hisilicon_kirin] = "Kirin ", + [cpuinfo_arm_chipset_series_actions_atm] = "ATM", + [cpuinfo_arm_chipset_series_allwinner_a] = "A", + [cpuinfo_arm_chipset_series_amlogic_aml] = "AML", + [cpuinfo_arm_chipset_series_amlogic_s] = "S", + [cpuinfo_arm_chipset_series_broadcom_bcm] = "BCM", + [cpuinfo_arm_chipset_series_lg_nuclun] = "Nuclun ", + [cpuinfo_arm_chipset_series_leadcore_lc] = "LC", + [cpuinfo_arm_chipset_series_marvell_pxa] = "PXA", + [cpuinfo_arm_chipset_series_mstar_6a] = "6A", + [cpuinfo_arm_chipset_series_novathor_u] = "U", + [cpuinfo_arm_chipset_series_nvidia_tegra_t] = "Tegra T", + [cpuinfo_arm_chipset_series_nvidia_tegra_ap] = "Tegra AP", + [cpuinfo_arm_chipset_series_nvidia_tegra_sl] = "Tegra SL", + [cpuinfo_arm_chipset_series_pinecone_surge_s] = "Surge S", + [cpuinfo_arm_chipset_series_renesas_mp] = "MP", + [cpuinfo_arm_chipset_series_rockchip_rk] = "RK", + [cpuinfo_arm_chipset_series_spreadtrum_sc] = "SC", + [cpuinfo_arm_chipset_series_telechips_tcc] = "TCC", + [cpuinfo_arm_chipset_series_texas_instruments_omap] = "OMAP", + [cpuinfo_arm_chipset_series_wondermedia_wm] = "WM", +}; + +/* Convert chipset name represented by cpuinfo_arm_chipset structure to a string representation */ +void cpuinfo_arm_chipset_to_string( + const struct cpuinfo_arm_chipset chipset[restrict static 1], + char name[restrict static CPUINFO_ARM_CHIPSET_NAME_MAX]) +{ + enum 
cpuinfo_arm_chipset_vendor vendor = chipset->vendor; + if (vendor >= cpuinfo_arm_chipset_vendor_max) { + vendor = cpuinfo_arm_chipset_vendor_unknown; + } + enum cpuinfo_arm_chipset_series series = chipset->series; + if (series >= cpuinfo_arm_chipset_series_max) { + series = cpuinfo_arm_chipset_series_unknown; + } + const char* vendor_string = chipset_vendor_string[vendor]; + const char* series_string = chipset_series_string[series]; + const uint32_t model = chipset->model; + if (model == 0) { + if (series == cpuinfo_arm_chipset_series_unknown) { + strncpy(name, vendor_string, CPUINFO_ARM_CHIPSET_NAME_MAX); + } else { + snprintf(name, CPUINFO_ARM_CHIPSET_NAME_MAX, + "%s %s", vendor_string, series_string); + } + } else { + const size_t suffix_length = strnlen(chipset->suffix, CPUINFO_ARM_CHIPSET_SUFFIX_MAX); + snprintf(name, CPUINFO_ARM_CHIPSET_NAME_MAX, + "%s %s%"PRIu32"%.*s", vendor_string, series_string, model, (int) suffix_length, chipset->suffix); + } +} + +#ifdef __ANDROID__ + static inline struct cpuinfo_arm_chipset disambiguate_qualcomm_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_product_board_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_board_platform_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_chipname_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_hardware_chipname_chipset[restrict static 1]) + { + if (ro_hardware_chipname_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_hardware_chipname_chipset; + } + if (ro_chipname_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_chipname_chipset; + } + if (proc_cpuinfo_hardware_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *proc_cpuinfo_hardware_chipset; + } + if (ro_product_board_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_product_board_chipset; + } + return *ro_board_platform_chipset; + } + + static inline struct cpuinfo_arm_chipset disambiguate_mediatek_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_product_board_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_board_platform_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_mediatek_platform_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_chipname_chipset[restrict static 1]) + { + if (ro_chipname_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_chipname_chipset; + } + if (proc_cpuinfo_hardware_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *proc_cpuinfo_hardware_chipset; + } + if (ro_product_board_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_product_board_chipset; + } + if (ro_board_platform_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_board_platform_chipset; + } + return *ro_mediatek_platform_chipset; + } + + static inline struct cpuinfo_arm_chipset disambiguate_hisilicon_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_product_board_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_board_platform_chipset[restrict static 1]) + { + if (proc_cpuinfo_hardware_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *proc_cpuinfo_hardware_chipset; + } + if (ro_product_board_chipset->series != cpuinfo_arm_chipset_series_unknown) { 
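/*
 * Illustrative usage sketch: with the vendor and series tables above, a chipset
 * of { Qualcomm, MSM, 8996, "PRO" } formats as "Qualcomm MSM8996PRO". The
 * snippet assumes the library's internal header that declares
 * struct cpuinfo_arm_chipset, the enum constants and
 * cpuinfo_arm_chipset_to_string() has already been included.
 */
static void chipset_to_string_example(void) {
	const struct cpuinfo_arm_chipset chipset = {
		.vendor = cpuinfo_arm_chipset_vendor_qualcomm,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = 8996,
		.suffix = { 'P', 'R', 'O' },
	};
	char name[CPUINFO_ARM_CHIPSET_NAME_MAX];
	cpuinfo_arm_chipset_to_string(&chipset, name);
	/* name now holds "Qualcomm MSM8996PRO" */
}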
+ return *ro_product_board_chipset; + } + return *ro_board_platform_chipset; + } + + static inline struct cpuinfo_arm_chipset disambiguate_amlogic_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_board_platform_chipset[restrict static 1]) + { + if (proc_cpuinfo_hardware_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *proc_cpuinfo_hardware_chipset; + } + return *ro_board_platform_chipset; + } + + static inline struct cpuinfo_arm_chipset disambiguate_marvell_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_product_board_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_chipname_chipset[restrict static 1]) + { + if (ro_chipname_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_chipname_chipset; + } + if (ro_product_board_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_product_board_chipset; + } + return *proc_cpuinfo_hardware_chipset; + } + + static inline struct cpuinfo_arm_chipset disambiguate_rockchip_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_product_board_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_board_platform_chipset[restrict static 1]) + { + if (ro_product_board_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_product_board_chipset; + } + if (proc_cpuinfo_hardware_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *proc_cpuinfo_hardware_chipset; + } + return *ro_board_platform_chipset; + } + + static inline struct cpuinfo_arm_chipset disambiguate_spreadtrum_chipset( + const struct cpuinfo_arm_chipset proc_cpuinfo_hardware_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_product_board_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_board_platform_chipset[restrict static 1], + const struct cpuinfo_arm_chipset ro_chipname_chipset[restrict static 1]) + { + if (ro_chipname_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_chipname_chipset; + } + if (ro_product_board_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *ro_product_board_chipset; + } + if (proc_cpuinfo_hardware_chipset->series != cpuinfo_arm_chipset_series_unknown) { + return *proc_cpuinfo_hardware_chipset; + } + return *ro_board_platform_chipset; + } + + /* + * Decodes chipset name from Android system properties: + * - /proc/cpuinfo Hardware string + * - ro.product.board + * - ro.board.platform + * - ro.mediatek.platform + * - ro.chipname + * For some chipsets, the function relies frequency and on number of cores for chipset detection. + * + * @param[in] properties - structure with the Android system properties described above. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` vendor + * and series identifiers. 
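/*
 * Illustrative sketch (not the library's code): each disambiguate_*_chipset()
 * helper above is the same pattern specialized per vendor: walk a
 * vendor-specific priority order of decoded candidates and return the first one
 * with a known series, falling back to the least trusted source otherwise. A
 * generic version with simplified types:
 */
#include <stddef.h>

enum sketch_series { sketch_series_unknown = 0, sketch_series_known };

struct sketch_chipset {
	enum sketch_series series;
	unsigned model;
};

/* candidates[] is ordered from most to least trusted source; count must be >= 1 */
static struct sketch_chipset pick_first_known(const struct sketch_chipset* candidates, size_t count) {
	for (size_t i = 0; i < count; i++) {
		if (candidates[i].series != sketch_series_unknown) {
			return candidates[i];
		}
	}
	return candidates[count - 1];
}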
+ */ + struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset( + const struct cpuinfo_android_properties properties[restrict static 1], + uint32_t cores, + uint32_t max_cpu_freq_max) + { + struct cpuinfo_arm_chipset chipset = { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = cpuinfo_arm_chipset_series_unknown, + }; + + const bool tegra_platform = is_tegra( + properties->ro_board_platform, + properties->ro_board_platform + strnlen(properties->ro_board_platform, CPUINFO_BUILD_PROP_VALUE_MAX)); + + struct cpuinfo_arm_chipset chipsets[cpuinfo_android_chipset_property_max] = { + [cpuinfo_android_chipset_property_proc_cpuinfo_hardware] = + cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_hardware( + properties->proc_cpuinfo_hardware, cores, max_cpu_freq_max, tegra_platform), + [cpuinfo_android_chipset_property_ro_product_board] = + cpuinfo_arm_android_decode_chipset_from_ro_product_board( + properties->ro_product_board, cores, max_cpu_freq_max), + [cpuinfo_android_chipset_property_ro_board_platform] = + cpuinfo_arm_android_decode_chipset_from_ro_board_platform( + properties->ro_board_platform, cores, max_cpu_freq_max), + [cpuinfo_android_chipset_property_ro_mediatek_platform] = + cpuinfo_arm_android_decode_chipset_from_ro_mediatek_platform(properties->ro_mediatek_platform), + [cpuinfo_android_chipset_property_ro_arch] = + cpuinfo_arm_android_decode_chipset_from_ro_arch(properties->ro_arch), + [cpuinfo_android_chipset_property_ro_chipname] = + cpuinfo_arm_android_decode_chipset_from_ro_chipname(properties->ro_chipname), + [cpuinfo_android_chipset_property_ro_hardware_chipname] = + cpuinfo_arm_android_decode_chipset_from_ro_chipname(properties->ro_hardware_chipname), + }; + enum cpuinfo_arm_chipset_vendor vendor = cpuinfo_arm_chipset_vendor_unknown; + for (size_t i = 0; i < cpuinfo_android_chipset_property_max; i++) { + const enum cpuinfo_arm_chipset_vendor decoded_vendor = chipsets[i].vendor; + if (decoded_vendor != cpuinfo_arm_chipset_vendor_unknown) { + if (vendor == cpuinfo_arm_chipset_vendor_unknown) { + vendor = decoded_vendor; + } else if (vendor != decoded_vendor) { + /* Parsing different system properties produces different chipset vendors. This situation is rare. */ + cpuinfo_log_error( + "chipset detection failed: different chipset vendors reported in different system properties"); + goto finish; + } + } + } + if (vendor == cpuinfo_arm_chipset_vendor_unknown) { + cpuinfo_log_warning( + "chipset detection failed: none of the system properties matched known signatures"); + goto finish; + } + + /* Fix common bugs in reported chipsets */ + for (size_t i = 0; i < cpuinfo_android_chipset_property_max; i++) { + cpuinfo_arm_fixup_chipset(&chipsets[i], cores, max_cpu_freq_max); + } + + /* + * Propagate suffixes: consider all pairs of chipsets, if both chipsets in the pair are from the same series, + * and one's suffix is a prefix of another's chipset suffix, use the longest suffix. + */ + for (size_t i = 0; i < cpuinfo_android_chipset_property_max; i++) { + const size_t chipset_i_suffix_length = strnlen(chipsets[i].suffix, CPUINFO_ARM_CHIPSET_SUFFIX_MAX); + for (size_t j = 0; j < i; j++) { + if (chipsets[i].series == chipsets[j].series) { + const size_t chipset_j_suffix_length = strnlen(chipsets[j].suffix, CPUINFO_ARM_CHIPSET_SUFFIX_MAX); + if (chipset_i_suffix_length != chipset_j_suffix_length) { + const size_t common_prefix_length = (chipset_i_suffix_length < chipset_j_suffix_length) ? 
+ chipset_i_suffix_length : chipset_j_suffix_length; + if (common_prefix_length == 0 || + memcmp(chipsets[i].suffix, chipsets[j].suffix, common_prefix_length) == 0) + { + if (chipset_i_suffix_length > chipset_j_suffix_length) { + memcpy(chipsets[j].suffix, chipsets[i].suffix, chipset_i_suffix_length); + } else { + memcpy(chipsets[i].suffix, chipsets[j].suffix, chipset_j_suffix_length); + } + } + } + } + } + } + + for (size_t i = 0; i < cpuinfo_android_chipset_property_max; i++) { + if (chipsets[i].series != cpuinfo_arm_chipset_series_unknown) { + if (chipset.series == cpuinfo_arm_chipset_series_unknown) { + chipset = chipsets[i]; + } else if (chipsets[i].series != chipset.series || chipsets[i].model != chipset.model || + strncmp(chipsets[i].suffix, chipset.suffix, CPUINFO_ARM_CHIPSET_SUFFIX_MAX) != 0) + { + cpuinfo_log_info( + "different chipsets reported in different system properties; " + "vendor-specific disambiguation heuristic would be used"); + switch (vendor) { + case cpuinfo_arm_chipset_vendor_qualcomm: + return disambiguate_qualcomm_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_product_board], + &chipsets[cpuinfo_android_chipset_property_ro_board_platform], + &chipsets[cpuinfo_android_chipset_property_ro_chipname], + &chipsets[cpuinfo_android_chipset_property_ro_hardware_chipname]); + case cpuinfo_arm_chipset_vendor_mediatek: + return disambiguate_mediatek_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_product_board], + &chipsets[cpuinfo_android_chipset_property_ro_board_platform], + &chipsets[cpuinfo_android_chipset_property_ro_mediatek_platform], + &chipsets[cpuinfo_android_chipset_property_ro_chipname]); + case cpuinfo_arm_chipset_vendor_hisilicon: + return disambiguate_hisilicon_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_product_board], + &chipsets[cpuinfo_android_chipset_property_ro_board_platform]); + case cpuinfo_arm_chipset_vendor_amlogic: + return disambiguate_amlogic_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_board_platform]); + case cpuinfo_arm_chipset_vendor_marvell: + return disambiguate_marvell_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_product_board], + &chipsets[cpuinfo_android_chipset_property_ro_chipname]); + case cpuinfo_arm_chipset_vendor_rockchip: + return disambiguate_rockchip_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_product_board], + &chipsets[cpuinfo_android_chipset_property_ro_board_platform]); + case cpuinfo_arm_chipset_vendor_spreadtrum: + return disambiguate_spreadtrum_chipset( + &chipsets[cpuinfo_android_chipset_property_proc_cpuinfo_hardware], + &chipsets[cpuinfo_android_chipset_property_ro_product_board], + &chipsets[cpuinfo_android_chipset_property_ro_board_platform], + &chipsets[cpuinfo_android_chipset_property_ro_chipname]); + default: + cpuinfo_log_error( + "chipset detection failed: " + "could not disambiguate different chipsets reported in different system properties"); + /* chipset variable contains valid, but inconsistent chipset information, overwrite it */ + chipset = (struct cpuinfo_arm_chipset) { + .vendor = cpuinfo_arm_chipset_vendor_unknown, + .series = 
cpuinfo_arm_chipset_series_unknown, + }; + goto finish; + } + } + } + } + + finish: + return chipset; + } +#else /* !defined(__ANDROID__) */ + /* + * Fix commonly misreported Broadcom BCM models on Raspberry Pi boards. + * + * @param[in,out] chipset - chipset name to fix. + * @param[in] revision - /proc/cpuinfo Revision string. + */ + void cpuinfo_arm_fixup_raspberry_pi_chipset( + struct cpuinfo_arm_chipset chipset[restrict static 1], + const char revision[restrict static CPUINFO_HARDWARE_VALUE_MAX]) + { + const size_t revision_length = strnlen(revision, CPUINFO_REVISION_VALUE_MAX); + + /* Parse revision codes according to https://www.raspberrypi.org/documentation/hardware/raspberrypi/revision-codes/README.md */ + #if CPUINFO_ARCH_ARM + if (revision_length == 4) { + /* + * Old-style revision codes. + * All Raspberry Pi models with old-style revision code use Broadcom BCM2835. + */ + + /* BCM2835 often misreported as BCM2708 */ + if (chipset->model == 2708) { + chipset->model = 2835; + } + return; + } + #endif + if ((size_t) (revision_length - 5) <= (size_t) (8 - 5) /* 5 <= length(revision) <= 8 */) { + /* New-style revision codes */ + + uint32_t model = 0; + switch (revision[revision_length - 4]) { + case '0': + /* BCM2835 */ + model = 2835; + break; + case '1': + /* BCM2836 */ + model = 2836; + break; + case '2': + /* BCM2837 */ + model = 2837; + break; + case '3': + /* BCM2711 */ + model = 2711; + break; + } + + if (model != 0) { + chipset->model = model; + chipset->suffix[0] = 0; + } + } + } + + /* + * Decodes chipset name from /proc/cpuinfo Hardware string. + * For some chipsets, the function relies frequency and on number of cores for chipset detection. + * + * @param[in] hardware - /proc/cpuinfo Hardware string. + * @param cores - number of cores in the chipset. + * @param max_cpu_freq_max - maximum of /sys/devices/system/cpu/cpu/cpofreq/cpu_freq_max values. + * + * @returns Decoded chipset name. If chipset could not be decoded, the resulting structure would use `unknown` vendor + * and series identifiers. 
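/*
 * Illustrative sketch (not the library's code): new-style Raspberry Pi revision
 * codes encode the processor in the fourth character from the end, which is the
 * character the fixup above switches on. For example, revision "a02082"
 * (a Raspberry Pi 3 Model B) has '2' there and maps to BCM2837, while "c03111"
 * (a Raspberry Pi 4 Model B) has '3' there and maps to BCM2711. The helper
 * below mirrors only that mapping.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rpi_revision_to_bcm_model(const char* revision) {
	const size_t length = strlen(revision);
	if (length < 5 || length > 8) {
		return 0; /* not a new-style revision code */
	}
	switch (revision[length - 4]) {
		case '0': return 2835;
		case '1': return 2836;
		case '2': return 2837;
		case '3': return 2711;
		default:  return 0;
	}
}

int main(void) {
	printf("a02082 -> BCM%" PRIu32 "\n", rpi_revision_to_bcm_model("a02082")); /* BCM2837 */
	printf("c03111 -> BCM%" PRIu32 "\n", rpi_revision_to_bcm_model("c03111")); /* BCM2711 */
	return 0;
}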
+ */ + struct cpuinfo_arm_chipset cpuinfo_arm_linux_decode_chipset( + const char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX], + const char revision[restrict static CPUINFO_REVISION_VALUE_MAX], + uint32_t cores, + uint32_t max_cpu_freq_max) + { + struct cpuinfo_arm_chipset chipset = + cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_hardware( + hardware, cores, max_cpu_freq_max, false); + if (chipset.vendor == cpuinfo_arm_chipset_vendor_unknown) { + cpuinfo_log_warning( + "chipset detection failed: /proc/cpuinfo Hardware string did not match known signatures"); + } else if (chipset.vendor == cpuinfo_arm_chipset_vendor_broadcom) { + /* Raspberry Pi kernel reports bogus chipset models; detect chipset from RPi revision */ + cpuinfo_arm_fixup_raspberry_pi_chipset(&chipset, revision); + } else { + cpuinfo_arm_fixup_chipset(&chipset, cores, max_cpu_freq_max); + } + return chipset; + } + +#endif diff --git a/source/3rdparty/cpuinfo/src/arm/linux/clusters.c b/source/3rdparty/cpuinfo/src/arm/linux/clusters.c new file mode 100644 index 0000000..430773d --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/clusters.c @@ -0,0 +1,493 @@ +#include +#include +#include +#include + +#include +#include +#if defined(__ANDROID__) + #include +#endif +#include +#include +#include +#include +#include + +static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) { + return (bitfield & mask) == mask; +} + +/* + * Assigns logical processors to clusters of cores using heuristic based on the typical configuration of clusters for + * 5, 6, 8, and 10 cores: + * - 5 cores (ARM32 Android only): 2 clusters of 4+1 cores + * - 6 cores: 2 clusters of 4+2 cores + * - 8 cores: 2 clusters of 4+4 cores + * - 10 cores: 3 clusters of 4+4+2 cores + * + * The function must be called after parsing OS-provided information on core clusters. + * Its purpose is to detect clusters of cores when OS-provided information is lacking or incomplete, i.e. + * - Linux kernel is not configured to report information in sysfs topology leaf. + * - Linux kernel reports topology information only for online cores, and only cores on one cluster are online, e.g.: + * - Exynos 8890 has 8 cores in 4+4 clusters, but only the first cluster of 4 cores is reported, and cluster + * configuration of logical processors 4-7 is not reported (all remaining processors 4-7 form cluster 1) + * - MT6797 has 10 cores in 4+4+2, but only the first cluster of 4 cores is reported, and cluster configuration + * of logical processors 4-9 is not reported (processors 4-7 form cluster 1, and processors 8-9 form cluster 2). + * + * Heuristic assignment of processors to the above pre-defined clusters fails if such assignment would contradict + * information provided by the operating system: + * - Any of the OS-reported processor clusters is different than the corresponding heuristic cluster. + * - Processors in a heuristic cluster have no OS-provided cluster siblings information, but have known and different + * minimum/maximum frequency. + * - Processors in a heuristic cluster have no OS-provided cluster siblings information, but have known and different + * MIDR components. + * + * If the heuristic assignment of processors to clusters of cores fails, all processors' clusters are unchanged. + * + * @param usable_processors - number of processors in the @p processors array with CPUINFO_LINUX_FLAG_VALID flags. + * @param max_processors - number of elements in the @p processors array. 
+ * @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE and PRESENT flags, minimum/maximum + * frequency, MIDR information, and core cluster (package siblings list) information. + * + * @retval true if the heuristic successfully assigned all processors into clusters of cores. + * @retval false if known details about processors contradict the heuristic configuration of core clusters. + */ +bool cpuinfo_arm_linux_detect_core_clusters_by_heuristic( + uint32_t usable_processors, + uint32_t max_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) +{ + uint32_t cluster_processors[3]; + switch (usable_processors) { + case 10: + cluster_processors[0] = 4; + cluster_processors[1] = 4; + cluster_processors[2] = 2; + break; + case 8: + cluster_processors[0] = 4; + cluster_processors[1] = 4; + break; + case 6: + cluster_processors[0] = 4; + cluster_processors[1] = 2; + break; +#if defined(__ANDROID__) && CPUINFO_ARCH_ARM + case 5: + /* + * The only processor with 5 cores is Leadcore L1860C (ARMv7, mobile), + * but this configuration is not too unreasonable for a virtualized ARM server. + */ + cluster_processors[0] = 4; + cluster_processors[1] = 1; + break; +#endif + default: + return false; + } + + /* + * Assignment of processors to core clusters is done in two passes: + * 1. Verify that the clusters proposed by heuristic are compatible with known details about processors. + * 2. If verification passed, update core clusters for the processors. + */ + + uint32_t cluster = 0; + uint32_t expected_cluster_processors = 0; + uint32_t cluster_start, cluster_flags, cluster_midr, cluster_max_frequency, cluster_min_frequency; + bool expected_cluster_exists; + for (uint32_t i = 0; i < max_processors; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + if (expected_cluster_processors == 0) { + /* Expect this processor to start a new cluster */ + + expected_cluster_exists = !!(processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER); + if (expected_cluster_exists) { + if (processors[i].package_leader_id != i) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "processor %"PRIu32" is expected to start a new cluster #%"PRIu32" with %"PRIu32" cores, " + "but system siblings lists reported it as a sibling of processor %"PRIu32, + i, cluster, cluster_processors[cluster], processors[i].package_leader_id); + return false; + } + } else { + cluster_flags = 0; + } + + cluster_start = i; + expected_cluster_processors = cluster_processors[cluster++]; + } else { + /* Expect this processor to belong to the same cluster as processor */ + + if (expected_cluster_exists) { + /* + * The cluster suggested by the heuristic was already parsed from system siblings lists. + * For all processors we expect in the cluster, check that: + * - They have pre-assigned cluster from siblings lists (CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER flag). + * - They were assigned to the same cluster based on siblings lists + * (package_leader_id points to the first processor in the cluster). 
+ */ + + if ((processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) == 0) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "processor %"PRIu32" is expected to belong to the cluster of processor %"PRIu32", " + "but system siblings lists did not report it as a sibling of processor %"PRIu32, + i, cluster_start, cluster_start); + return false; + } + if (processors[i].package_leader_id != cluster_start) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "processor %"PRIu32" is expected to belong to the cluster of processor %"PRIu32", " + "but system siblings lists reported it to belong to the cluster of processor %"PRIu32, + i, cluster_start, cluster_start); + return false; + } + } else { + /* + * The cluster suggest by the heuristic was not parsed from system siblings lists. + * For all processors we expect in the cluster, check that: + * - They have no pre-assigned cluster from siblings lists. + * - If their min/max CPU frequency is known, it is the same. + * - If any part of their MIDR (Implementer, Variant, Part, Revision) is known, it is the same. + */ + + if (processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "processor %"PRIu32" is expected to be unassigned to any cluster, " + "but system siblings lists reported it to belong to the cluster of processor %"PRIu32, + i, processors[i].package_leader_id); + return false; + } + + if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + if (cluster_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + if (cluster_min_frequency != processors[i].min_frequency) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "minimum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of its expected cluster (%"PRIu32" KHz)", + i, processors[i].min_frequency, cluster_min_frequency); + return false; + } + } else { + cluster_min_frequency = processors[i].min_frequency; + cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY; + } + } + + if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (cluster_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (cluster_max_frequency != processors[i].max_frequency) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "maximum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of its expected cluster (%"PRIu32" KHz)", + i, processors[i].max_frequency, cluster_max_frequency); + return false; + } + } else { + cluster_max_frequency = processors[i].max_frequency; + cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + if ((cluster_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK)) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "CPU Implementer of processor %"PRIu32" (0x%02"PRIx32") is different than of its expected cluster (0x%02"PRIx32")", + i, midr_get_implementer(processors[i].midr), midr_get_implementer(cluster_midr)); + return false; + } + } else { + cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + if ((cluster_midr & CPUINFO_ARM_MIDR_VARIANT_MASK) != (processors[i].midr & 
CPUINFO_ARM_MIDR_VARIANT_MASK)) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "CPU Variant of processor %"PRIu32" (0x%"PRIx32") is different than of its expected cluster (0x%"PRIx32")", + i, midr_get_variant(processors[i].midr), midr_get_variant(cluster_midr)); + return false; + } + } else { + cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_PART) { + if ((cluster_midr & CPUINFO_ARM_MIDR_PART_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_PART_MASK)) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "CPU Part of processor %"PRIu32" (0x%03"PRIx32") is different than of its expected cluster (0x%03"PRIx32")", + i, midr_get_part(processors[i].midr), midr_get_part(cluster_midr)); + return false; + } + } else { + cluster_midr = midr_copy_part(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + if ((cluster_midr & CPUINFO_ARM_MIDR_REVISION_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_REVISION_MASK)) { + cpuinfo_log_debug( + "heuristic detection of core clusters failed: " + "CPU Revision of processor %"PRIu32" (0x%"PRIx32") is different than of its expected cluster (0x%"PRIx32")", + i, midr_get_revision(cluster_midr), midr_get_revision(processors[i].midr)); + return false; + } + } else { + cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION; + } + } + } + } + expected_cluster_processors--; + } + } + + /* Verification passed, assign all processors to new clusters */ + cluster = 0; + expected_cluster_processors = 0; + for (uint32_t i = 0; i < max_processors; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + if (expected_cluster_processors == 0) { + /* Expect this processor to start a new cluster */ + + cluster_start = i; + expected_cluster_processors = cluster_processors[cluster++]; + } else { + /* Expect this processor to belong to the same cluster as processor */ + + if (!(processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) { + cpuinfo_log_debug("assigned processor %"PRIu32" to cluster of processor %"PRIu32" based on heuristic", + i, cluster_start); + } + + processors[i].package_leader_id = cluster_start; + processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER; + } + expected_cluster_processors--; + } + } + return true; +} + +/* + * Assigns logical processors to clusters of cores in sequential manner: + * - Clusters detected from OS-provided information are unchanged: + * - Processors assigned to these clusters stay assigned to the same clusters + * - No new processors are added to these clusters + * - Processors without pre-assigned cluster are clustered in one sequential scan: + * - If known details (min/max frequency, MIDR components) of a processor are compatible with a preceding + * processor, without pre-assigned cluster, the processor is assigned to the cluster of the preceding processor. + * - If known details (min/max frequency, MIDR components) of a processor are not compatible with a preceding + * processor, the processor is assigned to a newly created cluster. 
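/*
 * Illustrative sketch (not the library's code): the MIDR comparisons above use
 * the architecturally defined Main ID Register layout: Implementer in bits
 * [31:24], Variant in [23:20], Architecture in [19:16], Part number in [15:4]
 * and Revision in [3:0]. The accessors below are stand-ins consistent with that
 * layout; the library's own midr_get_*()/midr_copy_*() helpers and
 * CPUINFO_ARM_MIDR_*_MASK constants are defined in its midr.h.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MIDR_IMPLEMENTER_MASK UINT32_C(0xFF000000)
#define SKETCH_MIDR_PART_MASK        UINT32_C(0x0000FFF0)

static inline uint32_t sketch_midr_get_implementer(uint32_t midr) {
	return midr >> 24;
}

static inline uint32_t sketch_midr_get_part(uint32_t midr) {
	return (midr >> 4) & UINT32_C(0xFFF);
}

/* Copy only the part-number field of other_midr into midr, the way per-cluster
 * MIDR information is accumulated field by field above */
static inline uint32_t sketch_midr_copy_part(uint32_t midr, uint32_t other_midr) {
	return (midr & ~SKETCH_MIDR_PART_MASK) | (other_midr & SKETCH_MIDR_PART_MASK);
}

int main(void) {
	const uint32_t midr = UINT32_C(0x410FD034); /* an ARM Cortex-A53 r0p4 MIDR */
	printf("implementer 0x%02" PRIX32 ", part 0x%03" PRIX32 "\n",
		sketch_midr_get_implementer(midr), sketch_midr_get_part(midr));
	return 0;
}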
+ * + * The function must be called after parsing OS-provided information on core clusters, and usually is called only + * if heuristic assignment of processors to clusters (cpuinfo_arm_linux_cluster_processors_by_heuristic) failed. + * + * Its purpose is to detect clusters of cores when OS-provided information is lacking or incomplete, i.e. + * - Linux kernel is not configured to report information in sysfs topology leaf. + * - Linux kernel reports topology information only for online cores, and all cores on some of the clusters are offline. + * + * Sequential assignment of processors to clusters always succeeds, and upon exit, all usable processors in the + * @p processors array have cluster information. + * + * @param max_processors - number of elements in the @p processors array. + * @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE and PRESENT flags, minimum/maximum + * frequency, MIDR information, and core cluster (package siblings list) information. + * + * @retval true if the heuristic successfully assigned all processors into clusters of cores. + * @retval false if known details about processors contradict the heuristic configuration of core clusters. + */ +void cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan( + uint32_t max_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) +{ + uint32_t cluster_flags = 0; + uint32_t cluster_processors = 0; + uint32_t cluster_start, cluster_midr, cluster_max_frequency, cluster_min_frequency; + for (uint32_t i = 0; i < max_processors; i++) { + if ((processors[i].flags & (CPUINFO_LINUX_FLAG_VALID | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) == CPUINFO_LINUX_FLAG_VALID) { + if (cluster_processors == 0) { + goto new_cluster; + } + + if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + if (cluster_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + if (cluster_min_frequency != processors[i].min_frequency) { + cpuinfo_log_info( + "minimum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of preceding cluster (%"PRIu32" KHz); " + "processor %"PRIu32" starts to a new cluster", + i, processors[i].min_frequency, cluster_min_frequency, i); + goto new_cluster; + } + } else { + cluster_min_frequency = processors[i].min_frequency; + cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY; + } + } + + if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (cluster_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (cluster_max_frequency != processors[i].max_frequency) { + cpuinfo_log_debug( + "maximum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of preceding cluster (%"PRIu32" KHz); " + "processor %"PRIu32" starts a new cluster", + i, processors[i].max_frequency, cluster_max_frequency, i); + goto new_cluster; + } + } else { + cluster_max_frequency = processors[i].max_frequency; + cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + if ((cluster_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK)) { + cpuinfo_log_debug( + "CPU Implementer of processor %"PRIu32" (0x%02"PRIx32") is different than of preceding cluster (0x%02"PRIx32"); " + "processor %"PRIu32" starts to a new cluster", + i, midr_get_implementer(processors[i].midr), midr_get_implementer(cluster_midr), i); + goto new_cluster; + } + } else { + cluster_midr = midr_copy_implementer(cluster_midr, 
processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + if ((cluster_midr & CPUINFO_ARM_MIDR_VARIANT_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_VARIANT_MASK)) { + cpuinfo_log_debug( + "CPU Variant of processor %"PRIu32" (0x%"PRIx32") is different than of its expected cluster (0x%"PRIx32")" + "processor %"PRIu32" starts to a new cluster", + i, midr_get_variant(processors[i].midr), midr_get_variant(cluster_midr), i); + goto new_cluster; + } + } else { + cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_PART) { + if ((cluster_midr & CPUINFO_ARM_MIDR_PART_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_PART_MASK)) { + cpuinfo_log_debug( + "CPU Part of processor %"PRIu32" (0x%03"PRIx32") is different than of its expected cluster (0x%03"PRIx32")" + "processor %"PRIu32" starts to a new cluster", + i, midr_get_part(processors[i].midr), midr_get_part(cluster_midr), i); + goto new_cluster; + } + } else { + cluster_midr = midr_copy_part(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART; + } + } + + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + if (cluster_flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + if ((cluster_midr & CPUINFO_ARM_MIDR_REVISION_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_REVISION_MASK)) { + cpuinfo_log_debug( + "CPU Revision of processor %"PRIu32" (0x%"PRIx32") is different than of its expected cluster (0x%"PRIx32")" + "processor %"PRIu32" starts to a new cluster", + i, midr_get_revision(cluster_midr), midr_get_revision(processors[i].midr), i); + goto new_cluster; + } + } else { + cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION; + } + } + + /* All checks passed, attach processor to the preceding cluster */ + cluster_processors++; + processors[i].package_leader_id = cluster_start; + processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER; + cpuinfo_log_debug("assigned processor %"PRIu32" to preceding cluster of processor %"PRIu32, i, cluster_start); + continue; + +new_cluster: + /* Create a new cluster starting with processor i */ + cluster_start = i; + processors[i].package_leader_id = i; + processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER; + cluster_processors = 1; + + /* Copy known information from processor to cluster, and set the flags accordingly */ + cluster_flags = 0; + if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) { + cluster_min_frequency = processors[i].min_frequency; + cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY; + } + if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + cluster_max_frequency = processors[i].max_frequency; + cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY; + } + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER; + } + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT; + } + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) { + cluster_midr = midr_copy_part(cluster_midr, 
processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART; + } + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr); + cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION; + } + } + } +} + +/* + * Counts the number of logical processors in each core cluster. + * This function should be called after all processors are assigned to core clusters. + * + * @param max_processors - number of elements in the @p processors array. + * @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE and PRESENT flags, + * and decoded core cluster (package_leader_id) information. + * The function expects the value of processors[i].package_processor_count to be zero. + * Upon return, processors[i].package_processor_count will contain the number of logical + * processors in the respective core cluster. + */ +void cpuinfo_arm_linux_count_cluster_processors( + uint32_t max_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) +{ + /* First pass: accumulate the number of processors at the group leader's package_processor_count */ + for (uint32_t i = 0; i < max_processors; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + const uint32_t package_leader_id = processors[i].package_leader_id; + processors[package_leader_id].package_processor_count += 1; + } + } + /* Second pass: copy the package_processor_count from the group leader processor */ + for (uint32_t i = 0; i < max_processors; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + const uint32_t package_leader_id = processors[i].package_leader_id; + processors[i].package_processor_count = processors[package_leader_id].package_processor_count; + } + } +} diff --git a/source/3rdparty/cpuinfo/src/arm/linux/cp.h b/source/3rdparty/cpuinfo/src/arm/linux/cp.h new file mode 100644 index 0000000..63940ec --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/cp.h @@ -0,0 +1,44 @@ +#include + + +#if CPUINFO_MOCK + extern uint32_t cpuinfo_arm_fpsid; + extern uint32_t cpuinfo_arm_mvfr0; + extern uint32_t cpuinfo_arm_wcid; + + static inline uint32_t read_fpsid(void) { + return cpuinfo_arm_fpsid; + } + + static inline uint32_t read_mvfr0(void) { + return cpuinfo_arm_mvfr0; + } + + static inline uint32_t read_wcid(void) { + return cpuinfo_arm_wcid; + } +#else + #if !defined(__ARM_ARCH_7A__) && !defined(__ARM_ARCH_8A__) && !(defined(__ARM_ARCH) && (__ARM_ARCH >= 7)) + /* + * CoProcessor 10 is inaccessible from user mode since ARMv7, + * and clang refuses to compile inline assembly when targeting ARMv7+ + */ + static inline uint32_t read_fpsid(void) { + uint32_t fpsid; + __asm__ __volatile__("MRC p10, 0x7, %[fpsid], cr0, cr0, 0" : [fpsid] "=r" (fpsid)); + return fpsid; + } + + static inline uint32_t read_mvfr0(void) { + uint32_t mvfr0; + __asm__ __volatile__("MRC p10, 0x7, %[mvfr0], cr7, cr0, 0" : [mvfr0] "=r" (mvfr0)); + return mvfr0; + } + #endif + + static inline uint32_t read_wcid(void) { + uint32_t wcid; + __asm__ __volatile__("MRC p1, 0, %[wcid], c0, c0" : [wcid] "=r" (wcid)); + return wcid; + } +#endif diff --git a/source/3rdparty/cpuinfo/src/arm/linux/cpuinfo.c b/source/3rdparty/cpuinfo/src/arm/linux/cpuinfo.c new file mode 100644 index 0000000..90e1631 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/cpuinfo.c @@ -0,0 +1,908 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Size, in chars, of the on-stack 
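/*
 * Illustrative sketch (not the library's code):
 * cpuinfo_arm_linux_count_cluster_processors() above uses a two-pass scheme,
 * first accumulating a count at each cluster leader and then broadcasting the
 * leader's total back to every member. The toy arrays below stand in for the
 * processors[] descriptors, with a 4+2 cluster split.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
	/* leader[i] is the index of the first processor in i's cluster */
	const uint32_t leader[6] = { 0, 0, 0, 0, 4, 4 };
	uint32_t count[6] = { 0 };

	/* Pass 1: accumulate the cluster size at the leader */
	for (uint32_t i = 0; i < 6; i++) {
		count[leader[i]] += 1;
	}
	/* Pass 2: copy the leader's total to every member of its cluster */
	for (uint32_t i = 0; i < 6; i++) {
		count[i] = count[leader[i]];
	}
	for (uint32_t i = 0; i < 6; i++) {
		/* Prints cluster size 4 for processors 0-3 and 2 for processors 4-5 */
		printf("processor %" PRIu32 ": cluster size %" PRIu32 "\n", i, count[i]);
	}
	return 0;
}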
buffer used for parsing lines of /proc/cpuinfo. + * This is also the limit on the length of a single line. + */ +#define BUFFER_SIZE 1024 + + +static uint32_t parse_processor_number( + const char* processor_start, + const char* processor_end) +{ + const size_t processor_length = (size_t) (processor_end - processor_start); + + if (processor_length == 0) { + cpuinfo_log_warning("Processor number in /proc/cpuinfo is ignored: string is empty"); + return 0; + } + + uint32_t processor_number = 0; + for (const char* digit_ptr = processor_start; digit_ptr != processor_end; digit_ptr++) { + const uint32_t digit = (uint32_t) (*digit_ptr - '0'); + if (digit > 10) { + cpuinfo_log_warning("non-decimal suffix %.*s in /proc/cpuinfo processor number is ignored", + (int) (processor_end - digit_ptr), digit_ptr); + break; + } + + processor_number = processor_number * 10 + digit; + } + + return processor_number; +} + +/* + * Full list of ARM features reported in /proc/cpuinfo: + * + * * swp - support for SWP instruction (deprecated in ARMv7, can be removed in future) + * * half - support for half-word loads and stores. These instruction are part of ARMv4, + * so no need to check it on supported CPUs. + * * thumb - support for 16-bit Thumb instruction set. Note that BX instruction is detected + * by ARMv4T architecture, not by this flag. + * * 26bit - old CPUs merged 26-bit PC and program status register (flags) into 32-bit PC + * and had special instructions for working with packed PC. Now it is all deprecated. + * * fastmult - most old ARM CPUs could only compute 2 bits of multiplication result per clock + * cycle, but CPUs with M suffix (e.g. ARM7TDMI) could compute 4 bits per cycle. + * Of course, now it makes no sense. + * * fpa - floating point accelerator available. On original ARM ABI all floating-point operations + * generated FPA instructions. If FPA was not available, these instructions generated + * "illegal operation" interrupts, and the OS processed them by emulating the FPA instructions. + * Debian used this ABI before it switched to EABI. Now FPA is deprecated. + * * vfp - vector floating point instructions. Available on most modern CPUs (as part of VFPv3). + * Required by Android ARMv7A ABI and by Ubuntu on ARM. + * Note: there is no flag for VFPv2. + * * edsp - V5E instructions: saturating add/sub and 16-bit x 16-bit -> 32/64-bit multiplications. + * Required on Android, supported by all CPUs in production. + * * java - Jazelle extension. Supported on most CPUs. + * * iwmmxt - Intel/Marvell Wireless MMX instructions. 64-bit integer SIMD. + * Supported on XScale (Since PXA270) and Sheeva (PJ1, PJ4) architectures. + * Note that there is no flag for WMMX2 instructions. + * * crunch - Maverick Crunch instructions. Junk. + * * thumbee - ThumbEE instructions. Almost no documentation is available. + * * neon - NEON instructions (aka Advanced SIMD). MVFR1 register gives more + * fine-grained information on particular supported features, but + * the Linux kernel exports only a single flag for all of them. + * According to ARMv7A docs it also implies the availability of VFPv3 + * (with 32 double-precision registers d0-d31). + * * vfpv3 - VFPv3 instructions. Available on most modern CPUs. Augment VFPv2 by + * conversion to/from integers and load constant instructions. + * Required by Android ARMv7A ABI and by Ubuntu on ARM. + * * vfpv3d16 - VFPv3 instructions with only 16 double-precision registers (d0-d15). + * * tls - software thread ID registers. 
+ * Used by kernel (and likely libc) for efficient implementation of TLS. + * * vfpv4 - fused multiply-add instructions. + * * idiva - DIV instructions available in ARM mode. + * * idivt - DIV instructions available in Thumb mode. + * * vfpd32 - VFP (of any version) with 32 double-precision registers d0-d31. + * * lpae - Large Physical Address Extension (physical address up to 40 bits). + * * evtstrm - generation of Event Stream by timer. + * * aes - AES instructions. + * * pmull - Polinomial Multiplication instructions. + * * sha1 - SHA1 instructions. + * * sha2 - SHA2 instructions. + * * crc32 - CRC32 instructions. + * + * /proc/cpuinfo on ARM is populated in file arch/arm/kernel/setup.c in Linux kernel + * Note that some devices may use patched Linux kernels with different feature names. + * However, the names above were checked on a large number of /proc/cpuinfo listings. + */ +static void parse_features( + const char* features_start, + const char* features_end, + struct cpuinfo_arm_linux_processor processor[restrict static 1]) +{ + const char* feature_start = features_start; + const char* feature_end; + + /* Mark the features as valid */ + processor->flags |= CPUINFO_ARM_LINUX_VALID_FEATURES | CPUINFO_ARM_LINUX_VALID_PROCESSOR; + + do { + feature_end = feature_start + 1; + for (; feature_end != features_end; feature_end++) { + if (*feature_end == ' ') { + break; + } + } + const size_t feature_length = (size_t) (feature_end - feature_start); + + switch (feature_length) { + case 2: + if (memcmp(feature_start, "fp", feature_length) == 0) { +#if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_FP; +#endif +#if CPUINFO_ARCH_ARM + } else if (memcmp(feature_start, "wp", feature_length) == 0) { + /* + * Some AArch64 kernels, including the one on Nexus 5X, + * erroneously report "swp" as "wp" to AArch32 programs + */ + processor->features |= CPUINFO_ARM_LINUX_FEATURE_SWP; +#endif + } else { + goto unexpected; + } + break; + case 3: + if (memcmp(feature_start, "aes", feature_length) == 0) { + #if CPUINFO_ARCH_ARM + processor->features2 |= CPUINFO_ARM_LINUX_FEATURE2_AES; + #elif CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_AES; + #endif +#if CPUINFO_ARCH_ARM + } else if (memcmp(feature_start, "swp", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_SWP; + } else if (memcmp(feature_start, "fpa", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_FPA; + } else if (memcmp(feature_start, "vfp", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_VFP; + } else if (memcmp(feature_start, "tls", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_TLS; +#endif /* CPUINFO_ARCH_ARM */ + } else { + goto unexpected; + } + break; + case 4: + if (memcmp(feature_start, "sha1", feature_length) == 0) { + #if CPUINFO_ARCH_ARM + processor->features2 |= CPUINFO_ARM_LINUX_FEATURE2_SHA1; + #elif CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_SHA1; + #endif + } else if (memcmp(feature_start, "sha2", feature_length) == 0) { + #if CPUINFO_ARCH_ARM + processor->features2 |= CPUINFO_ARM_LINUX_FEATURE2_SHA2; + #elif CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_SHA2; + #endif + } else if (memcmp(feature_start, "fphp", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_FPHP; + #endif + } else if (memcmp(feature_start, "fcma", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + 
processor->features |= CPUINFO_ARM_LINUX_FEATURE_FCMA; + #endif +#if CPUINFO_ARCH_ARM + } else if (memcmp(feature_start, "half", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_HALF; + } else if (memcmp(feature_start, "edsp", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_EDSP; + } else if (memcmp(feature_start, "java", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_JAVA; + } else if (memcmp(feature_start, "neon", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_NEON; + } else if (memcmp(feature_start, "lpae", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_LPAE; + } else if (memcmp(feature_start, "tlsi", feature_length) == 0) { + /* + * Some AArch64 kernels, including the one on Nexus 5X, + * erroneously report "tls" as "tlsi" to AArch32 programs + */ + processor->features |= CPUINFO_ARM_LINUX_FEATURE_TLS; +#endif /* CPUINFO_ARCH_ARM */ + } else { + goto unexpected; + } + break; + case 5: + if (memcmp(feature_start, "pmull", feature_length) == 0) { + #if CPUINFO_ARCH_ARM + processor->features2 |= CPUINFO_ARM_LINUX_FEATURE2_PMULL; + #elif CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_PMULL; + #endif + } else if (memcmp(feature_start, "crc32", feature_length) == 0) { + #if CPUINFO_ARCH_ARM + processor->features2 |= CPUINFO_ARM_LINUX_FEATURE2_CRC32; + #elif CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_CRC32; + #endif + } else if (memcmp(feature_start, "asimd", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_ASIMD; + #endif + } else if (memcmp(feature_start, "cpuid", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_CPUID; + #endif + } else if (memcmp(feature_start, "jscvt", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_JSCVT; + #endif + } else if (memcmp(feature_start, "lrcpc", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_LRCPC; + #endif +#if CPUINFO_ARCH_ARM + } else if (memcmp(feature_start, "thumb", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_THUMB; + } else if (memcmp(feature_start, "26bit", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_26BIT; + } else if (memcmp(feature_start, "vfpv3", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_VFPV3; + } else if (memcmp(feature_start, "vfpv4", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_VFPV4; + } else if (memcmp(feature_start, "idiva", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_IDIVA; + } else if (memcmp(feature_start, "idivt", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_IDIVT; +#endif /* CPUINFO_ARCH_ARM */ + } else { + goto unexpected; + } + break; +#if CPUINFO_ARCH_ARM + case 6: + if (memcmp(feature_start, "iwmmxt", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_IWMMXT; + } else if (memcmp(feature_start, "crunch", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_CRUNCH; + } else if (memcmp(feature_start, "vfpd32", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_VFPD32; + } else { + goto unexpected; + } + break; +#endif /* CPUINFO_ARCH_ARM */ + case 7: + if (memcmp(feature_start, "evtstrm", 
feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_EVTSTRM; + } else if (memcmp(feature_start, "atomics", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_ATOMICS; + #endif + } else if (memcmp(feature_start, "asimdhp", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_ASIMDHP; + #endif +#if CPUINFO_ARCH_ARM + } else if (memcmp(feature_start, "thumbee", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_THUMBEE; +#endif /* CPUINFO_ARCH_ARM */ + } else { + goto unexpected; + } + break; + case 8: + if (memcmp(feature_start, "asimdrdm", feature_length) == 0) { + #if CPUINFO_ARCH_ARM64 + processor->features |= CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM; + #endif +#if CPUINFO_ARCH_ARM + } else if (memcmp(feature_start, "fastmult", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_FASTMULT; + } else if (memcmp(feature_start, "vfpv3d16", feature_length) == 0) { + processor->features |= CPUINFO_ARM_LINUX_FEATURE_VFPV3D16; +#endif /* CPUINFO_ARCH_ARM */ + } else { + goto unexpected; + } + break; + default: + unexpected: + cpuinfo_log_warning("unexpected /proc/cpuinfo feature \"%.*s\" is ignored", + (int) feature_length, feature_start); + break; + } + feature_start = feature_end; + for (; feature_start != features_end; feature_start++) { + if (*feature_start != ' ') { + break; + } + } + } while (feature_start != feature_end); +} + +static void parse_cpu_architecture( + const char* cpu_architecture_start, + const char* cpu_architecture_end, + struct cpuinfo_arm_linux_processor processor[restrict static 1]) +{ + const size_t cpu_architecture_length = (size_t) (cpu_architecture_end - cpu_architecture_start); + /* Early AArch64 kernels report "CPU architecture: AArch64" instead of a numeric value 8 */ + if (cpu_architecture_length == 7) { + if (memcmp(cpu_architecture_start, "AArch64", cpu_architecture_length) == 0) { + processor->midr = midr_set_architecture(processor->midr, UINT32_C(0xF)); + processor->architecture_version = 8; + processor->flags |= CPUINFO_ARM_LINUX_VALID_ARCHITECTURE | CPUINFO_ARM_LINUX_VALID_PROCESSOR; + return; + } + } + + + uint32_t architecture = 0; + const char* cpu_architecture_ptr = cpu_architecture_start; + for (; cpu_architecture_ptr != cpu_architecture_end; cpu_architecture_ptr++) { + const uint32_t digit = (*cpu_architecture_ptr) - '0'; + + /* Verify that CPU architecture is a decimal number */ + if (digit >= 10) { + break; + } + + architecture = architecture * 10 + digit; + } + + if (cpu_architecture_ptr == cpu_architecture_start) { + cpuinfo_log_warning("CPU architecture %.*s in /proc/cpuinfo is ignored due to non-digit at the beginning of the string", + (int) cpu_architecture_length, cpu_architecture_start); + } else { + if (architecture != 0) { + processor->architecture_version = architecture; + processor->flags |= CPUINFO_ARM_LINUX_VALID_ARCHITECTURE | CPUINFO_ARM_LINUX_VALID_PROCESSOR; + + for (; cpu_architecture_ptr != cpu_architecture_end; cpu_architecture_ptr++) { + const char feature = *cpu_architecture_ptr; + switch (feature) { +#if CPUINFO_ARCH_ARM + case 'T': + processor->architecture_flags |= CPUINFO_ARM_LINUX_ARCH_T; + break; + case 'E': + processor->architecture_flags |= CPUINFO_ARM_LINUX_ARCH_E; + break; + case 'J': + processor->architecture_flags |= CPUINFO_ARM_LINUX_ARCH_J; + break; +#endif /* CPUINFO_ARCH_ARM */ + case ' ': + case '\t': + /* Ignore whitespace at the end */ + break; + 
default: + cpuinfo_log_warning("skipped unknown architectural feature '%c' for ARMv%"PRIu32, + feature, architecture); + break; + } + } + } else { + cpuinfo_log_warning("CPU architecture %.*s in /proc/cpuinfo is ignored due to invalid value (0)", + (int) cpu_architecture_length, cpu_architecture_start); + } + } + + uint32_t midr_architecture = UINT32_C(0xF); +#if CPUINFO_ARCH_ARM + switch (processor->architecture_version) { + case 6: + midr_architecture = UINT32_C(0x7); /* ARMv6 */ + break; + case 5: + if ((processor->architecture_flags & CPUINFO_ARM_LINUX_ARCH_TEJ) == CPUINFO_ARM_LINUX_ARCH_TEJ) { + midr_architecture = UINT32_C(0x6); /* ARMv5TEJ */ + } else if ((processor->architecture_flags & CPUINFO_ARM_LINUX_ARCH_TE) == CPUINFO_ARM_LINUX_ARCH_TE) { + midr_architecture = UINT32_C(0x5); /* ARMv5TE */ + } else { + midr_architecture = UINT32_C(0x4); /* ARMv5T */ + } + break; + } +#endif + processor->midr = midr_set_architecture(processor->midr, midr_architecture); +} + +static void parse_cpu_part( + const char* cpu_part_start, + const char* cpu_part_end, + struct cpuinfo_arm_linux_processor processor[restrict static 1]) +{ + const size_t cpu_part_length = (size_t) (cpu_part_end - cpu_part_start); + + /* + * CPU part should contain hex prefix (0x) and one to three hex digits. + * I have never seen less than three digits as a value of this field, + * but I don't think it is impossible to see such values in future. + * Value can not contain more than three hex digits since + * Main ID Register (MIDR) assigns only a 12-bit value for CPU part. + */ + if (cpu_part_length < 3 || cpu_part_length > 5) { + cpuinfo_log_warning("CPU part %.*s in /proc/cpuinfo is ignored due to unexpected length (%zu)", + (int) cpu_part_length, cpu_part_start, cpu_part_length); + return; + } + + /* Verify the presence of hex prefix */ + if (cpu_part_start[0] != '0' || cpu_part_start[1] != 'x') { + cpuinfo_log_warning("CPU part %.*s in /proc/cpuinfo is ignored due to lack of 0x prefix", + (int) cpu_part_length, cpu_part_start); + return; + } + + /* Verify that characters after hex prefix are hexadecimal digits and decode them */ + uint32_t cpu_part = 0; + for (const char* digit_ptr = cpu_part_start + 2; digit_ptr != cpu_part_end; digit_ptr++) { + const char digit_char = *digit_ptr; + uint32_t digit; + if (digit_char >= '0' && digit_char <= '9') { + digit = digit_char - '0'; + } else if ((uint32_t) (digit_char - 'A') < 6) { + digit = 10 + (digit_char - 'A'); + } else if ((uint32_t) (digit_char - 'a') < 6) { + digit = 10 + (digit_char - 'a'); + } else { + cpuinfo_log_warning("CPU part %.*s in /proc/cpuinfo is ignored due to unexpected non-hex character %c at offset %zu", + (int) cpu_part_length, cpu_part_start, digit_char, (size_t) (digit_ptr - cpu_part_start)); + return; + } + cpu_part = cpu_part * 16 + digit; + } + + processor->midr = midr_set_part(processor->midr, cpu_part); + processor->flags |= CPUINFO_ARM_LINUX_VALID_PART | CPUINFO_ARM_LINUX_VALID_PROCESSOR; +} + +static void parse_cpu_implementer( + const char* cpu_implementer_start, + const char* cpu_implementer_end, + struct cpuinfo_arm_linux_processor processor[restrict static 1]) +{ + const size_t cpu_implementer_length = cpu_implementer_end - cpu_implementer_start; + + /* + * Value should contain hex prefix (0x) and one or two hex digits. + * I have never seen single hex digit as a value of this field, + * but I don't think it is impossible in future. 
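+ * Typical two-digit values are 0x41 (ARM), 0x51 (Qualcomm), and 0x53 (Samsung).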
+ * Value can not contain more than two hex digits since + * Main ID Register (MIDR) assigns only an 8-bit value for CPU implementer. + */ + switch (cpu_implementer_length) { + case 3: + case 4: + break; + default: + cpuinfo_log_warning("CPU implementer %.*s in /proc/cpuinfo is ignored due to unexpected length (%zu)", + (int) cpu_implementer_length, cpu_implementer_start, cpu_implementer_length); + return; + } + + /* Verify the presence of hex prefix */ + if (cpu_implementer_start[0] != '0' || cpu_implementer_start[1] != 'x') { + cpuinfo_log_warning("CPU implementer %.*s in /proc/cpuinfo is ignored due to lack of 0x prefix", + (int) cpu_implementer_length, cpu_implementer_start); + return; + } + + /* Verify that characters after hex prefix are hexadecimal digits and decode them */ + uint32_t cpu_implementer = 0; + for (const char* digit_ptr = cpu_implementer_start + 2; digit_ptr != cpu_implementer_end; digit_ptr++) { + const char digit_char = *digit_ptr; + uint32_t digit; + if (digit_char >= '0' && digit_char <= '9') { + digit = digit_char - '0'; + } else if ((uint32_t) (digit_char - 'A') < 6) { + digit = 10 + (digit_char - 'A'); + } else if ((uint32_t) (digit_char - 'a') < 6) { + digit = 10 + (digit_char - 'a'); + } else { + cpuinfo_log_warning("CPU implementer %.*s in /proc/cpuinfo is ignored due to unexpected non-hex character '%c' at offset %zu", + (int) cpu_implementer_length, cpu_implementer_start, digit_char, (size_t) (digit_ptr - cpu_implementer_start)); + return; + } + cpu_implementer = cpu_implementer * 16 + digit; + } + + processor->midr = midr_set_implementer(processor->midr, cpu_implementer); + processor->flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER | CPUINFO_ARM_LINUX_VALID_PROCESSOR; +} + +static void parse_cpu_variant( + const char* cpu_variant_start, + const char* cpu_variant_end, + struct cpuinfo_arm_linux_processor processor[restrict static 1]) +{ + const size_t cpu_variant_length = cpu_variant_end - cpu_variant_start; + + /* + * Value should contain hex prefix (0x) and one hex digit. + * Value can not contain more than one hex digits since + * Main ID Register (MIDR) assigns only a 4-bit value for CPU variant. + */ + if (cpu_variant_length != 3) { + cpuinfo_log_warning("CPU variant %.*s in /proc/cpuinfo is ignored due to unexpected length (%zu)", + (int) cpu_variant_length, cpu_variant_start, cpu_variant_length); + return; + } + + /* Skip if there is no hex prefix (0x) */ + if (cpu_variant_start[0] != '0' || cpu_variant_start[1] != 'x') { + cpuinfo_log_warning("CPU variant %.*s in /proc/cpuinfo is ignored due to lack of 0x prefix", + (int) cpu_variant_length, cpu_variant_start); + return; + } + + /* Check if the value after hex prefix is indeed a hex digit and decode it. 
*/ + const char digit_char = cpu_variant_start[2]; + uint32_t cpu_variant; + if ((uint32_t) (digit_char - '0') < 10) { + cpu_variant = (uint32_t) (digit_char - '0'); + } else if ((uint32_t) (digit_char - 'A') < 6) { + cpu_variant = 10 + (uint32_t) (digit_char - 'A'); + } else if ((uint32_t) (digit_char - 'a') < 6) { + cpu_variant = 10 + (uint32_t) (digit_char - 'a'); + } else { + cpuinfo_log_warning("CPU variant %.*s in /proc/cpuinfo is ignored due to unexpected non-hex character '%c'", + (int) cpu_variant_length, cpu_variant_start, digit_char); + return; + } + + processor->midr = midr_set_variant(processor->midr, cpu_variant); + processor->flags |= CPUINFO_ARM_LINUX_VALID_VARIANT | CPUINFO_ARM_LINUX_VALID_PROCESSOR; +} + +static void parse_cpu_revision( + const char* cpu_revision_start, + const char* cpu_revision_end, + struct cpuinfo_arm_linux_processor processor[restrict static 1]) +{ + uint32_t cpu_revision = 0; + for (const char* digit_ptr = cpu_revision_start; digit_ptr != cpu_revision_end; digit_ptr++) { + const uint32_t digit = (uint32_t) (*digit_ptr - '0'); + + /* Verify that the character in CPU revision is a decimal digit */ + if (digit >= 10) { + cpuinfo_log_warning("CPU revision %.*s in /proc/cpuinfo is ignored due to unexpected non-digit character '%c' at offset %zu", + (int) (cpu_revision_end - cpu_revision_start), cpu_revision_start, + *digit_ptr, (size_t) (digit_ptr - cpu_revision_start)); + return; + } + + cpu_revision = cpu_revision * 10 + digit; + } + + processor->midr = midr_set_revision(processor->midr, cpu_revision); + processor->flags |= CPUINFO_ARM_LINUX_VALID_REVISION | CPUINFO_ARM_LINUX_VALID_PROCESSOR; +} + +#if CPUINFO_ARCH_ARM +/* + * Decode one of the cache-related numbers reported by Linux kernel + * for pre-ARMv7 architecture. 
+ * An example cache-related information in /proc/cpuinfo: + * + * I size : 32768 + * I assoc : 4 + * I line length : 32 + * I sets : 256 + * D size : 16384 + * D assoc : 4 + * D line length : 32 + * D sets : 128 + * + */ +static void parse_cache_number( + const char* number_start, + const char* number_end, + const char* number_name, + uint32_t number_ptr[restrict static 1], + uint32_t flags[restrict static 1], + uint32_t number_mask) +{ + uint32_t number = 0; + for (const char* digit_ptr = number_start; digit_ptr != number_end; digit_ptr++) { + const uint32_t digit = *digit_ptr - '0'; + if (digit >= 10) { + cpuinfo_log_warning("%s %.*s in /proc/cpuinfo is ignored due to unexpected non-digit character '%c' at offset %zu", + number_name, (int) (number_end - number_start), number_start, + *digit_ptr, (size_t) (digit_ptr - number_start)); + return; + } + + number = number * 10 + digit; + } + + if (number == 0) { + cpuinfo_log_warning("%s %.*s in /proc/cpuinfo is ignored due to invalid value of zero reported by the kernel", + number_name, (int) (number_end - number_start), number_start); + } + + /* If the number specifies a cache line size, verify that is a reasonable power of 2 */ + if (number_mask & CPUINFO_ARM_LINUX_VALID_CACHE_LINE) { + switch (number) { + case 16: + case 32: + case 64: + case 128: + break; + default: + cpuinfo_log_warning("invalid %s %.*s is ignored: a value of 16, 32, 64, or 128 expected", + number_name, (int) (number_end - number_start), number_start); + } + } + + *number_ptr = number; + *flags |= number_mask | CPUINFO_ARM_LINUX_VALID_PROCESSOR; +} +#endif /* CPUINFO_ARCH_ARM */ + +struct proc_cpuinfo_parser_state { + char* hardware; + char* revision; + uint32_t processor_index; + uint32_t max_processors_count; + struct cpuinfo_arm_linux_processor* processors; + struct cpuinfo_arm_linux_processor dummy_processor; +}; + +/* + * Decode a single line of /proc/cpuinfo information. + * Lines have format [ ]*:[ ] + * An example of /proc/cpuinfo (from Pandaboard-ES): + * + * Processor : ARMv7 Processor rev 10 (v7l) + * processor : 0 + * BogoMIPS : 1392.74 + * + * processor : 1 + * BogoMIPS : 1363.33 + * + * Features : swp half thumb fastmult vfp edsp thumbee neon vfpv3 + * CPU implementer : 0x41 + * CPU architecture: 7 + * CPU variant : 0x2 + * CPU part : 0xc09 + * CPU revision : 10 + * + * Hardware : OMAP4 Panda board + * Revision : 0020 + * Serial : 0000000000000000 + */ +static bool parse_line( + const char* line_start, + const char* line_end, + struct proc_cpuinfo_parser_state state[restrict static 1], + uint64_t line_number) +{ + /* Empty line. Skip. */ + if (line_start == line_end) { + return true; + } + + /* Search for ':' on the line. */ + const char* separator = line_start; + for (; separator != line_end; separator++) { + if (*separator == ':') { + break; + } + } + /* Skip line if no ':' separator was found. */ + if (separator == line_end) { + cpuinfo_log_info("Line %.*s in /proc/cpuinfo is ignored: key/value separator ':' not found", + (int) (line_end - line_start), line_start); + return true; + } + + /* Skip trailing spaces in key part. */ + const char* key_end = separator; + for (; key_end != line_start; key_end--) { + if (key_end[-1] != ' ' && key_end[-1] != '\t') { + break; + } + } + /* Skip line if key contains nothing but spaces. */ + if (key_end == line_start) { + cpuinfo_log_info("Line %.*s in /proc/cpuinfo is ignored: key contains only spaces", + (int) (line_end - line_start), line_start); + return true; + } + + /* Skip leading spaces in value part. 
*/ + const char* value_start = separator + 1; + for (; value_start != line_end; value_start++) { + if (*value_start != ' ') { + break; + } + } + /* Value part contains nothing but spaces. Skip line. */ + if (value_start == line_end) { + cpuinfo_log_info("Line %.*s in /proc/cpuinfo is ignored: value contains only spaces", + (int) (line_end - line_start), line_start); + return true; + } + + /* Skip trailing spaces in value part (if any) */ + const char* value_end = line_end; + for (; value_end != value_start; value_end--) { + if (value_end[-1] != ' ') { + break; + } + } + + const uint32_t processor_index = state->processor_index; + const uint32_t max_processors_count = state->max_processors_count; + struct cpuinfo_arm_linux_processor* processors = state->processors; + struct cpuinfo_arm_linux_processor* processor = &state->dummy_processor; + if (processor_index < max_processors_count) { + processor = &processors[processor_index]; + } + + const size_t key_length = key_end - line_start; + switch (key_length) { + case 6: + if (memcmp(line_start, "Serial", key_length) == 0) { + /* Usually contains just zeros, useless */ +#if CPUINFO_ARCH_ARM + } else if (memcmp(line_start, "I size", key_length) == 0) { + parse_cache_number(value_start, value_end, + "instruction cache size", &processor->proc_cpuinfo_cache.i_size, + &processor->flags, CPUINFO_ARM_LINUX_VALID_ICACHE_SIZE); + } else if (memcmp(line_start, "I sets", key_length) == 0) { + parse_cache_number(value_start, value_end, + "instruction cache sets", &processor->proc_cpuinfo_cache.i_sets, + &processor->flags, CPUINFO_ARM_LINUX_VALID_ICACHE_SETS); + } else if (memcmp(line_start, "D size", key_length) == 0) { + parse_cache_number(value_start, value_end, + "data cache size", &processor->proc_cpuinfo_cache.d_size, + &processor->flags, CPUINFO_ARM_LINUX_VALID_DCACHE_SIZE); + } else if (memcmp(line_start, "D sets", key_length) == 0) { + parse_cache_number(value_start, value_end, + "data cache sets", &processor->proc_cpuinfo_cache.d_sets, + &processor->flags, CPUINFO_ARM_LINUX_VALID_DCACHE_SETS); +#endif /* CPUINFO_ARCH_ARM */ + } else { + goto unknown; + } + break; +#if CPUINFO_ARCH_ARM + case 7: + if (memcmp(line_start, "I assoc", key_length) == 0) { + parse_cache_number(value_start, value_end, + "instruction cache associativity", &processor->proc_cpuinfo_cache.i_assoc, + &processor->flags, CPUINFO_ARM_LINUX_VALID_ICACHE_WAYS); + } else if (memcmp(line_start, "D assoc", key_length) == 0) { + parse_cache_number(value_start, value_end, + "data cache associativity", &processor->proc_cpuinfo_cache.d_assoc, + &processor->flags, CPUINFO_ARM_LINUX_VALID_DCACHE_WAYS); + } else { + goto unknown; + } + break; +#endif /* CPUINFO_ARCH_ARM */ + case 8: + if (memcmp(line_start, "CPU part", key_length) == 0) { + parse_cpu_part(value_start, value_end, processor); + } else if (memcmp(line_start, "Features", key_length) == 0) { + parse_features(value_start, value_end, processor); + } else if (memcmp(line_start, "BogoMIPS", key_length) == 0) { + /* BogoMIPS is useless, don't parse */ + } else if (memcmp(line_start, "Hardware", key_length) == 0) { + size_t value_length = value_end - value_start; + if (value_length > CPUINFO_HARDWARE_VALUE_MAX) { + cpuinfo_log_info( + "length of Hardware value \"%.*s\" in /proc/cpuinfo exceeds limit (%d): truncating to the limit", + (int) value_length, value_start, CPUINFO_HARDWARE_VALUE_MAX); + value_length = CPUINFO_HARDWARE_VALUE_MAX; + } else { + state->hardware[value_length] = '\0'; + } + memcpy(state->hardware, value_start, 
value_length); + cpuinfo_log_debug("parsed /proc/cpuinfo Hardware = \"%.*s\"", (int) value_length, value_start); + } else if (memcmp(line_start, "Revision", key_length) == 0) { + size_t value_length = value_end - value_start; + if (value_length > CPUINFO_REVISION_VALUE_MAX) { + cpuinfo_log_info( + "length of Revision value \"%.*s\" in /proc/cpuinfo exceeds limit (%d): truncating to the limit", + (int) value_length, value_start, CPUINFO_REVISION_VALUE_MAX); + value_length = CPUINFO_REVISION_VALUE_MAX; + } else { + state->revision[value_length] = '\0'; + } + memcpy(state->revision, value_start, value_length); + cpuinfo_log_debug("parsed /proc/cpuinfo Revision = \"%.*s\"", (int) value_length, value_start); + } else { + goto unknown; + } + break; + case 9: + if (memcmp(line_start, "processor", key_length) == 0) { + const uint32_t new_processor_index = parse_processor_number(value_start, value_end); + if (new_processor_index < processor_index) { + /* Strange: decreasing processor number */ + cpuinfo_log_warning( + "unexpectedly low processor number %"PRIu32" following processor %"PRIu32" in /proc/cpuinfo", + new_processor_index, processor_index); + } else if (new_processor_index > processor_index + 1) { + /* Strange, but common: skipped processor $(processor_index + 1) */ + cpuinfo_log_info( + "unexpectedly high processor number %"PRIu32" following processor %"PRIu32" in /proc/cpuinfo", + new_processor_index, processor_index); + } + if (new_processor_index < max_processors_count) { + /* Record that the processor was mentioned in /proc/cpuinfo */ + processors[new_processor_index].flags |= CPUINFO_ARM_LINUX_VALID_PROCESSOR; + } else { + /* Log and ignore processor */ + cpuinfo_log_warning("processor %"PRIu32" in /proc/cpuinfo is ignored: index exceeds system limit %"PRIu32, + new_processor_index, max_processors_count - 1); + } + state->processor_index = new_processor_index; + return true; + } else if (memcmp(line_start, "Processor", key_length) == 0) { + /* TODO: parse to fix misreported architecture, similar to Android's cpufeatures */ + } else { + goto unknown; + } + break; + case 11: + if (memcmp(line_start, "CPU variant", key_length) == 0) { + parse_cpu_variant(value_start, value_end, processor); + } else { + goto unknown; + } + break; + case 12: + if (memcmp(line_start, "CPU revision", key_length) == 0) { + parse_cpu_revision(value_start, value_end, processor); + } else { + goto unknown; + } + break; +#if CPUINFO_ARCH_ARM + case 13: + if (memcmp(line_start, "I line length", key_length) == 0) { + parse_cache_number(value_start, value_end, + "instruction cache line size", &processor->proc_cpuinfo_cache.i_line_length, + &processor->flags, CPUINFO_ARM_LINUX_VALID_ICACHE_LINE); + } else if (memcmp(line_start, "D line length", key_length) == 0) { + parse_cache_number(value_start, value_end, + "data cache line size", &processor->proc_cpuinfo_cache.d_line_length, + &processor->flags, CPUINFO_ARM_LINUX_VALID_DCACHE_LINE); + } else { + goto unknown; + } + break; +#endif /* CPUINFO_ARCH_ARM */ + case 15: + if (memcmp(line_start, "CPU implementer", key_length) == 0) { + parse_cpu_implementer(value_start, value_end, processor); + } else if (memcmp(line_start, "CPU implementor", key_length) == 0) { + parse_cpu_implementer(value_start, value_end, processor); + } else { + goto unknown; + } + break; + case 16: + if (memcmp(line_start, "CPU architecture", key_length) == 0) { + parse_cpu_architecture(value_start, value_end, processor); + } else { + goto unknown; + } + break; + default: + unknown: + 
cpuinfo_log_debug("unknown /proc/cpuinfo key: %.*s", (int) key_length, line_start); + + } + return true; +} + +bool cpuinfo_arm_linux_parse_proc_cpuinfo( + char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX], + char revision[restrict static CPUINFO_REVISION_VALUE_MAX], + uint32_t max_processors_count, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors_count]) +{ + struct proc_cpuinfo_parser_state state = { + .hardware = hardware, + .revision = revision, + .processor_index = 0, + .max_processors_count = max_processors_count, + .processors = processors, + }; + return cpuinfo_linux_parse_multiline_file("/proc/cpuinfo", BUFFER_SIZE, + (cpuinfo_line_callback) parse_line, &state); +} diff --git a/source/3rdparty/cpuinfo/src/arm/linux/hwcap.c b/source/3rdparty/cpuinfo/src/arm/linux/hwcap.c new file mode 100644 index 0000000..35e9994 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/hwcap.c @@ -0,0 +1,159 @@ +#include + +#include +#include +#include +#include +#include +#include +#include + +#if CPUINFO_MOCK + #include +#endif +#include +#include +#include + +#if CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_ARM && !defined(__ANDROID__) + #include +#else + #define AT_HWCAP 16 + #define AT_HWCAP2 26 +#endif + + +#if CPUINFO_MOCK + static uint32_t mock_hwcap = 0; + void cpuinfo_set_hwcap(uint32_t hwcap) { + mock_hwcap = hwcap; + } + + static uint32_t mock_hwcap2 = 0; + void cpuinfo_set_hwcap2(uint32_t hwcap2) { + mock_hwcap2 = hwcap2; + } +#endif + + +#if CPUINFO_ARCH_ARM + typedef unsigned long (*getauxval_function_t)(unsigned long); + + bool cpuinfo_arm_linux_hwcap_from_getauxval( + uint32_t hwcap[restrict static 1], + uint32_t hwcap2[restrict static 1]) + { + #if CPUINFO_MOCK + *hwcap = mock_hwcap; + *hwcap2 = mock_hwcap2; + return true; + #elif defined(__ANDROID__) + /* Android: dynamically check if getauxval is supported */ + void* libc = NULL; + getauxval_function_t getauxval = NULL; + + dlerror(); + libc = dlopen("libc.so", RTLD_LAZY); + if (libc == NULL) { + cpuinfo_log_warning("failed to load libc.so: %s", dlerror()); + goto cleanup; + } + + getauxval = (getauxval_function_t) dlsym(libc, "getauxval"); + if (getauxval == NULL) { + cpuinfo_log_info("failed to locate getauxval in libc.so: %s", dlerror()); + goto cleanup; + } + + *hwcap = getauxval(AT_HWCAP); + *hwcap2 = getauxval(AT_HWCAP2); + + cleanup: + if (libc != NULL) { + dlclose(libc); + libc = NULL; + } + return getauxval != NULL; + #else + /* GNU/Linux: getauxval is always supported */ + *hwcap = getauxval(AT_HWCAP); + *hwcap2 = getauxval(AT_HWCAP2); + return true; + #endif + } + + #ifdef __ANDROID__ + bool cpuinfo_arm_linux_hwcap_from_procfs( + uint32_t hwcap[restrict static 1], + uint32_t hwcap2[restrict static 1]) + { + #if CPUINFO_MOCK + *hwcap = mock_hwcap; + *hwcap2 = mock_hwcap2; + return true; + #else + uint32_t hwcaps[2] = { 0, 0 }; + bool result = false; + int file = -1; + + file = open("/proc/self/auxv", O_RDONLY); + if (file == -1) { + cpuinfo_log_warning("failed to open /proc/self/auxv: %s", strerror(errno)); + goto cleanup; + } + + ssize_t bytes_read; + do { + Elf32_auxv_t elf_auxv; + bytes_read = read(file, &elf_auxv, sizeof(Elf32_auxv_t)); + if (bytes_read < 0) { + cpuinfo_log_warning("failed to read /proc/self/auxv: %s", strerror(errno)); + goto cleanup; + } else if (bytes_read > 0) { + if (bytes_read == sizeof(elf_auxv)) { + switch (elf_auxv.a_type) { + case AT_HWCAP: + hwcaps[0] = (uint32_t) elf_auxv.a_un.a_val; + break; + case AT_HWCAP2: + hwcaps[1] = (uint32_t) elf_auxv.a_un.a_val; + 
break; + } + } else { + cpuinfo_log_warning( + "failed to read %zu bytes from /proc/self/auxv: %zu bytes available", + sizeof(elf_auxv), (size_t) bytes_read); + goto cleanup; + } + } + } while (bytes_read == sizeof(Elf32_auxv_t)); + + /* Success, commit results */ + *hwcap = hwcaps[0]; + *hwcap2 = hwcaps[1]; + result = true; + + cleanup: + if (file != -1) { + close(file); + file = -1; + } + return result; + #endif + } + #endif /* __ANDROID__ */ +#elif CPUINFO_ARCH_ARM64 + void cpuinfo_arm_linux_hwcap_from_getauxval( + uint32_t hwcap[restrict static 1], + uint32_t hwcap2[restrict static 1]) + { + #if CPUINFO_MOCK + *hwcap = mock_hwcap; + *hwcap2 = mock_hwcap2; + #else + *hwcap = (uint32_t) getauxval(AT_HWCAP); + *hwcap2 = (uint32_t) getauxval(AT_HWCAP2); + return ; + #endif + } +#endif diff --git a/source/3rdparty/cpuinfo/src/arm/linux/init.c b/source/3rdparty/cpuinfo/src/arm/linux/init.c new file mode 100644 index 0000000..d3da5a9 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/init.c @@ -0,0 +1,765 @@ +#include +#include +#include +#include + +#include +#include +#if defined(__ANDROID__) + #include +#endif +#include +#include +#include +#include +#include + + +struct cpuinfo_arm_isa cpuinfo_isa = { 0 }; + +static struct cpuinfo_package package = { { 0 } }; + +static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) { + return (bitfield & mask) == mask; +} + +static inline uint32_t min(uint32_t a, uint32_t b) { + return a < b ? a : b; +} + +static inline int cmp(uint32_t a, uint32_t b) { + return (a > b) - (a < b); +} + +static bool cluster_siblings_parser( + uint32_t processor, uint32_t siblings_start, uint32_t siblings_end, + struct cpuinfo_arm_linux_processor* processors) +{ + processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER; + uint32_t package_leader_id = processors[processor].package_leader_id; + + for (uint32_t sibling = siblings_start; sibling < siblings_end; sibling++) { + if (!bitmask_all(processors[sibling].flags, CPUINFO_LINUX_FLAG_VALID)) { + cpuinfo_log_info("invalid processor %"PRIu32" reported as a sibling for processor %"PRIu32, + sibling, processor); + continue; + } + + const uint32_t sibling_package_leader_id = processors[sibling].package_leader_id; + if (sibling_package_leader_id < package_leader_id) { + package_leader_id = sibling_package_leader_id; + } + + processors[sibling].package_leader_id = package_leader_id; + processors[sibling].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER; + } + + processors[processor].package_leader_id = package_leader_id; + + return true; +} + +static int cmp_arm_linux_processor(const void* ptr_a, const void* ptr_b) { + const struct cpuinfo_arm_linux_processor* processor_a = (const struct cpuinfo_arm_linux_processor*) ptr_a; + const struct cpuinfo_arm_linux_processor* processor_b = (const struct cpuinfo_arm_linux_processor*) ptr_b; + + /* Move usable processors towards the start of the array */ + const bool usable_a = bitmask_all(processor_a->flags, CPUINFO_LINUX_FLAG_VALID); + const bool usable_b = bitmask_all(processor_b->flags, CPUINFO_LINUX_FLAG_VALID); + if (usable_a != usable_b) { + return (int) usable_b - (int) usable_a; + } + + /* Compare based on core type (e.g. Cortex-A57 < Cortex-A53) */ + const uint32_t midr_a = processor_a->midr; + const uint32_t midr_b = processor_b->midr; + if (midr_a != midr_b) { + const uint32_t score_a = midr_score_core(midr_a); + const uint32_t score_b = midr_score_core(midr_b); + if (score_a != score_b) { + return score_a > score_b ? 
-1 : 1; + } + } + + /* Compare based on core frequency (e.g. 2.0 GHz < 1.2 GHz) */ + const uint32_t frequency_a = processor_a->max_frequency; + const uint32_t frequency_b = processor_b->max_frequency; + if (frequency_a != frequency_b) { + return frequency_a > frequency_b ? -1 : 1; + } + + /* Compare based on cluster leader id (i.e. cluster 1 < cluster 0) */ + const uint32_t cluster_a = processor_a->package_leader_id; + const uint32_t cluster_b = processor_b->package_leader_id; + if (cluster_a != cluster_b) { + return cluster_a > cluster_b ? -1 : 1; + } + + /* Compare based on system processor id (i.e. processor 0 < processor 1) */ + const uint32_t id_a = processor_a->system_processor_id; + const uint32_t id_b = processor_b->system_processor_id; + return cmp(id_a, id_b); +} + +void cpuinfo_arm_linux_init(void) { + struct cpuinfo_arm_linux_processor* arm_linux_processors = NULL; + struct cpuinfo_processor* processors = NULL; + struct cpuinfo_core* cores = NULL; + struct cpuinfo_cluster* clusters = NULL; + struct cpuinfo_uarch_info* uarchs = NULL; + struct cpuinfo_cache* l1i = NULL; + struct cpuinfo_cache* l1d = NULL; + struct cpuinfo_cache* l2 = NULL; + struct cpuinfo_cache* l3 = NULL; + const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL; + const struct cpuinfo_core** linux_cpu_to_core_map = NULL; + uint32_t* linux_cpu_to_uarch_index_map = NULL; + + const uint32_t max_processors_count = cpuinfo_linux_get_max_processors_count(); + cpuinfo_log_debug("system maximum processors count: %"PRIu32, max_processors_count); + + const uint32_t max_possible_processors_count = 1 + + cpuinfo_linux_get_max_possible_processor(max_processors_count); + cpuinfo_log_debug("maximum possible processors count: %"PRIu32, max_possible_processors_count); + const uint32_t max_present_processors_count = 1 + + cpuinfo_linux_get_max_present_processor(max_processors_count); + cpuinfo_log_debug("maximum present processors count: %"PRIu32, max_present_processors_count); + + uint32_t valid_processor_mask = 0; + uint32_t arm_linux_processors_count = max_processors_count; + if (max_present_processors_count != 0) { + arm_linux_processors_count = min(arm_linux_processors_count, max_present_processors_count); + valid_processor_mask = CPUINFO_LINUX_FLAG_PRESENT; + } + if (max_possible_processors_count != 0) { + arm_linux_processors_count = min(arm_linux_processors_count, max_possible_processors_count); + valid_processor_mask |= CPUINFO_LINUX_FLAG_POSSIBLE; + } + if ((max_present_processors_count | max_possible_processors_count) == 0) { + cpuinfo_log_error("failed to parse both lists of possible and present processors"); + return; + } + + arm_linux_processors = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_arm_linux_processor)); + if (arm_linux_processors == NULL) { + cpuinfo_log_error( + "failed to allocate %zu bytes for descriptions of %"PRIu32" ARM logical processors", + arm_linux_processors_count * sizeof(struct cpuinfo_arm_linux_processor), + arm_linux_processors_count); + return; + } + + if (max_possible_processors_count) { + cpuinfo_linux_detect_possible_processors( + arm_linux_processors_count, &arm_linux_processors->flags, + sizeof(struct cpuinfo_arm_linux_processor), + CPUINFO_LINUX_FLAG_POSSIBLE); + } + + if (max_present_processors_count) { + cpuinfo_linux_detect_present_processors( + arm_linux_processors_count, &arm_linux_processors->flags, + sizeof(struct cpuinfo_arm_linux_processor), + CPUINFO_LINUX_FLAG_PRESENT); + } + +#if defined(__ANDROID__) + struct cpuinfo_android_properties 
android_properties; + cpuinfo_arm_android_parse_properties(&android_properties); +#else + char proc_cpuinfo_hardware[CPUINFO_HARDWARE_VALUE_MAX]; +#endif + char proc_cpuinfo_revision[CPUINFO_REVISION_VALUE_MAX]; + + if (!cpuinfo_arm_linux_parse_proc_cpuinfo( +#if defined(__ANDROID__) + android_properties.proc_cpuinfo_hardware, +#else + proc_cpuinfo_hardware, +#endif + proc_cpuinfo_revision, + arm_linux_processors_count, + arm_linux_processors)) { + cpuinfo_log_error("failed to parse processor information from /proc/cpuinfo"); + return; + } + + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, valid_processor_mask)) { + arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_VALID; + cpuinfo_log_debug("parsed processor %"PRIu32" MIDR 0x%08"PRIx32, + i, arm_linux_processors[i].midr); + } + } + + uint32_t valid_processors = 0, last_midr = 0; + #if CPUINFO_ARCH_ARM + uint32_t last_architecture_version = 0, last_architecture_flags = 0; + #endif + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + arm_linux_processors[i].system_processor_id = i; + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + valid_processors += 1; + + if (!(arm_linux_processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR)) { + /* + * Processor is in possible and present lists, but not reported in /proc/cpuinfo. + * This is fairly common: high-index processors can be not reported if they are offline. + */ + cpuinfo_log_info("processor %"PRIu32" is not listed in /proc/cpuinfo", i); + } + + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) { + last_midr = arm_linux_processors[i].midr; + } + #if CPUINFO_ARCH_ARM + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_ARCHITECTURE)) { + last_architecture_version = arm_linux_processors[i].architecture_version; + last_architecture_flags = arm_linux_processors[i].architecture_flags; + } + #endif + } else { + /* Processor reported in /proc/cpuinfo, but not in possible and/or present lists: log and ignore */ + if (!(arm_linux_processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR)) { + cpuinfo_log_warning("invalid processor %"PRIu32" reported in /proc/cpuinfo", i); + } + } + } + +#if defined(__ANDROID__) + const struct cpuinfo_arm_chipset chipset = + cpuinfo_arm_android_decode_chipset(&android_properties, valid_processors, 0); +#else + const struct cpuinfo_arm_chipset chipset = + cpuinfo_arm_linux_decode_chipset(proc_cpuinfo_hardware, proc_cpuinfo_revision, valid_processors, 0); +#endif + + #if CPUINFO_ARCH_ARM + uint32_t isa_features = 0, isa_features2 = 0; + #ifdef __ANDROID__ + /* + * On Android before API 20, libc.so does not provide getauxval function. + * Thus, we try to dynamically find it, or use two fallback mechanisms: + * 1. dlopen libc.so, and try to find getauxval + * 2. Parse /proc/self/auxv procfs file + * 3. Use features reported in /proc/cpuinfo + */ + if (!cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2)) { + /* getauxval can't be used, fall back to parsing /proc/self/auxv */ + if (!cpuinfo_arm_linux_hwcap_from_procfs(&isa_features, &isa_features2)) { + /* + * Reading /proc/self/auxv failed, probably due to file permissions. + * Use information from /proc/cpuinfo to detect ISA. + * + * If different processors report different ISA features, take the intersection. 
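+ * For example, if one core's Features line lists "idiva idivt" and another core's does not, the intersection below leaves the IDIVA/IDIVT bits cleared.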
+ */ + uint32_t processors_with_features = 0; + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_ARM_LINUX_VALID_FEATURES)) { + if (processors_with_features == 0) { + isa_features = arm_linux_processors[i].features; + isa_features2 = arm_linux_processors[i].features2; + } else { + isa_features &= arm_linux_processors[i].features; + isa_features2 &= arm_linux_processors[i].features2; + } + processors_with_features += 1; + } + } + } + } + #else + /* On GNU/Linux getauxval is always available */ + cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2); + #endif + cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo( + isa_features, isa_features2, + last_midr, last_architecture_version, last_architecture_flags, + &chipset, &cpuinfo_isa); + #elif CPUINFO_ARCH_ARM64 + uint32_t isa_features = 0, isa_features2 = 0; + /* getauxval is always available on ARM64 Android */ + cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2); + cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo( + isa_features, isa_features2, last_midr, &chipset, &cpuinfo_isa); + #endif + + /* Detect min/max frequency and package ID */ + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + const uint32_t max_frequency = cpuinfo_linux_get_processor_max_frequency(i); + if (max_frequency != 0) { + arm_linux_processors[i].max_frequency = max_frequency; + arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY; + } + + const uint32_t min_frequency = cpuinfo_linux_get_processor_min_frequency(i); + if (min_frequency != 0) { + arm_linux_processors[i].min_frequency = min_frequency; + arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY; + } + + if (cpuinfo_linux_get_processor_package_id(i, &arm_linux_processors[i].package_id)) { + arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_ID; + } + } + } + + /* Initialize topology group IDs */ + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + arm_linux_processors[i].package_leader_id = i; + } + + /* Propagate topology group IDs among siblings */ + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + continue; + } + + if (arm_linux_processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_ID) { + cpuinfo_linux_detect_core_siblings( + arm_linux_processors_count, i, + (cpuinfo_siblings_callback) cluster_siblings_parser, + arm_linux_processors); + } + } + + /* Propagate all cluster IDs */ + uint32_t clustered_processors = 0; + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) { + clustered_processors += 1; + + const uint32_t package_leader_id = arm_linux_processors[i].package_leader_id; + if (package_leader_id < i) { + arm_linux_processors[i].package_leader_id = arm_linux_processors[package_leader_id].package_leader_id; + } + + cpuinfo_log_debug("processor %"PRIu32" clustered with processor %"PRIu32" as inferred from system siblings lists", + i, arm_linux_processors[i].package_leader_id); + } + } + + if (clustered_processors != valid_processors) { + /* + * Topology information about some or all logical processors may be unavailable, for the following reasons: + * - Linux kernel is too old, or configured without support for topology information in sysfs. 
+ * - Core is offline, and Linux kernel is configured to not report topology for offline cores. + * + * In this case, we assign processors to clusters using two methods: + * - Try heuristic cluster configurations (e.g. 6-core SoC usually has 4+2 big.LITTLE configuration). + * - If heuristic failed, assign processors to core clusters in a sequential scan. + */ + if (!cpuinfo_arm_linux_detect_core_clusters_by_heuristic(valid_processors, arm_linux_processors_count, arm_linux_processors)) { + cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(arm_linux_processors_count, arm_linux_processors); + } + } + + cpuinfo_arm_linux_count_cluster_processors(arm_linux_processors_count, arm_linux_processors); + + const uint32_t cluster_count = cpuinfo_arm_linux_detect_cluster_midr( + &chipset, + arm_linux_processors_count, valid_processors, arm_linux_processors); + + /* Initialize core vendor, uarch, MIDR, and frequency for every logical processor */ + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + const uint32_t cluster_leader = arm_linux_processors[i].package_leader_id; + if (cluster_leader == i) { + /* Cluster leader: decode core vendor and uarch */ + cpuinfo_arm_decode_vendor_uarch( + arm_linux_processors[cluster_leader].midr, +#if CPUINFO_ARCH_ARM + !!(arm_linux_processors[cluster_leader].features & CPUINFO_ARM_LINUX_FEATURE_VFPV4), +#endif + &arm_linux_processors[cluster_leader].vendor, + &arm_linux_processors[cluster_leader].uarch); + } else { + /* Cluster non-leader: copy vendor, uarch, MIDR, and frequency from cluster leader */ + arm_linux_processors[i].flags |= arm_linux_processors[cluster_leader].flags & + (CPUINFO_ARM_LINUX_VALID_MIDR | CPUINFO_LINUX_FLAG_MAX_FREQUENCY); + arm_linux_processors[i].midr = arm_linux_processors[cluster_leader].midr; + arm_linux_processors[i].vendor = arm_linux_processors[cluster_leader].vendor; + arm_linux_processors[i].uarch = arm_linux_processors[cluster_leader].uarch; + arm_linux_processors[i].max_frequency = arm_linux_processors[cluster_leader].max_frequency; + } + } + } + + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + cpuinfo_log_debug("post-analysis processor %"PRIu32": MIDR %08"PRIx32" frequency %"PRIu32, + i, arm_linux_processors[i].midr, arm_linux_processors[i].max_frequency); + } + } + + qsort(arm_linux_processors, arm_linux_processors_count, + sizeof(struct cpuinfo_arm_linux_processor), cmp_arm_linux_processor); + + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + cpuinfo_log_debug("post-sort processor %"PRIu32": system id %"PRIu32" MIDR %08"PRIx32" frequency %"PRIu32, + i, arm_linux_processors[i].system_processor_id, arm_linux_processors[i].midr, arm_linux_processors[i].max_frequency); + } + } + + uint32_t uarchs_count = 0; + enum cpuinfo_uarch last_uarch; + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + if (uarchs_count == 0 || arm_linux_processors[i].uarch != last_uarch) { + last_uarch = arm_linux_processors[i].uarch; + uarchs_count += 1; + } + arm_linux_processors[i].uarch_index = uarchs_count - 1; + } + } + + /* + * Assumptions: + * - No SMP (i.e. each core supports only one hardware thread). + * - Level 1 instruction and data caches are private to the core clusters. 
+ * - Level 2 and level 3 cache is shared between cores in the same cluster. + */ + cpuinfo_arm_chipset_to_string(&chipset, package.name); + package.processor_count = valid_processors; + package.core_count = valid_processors; + package.cluster_count = cluster_count; + + processors = calloc(valid_processors, sizeof(struct cpuinfo_processor)); + if (processors == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors", + valid_processors * sizeof(struct cpuinfo_processor), valid_processors); + goto cleanup; + } + + cores = calloc(valid_processors, sizeof(struct cpuinfo_core)); + if (cores == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores", + valid_processors * sizeof(struct cpuinfo_core), valid_processors); + goto cleanup; + } + + clusters = calloc(cluster_count, sizeof(struct cpuinfo_cluster)); + if (clusters == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters", + cluster_count * sizeof(struct cpuinfo_cluster), cluster_count); + goto cleanup; + } + + uarchs = calloc(uarchs_count, sizeof(struct cpuinfo_uarch_info)); + if (uarchs == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" microarchitectures", + uarchs_count * sizeof(struct cpuinfo_uarch_info), uarchs_count); + goto cleanup; + } + + linux_cpu_to_processor_map = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_processor*)); + if (linux_cpu_to_processor_map == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for %"PRIu32" logical processor mapping entries", + arm_linux_processors_count * sizeof(struct cpuinfo_processor*), arm_linux_processors_count); + goto cleanup; + } + + linux_cpu_to_core_map = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_core*)); + if (linux_cpu_to_core_map == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for %"PRIu32" core mapping entries", + arm_linux_processors_count * sizeof(struct cpuinfo_core*), arm_linux_processors_count); + goto cleanup; + } + + if (uarchs_count > 1) { + linux_cpu_to_uarch_index_map = calloc(arm_linux_processors_count, sizeof(uint32_t)); + if (linux_cpu_to_uarch_index_map == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for %"PRIu32" uarch index mapping entries", + arm_linux_processors_count * sizeof(uint32_t), arm_linux_processors_count); + goto cleanup; + } + } + + l1i = calloc(valid_processors, sizeof(struct cpuinfo_cache)); + if (l1i == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches", + valid_processors * sizeof(struct cpuinfo_cache), valid_processors); + goto cleanup; + } + + l1d = calloc(valid_processors, sizeof(struct cpuinfo_cache)); + if (l1d == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches", + valid_processors * sizeof(struct cpuinfo_cache), valid_processors); + goto cleanup; + } + + uint32_t uarchs_index = 0; + for (uint32_t i = 0; i < arm_linux_processors_count; i++) { + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + if (uarchs_index == 0 || arm_linux_processors[i].uarch != last_uarch) { + last_uarch = arm_linux_processors[i].uarch; + uarchs[uarchs_index] = (struct cpuinfo_uarch_info) { + .uarch = arm_linux_processors[i].uarch, + .midr = arm_linux_processors[i].midr, + }; + uarchs_index += 1; + } + uarchs[uarchs_index - 1].processor_count += 1; + uarchs[uarchs_index - 1].core_count += 1; + } + } + + 
uint32_t l2_count = 0, l3_count = 0, big_l3_size = 0, cluster_id = UINT32_MAX; + /* Indication whether L3 (if it exists) is shared between all cores */ + bool shared_l3 = true; + /* Populate cache information structures in l1i, l1d */ + for (uint32_t i = 0; i < valid_processors; i++) { + if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) { + cluster_id += 1; + clusters[cluster_id] = (struct cpuinfo_cluster) { + .processor_start = i, + .processor_count = arm_linux_processors[i].package_processor_count, + .core_start = i, + .core_count = arm_linux_processors[i].package_processor_count, + .cluster_id = cluster_id, + .package = &package, + .vendor = arm_linux_processors[i].vendor, + .uarch = arm_linux_processors[i].uarch, + .midr = arm_linux_processors[i].midr, + }; + } + + processors[i].smt_id = 0; + processors[i].core = cores + i; + processors[i].cluster = clusters + cluster_id; + processors[i].package = &package; + processors[i].linux_id = (int) arm_linux_processors[i].system_processor_id; + processors[i].cache.l1i = l1i + i; + processors[i].cache.l1d = l1d + i; + linux_cpu_to_processor_map[arm_linux_processors[i].system_processor_id] = &processors[i]; + + cores[i].processor_start = i; + cores[i].processor_count = 1; + cores[i].core_id = i; + cores[i].cluster = clusters + cluster_id; + cores[i].package = &package; + cores[i].vendor = arm_linux_processors[i].vendor; + cores[i].uarch = arm_linux_processors[i].uarch; + cores[i].midr = arm_linux_processors[i].midr; + linux_cpu_to_core_map[arm_linux_processors[i].system_processor_id] = &cores[i]; + + if (linux_cpu_to_uarch_index_map != NULL) { + linux_cpu_to_uarch_index_map[arm_linux_processors[i].system_processor_id] = + arm_linux_processors[i].uarch_index; + } + + struct cpuinfo_cache temp_l2 = { 0 }, temp_l3 = { 0 }; + cpuinfo_arm_decode_cache( + arm_linux_processors[i].uarch, + arm_linux_processors[i].package_processor_count, + arm_linux_processors[i].midr, + &chipset, + cluster_id, + arm_linux_processors[i].architecture_version, + &l1i[i], &l1d[i], &temp_l2, &temp_l3); + l1i[i].processor_start = l1d[i].processor_start = i; + l1i[i].processor_count = l1d[i].processor_count = 1; + #if CPUINFO_ARCH_ARM + /* L1I reported in /proc/cpuinfo overrides defaults */ + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_ICACHE)) { + l1i[i] = (struct cpuinfo_cache) { + .size = arm_linux_processors[i].proc_cpuinfo_cache.i_size, + .associativity = arm_linux_processors[i].proc_cpuinfo_cache.i_assoc, + .sets = arm_linux_processors[i].proc_cpuinfo_cache.i_sets, + .partitions = 1, + .line_size = arm_linux_processors[i].proc_cpuinfo_cache.i_line_length + }; + } + /* L1D reported in /proc/cpuinfo overrides defaults */ + if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_DCACHE)) { + l1d[i] = (struct cpuinfo_cache) { + .size = arm_linux_processors[i].proc_cpuinfo_cache.d_size, + .associativity = arm_linux_processors[i].proc_cpuinfo_cache.d_assoc, + .sets = arm_linux_processors[i].proc_cpuinfo_cache.d_sets, + .partitions = 1, + .line_size = arm_linux_processors[i].proc_cpuinfo_cache.d_line_length + }; + } + #endif + + if (temp_l3.size != 0) { + /* + * Assumptions: + * - L2 is private to each core + * - L3 is shared by cores in the same cluster + * - If cores in different clusters report the same L3, it is shared between all cores. 
+ */ + l2_count += 1; + if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) { + if (cluster_id == 0) { + big_l3_size = temp_l3.size; + l3_count = 1; + } else if (temp_l3.size != big_l3_size) { + /* If some cores have different L3 size, L3 is not shared between all cores */ + shared_l3 = false; + l3_count += 1; + } + } + } else { + /* If some cores don't have L3 cache, L3 is not shared between all cores */ + shared_l3 = false; + if (temp_l2.size != 0) { + /* Assume L2 is shared by cores in the same cluster */ + if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) { + l2_count += 1; + } + } + } + } + + if (l2_count != 0) { + l2 = calloc(l2_count, sizeof(struct cpuinfo_cache)); + if (l2 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches", + l2_count * sizeof(struct cpuinfo_cache), l2_count); + goto cleanup; + } + + if (l3_count != 0) { + l3 = calloc(l3_count, sizeof(struct cpuinfo_cache)); + if (l3 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches", + l3_count * sizeof(struct cpuinfo_cache), l3_count); + goto cleanup; + } + } + } + + cluster_id = UINT32_MAX; + uint32_t l2_index = UINT32_MAX, l3_index = UINT32_MAX; + for (uint32_t i = 0; i < valid_processors; i++) { + if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) { + cluster_id++; + } + + struct cpuinfo_cache dummy_l1i, dummy_l1d, temp_l2 = { 0 }, temp_l3 = { 0 }; + cpuinfo_arm_decode_cache( + arm_linux_processors[i].uarch, + arm_linux_processors[i].package_processor_count, + arm_linux_processors[i].midr, + &chipset, + cluster_id, + arm_linux_processors[i].architecture_version, + &dummy_l1i, &dummy_l1d, &temp_l2, &temp_l3); + + if (temp_l3.size != 0) { + /* + * Assumptions: + * - L2 is private to each core + * - L3 is shared by cores in the same cluster + * - If cores in different clusters report the same L3, it is shared between all cores. + */ + l2_index += 1; + l2[l2_index] = (struct cpuinfo_cache) { + .size = temp_l2.size, + .associativity = temp_l2.associativity, + .sets = temp_l2.sets, + .partitions = 1, + .line_size = temp_l2.line_size, + .flags = temp_l2.flags, + .processor_start = i, + .processor_count = 1, + }; + processors[i].cache.l2 = l2 + l2_index; + if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) { + l3_index += 1; + if (l3_index < l3_count) { + l3[l3_index] = (struct cpuinfo_cache) { + .size = temp_l3.size, + .associativity = temp_l3.associativity, + .sets = temp_l3.sets, + .partitions = 1, + .line_size = temp_l3.line_size, + .flags = temp_l3.flags, + .processor_start = i, + .processor_count = + shared_l3 ? 
valid_processors : arm_linux_processors[i].package_processor_count, + }; + } + } + if (shared_l3) { + processors[i].cache.l3 = l3; + } else if (l3_index < l3_count) { + processors[i].cache.l3 = l3 + l3_index; + } + } else if (temp_l2.size != 0) { + /* Assume L2 is shared by cores in the same cluster */ + if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) { + l2_index += 1; + l2[l2_index] = (struct cpuinfo_cache) { + .size = temp_l2.size, + .associativity = temp_l2.associativity, + .sets = temp_l2.sets, + .partitions = 1, + .line_size = temp_l2.line_size, + .flags = temp_l2.flags, + .processor_start = i, + .processor_count = arm_linux_processors[i].package_processor_count, + }; + } + processors[i].cache.l2 = l2 + l2_index; + } + } + + /* Commit */ + cpuinfo_processors = processors; + cpuinfo_cores = cores; + cpuinfo_clusters = clusters; + cpuinfo_packages = &package; + cpuinfo_uarchs = uarchs; + cpuinfo_cache[cpuinfo_cache_level_1i] = l1i; + cpuinfo_cache[cpuinfo_cache_level_1d] = l1d; + cpuinfo_cache[cpuinfo_cache_level_2] = l2; + cpuinfo_cache[cpuinfo_cache_level_3] = l3; + + cpuinfo_processors_count = valid_processors; + cpuinfo_cores_count = valid_processors; + cpuinfo_clusters_count = cluster_count; + cpuinfo_packages_count = 1; + cpuinfo_uarchs_count = uarchs_count; + cpuinfo_cache_count[cpuinfo_cache_level_1i] = valid_processors; + cpuinfo_cache_count[cpuinfo_cache_level_1d] = valid_processors; + cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count; + cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count; + cpuinfo_max_cache_size = cpuinfo_arm_compute_max_cache_size(&processors[0]); + + cpuinfo_linux_cpu_max = arm_linux_processors_count; + cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map; + cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map; + cpuinfo_linux_cpu_to_uarch_index_map = linux_cpu_to_uarch_index_map; + + __sync_synchronize(); + + cpuinfo_is_initialized = true; + + processors = NULL; + cores = NULL; + clusters = NULL; + uarchs = NULL; + l1i = l1d = l2 = l3 = NULL; + linux_cpu_to_processor_map = NULL; + linux_cpu_to_core_map = NULL; + linux_cpu_to_uarch_index_map = NULL; + +cleanup: + free(arm_linux_processors); + free(processors); + free(cores); + free(clusters); + free(uarchs); + free(l1i); + free(l1d); + free(l2); + free(l3); + free(linux_cpu_to_processor_map); + free(linux_cpu_to_core_map); + free(linux_cpu_to_uarch_index_map); +} diff --git a/source/3rdparty/cpuinfo/src/arm/linux/midr.c b/source/3rdparty/cpuinfo/src/arm/linux/midr.c new file mode 100644 index 0000000..0d8f03f --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/linux/midr.c @@ -0,0 +1,863 @@ +#include +#include +#include +#include + +#include +#include +#if defined(__ANDROID__) + #include +#endif +#include +#include +#include +#include +#include +#include + + +#define CLUSTERS_MAX 3 + +static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) { + return (bitfield & mask) == mask; +} + +/* Description of core clusters configuration in a chipset (identified by series and model number) */ +struct cluster_config { + /* Number of cores (logical processors) */ + uint8_t cores; + /* ARM chipset series (see cpuinfo_arm_chipset_series enum) */ + uint8_t series; + /* Chipset model number (see cpuinfo_arm_chipset struct) */ + uint16_t model; + /* Number of heterogenous clusters in the CPU package */ + uint8_t clusters; + /* + * Number of cores in each cluster: + # - Symmetric configurations: [0] = # cores + * - big.LITTLE configurations: [0] = # 
LITTLE cores, [1] = # big cores + * - Max.Med.Min configurations: [0] = # Min cores, [1] = # Med cores, [2] = # Max cores + */ + uint8_t cluster_cores[CLUSTERS_MAX]; + /* + * MIDR of cores in each cluster: + * - Symmetric configurations: [0] = core MIDR + * - big.LITTLE configurations: [0] = LITTLE core MIDR, [1] = big core MIDR + * - Max.Med.Min configurations: [0] = Min core MIDR, [1] = Med core MIDR, [2] = Max core MIDR + */ + uint32_t cluster_midr[CLUSTERS_MAX]; +}; + +/* + * The list of chipsets where MIDR may not be unambigiously decoded at least on some devices. + * The typical reasons for impossibility to decoded MIDRs are buggy kernels, which either do not report all MIDR + * information (e.g. on ATM7029 kernel doesn't report CPU Part), or chipsets have more than one type of cores + * (i.e. 4x Cortex-A53 + 4x Cortex-A53 is out) and buggy kernels report MIDR information only about some cores + * in /proc/cpuinfo (either only online cores, or only the core that reads /proc/cpuinfo). On these kernels/chipsets, + * it is not possible to detect all core types by just parsing /proc/cpuinfo, so we use chipset name and this table to + * find their MIDR (and thus microarchitecture, cache, etc). + * + * Note: not all chipsets with heterogeneous multiprocessing need an entry in this table. The following HMP + * chipsets always list information about all cores in /proc/cpuinfo: + * + * - Snapdragon 660 + * - Snapdragon 820 (MSM8996) + * - Snapdragon 821 (MSM8996PRO) + * - Snapdragon 835 (MSM8998) + * - Exynos 8895 + * - Kirin 960 + * + * As these are all new processors, there is hope that this table won't uncontrollably grow over time. + */ +static const struct cluster_config cluster_configs[] = { +#if CPUINFO_ARCH_ARM + { + /* + * MSM8916 (Snapdragon 410): 4x Cortex-A53 + * Some AArch32 phones use non-standard /proc/cpuinfo format. + */ + .cores = 4, + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = UINT16_C(8916), + .clusters = 1, + .cluster_cores = { + [0] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD030), + }, + }, + { + /* + * MSM8939 (Snapdragon 615): 4x Cortex-A53 + 4x Cortex-A53 + * Some AArch32 phones use non-standard /proc/cpuinfo format. 
+ */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = UINT16_C(8939), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD034), + }, + }, +#endif + { + /* MSM8956 (Snapdragon 650): 2x Cortex-A72 + 4x Cortex-A53 */ + .cores = 6, + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = UINT16_C(8956), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD080), + }, + }, + { + /* MSM8976/MSM8976PRO (Snapdragon 652/653): 4x Cortex-A72 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = UINT16_C(8976), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD080), + }, + }, + { + /* MSM8992 (Snapdragon 808): 2x Cortex-A57 + 4x Cortex-A53 */ + .cores = 6, + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = UINT16_C(8992), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD033), + [1] = UINT32_C(0x411FD072), + }, + }, + { + /* MSM8994/MSM8994V (Snapdragon 810): 4x Cortex-A57 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_qualcomm_msm, + .model = UINT16_C(8994), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD032), + [1] = UINT32_C(0x411FD071), + }, + }, +#if CPUINFO_ARCH_ARM + { + /* Exynos 5422: 4x Cortex-A15 + 4x Cortex-A7 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = UINT16_C(5422), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC073), + [1] = UINT32_C(0x412FC0F3), + }, + }, + { + /* Exynos 5430: 4x Cortex-A15 + 4x Cortex-A7 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = UINT16_C(5430), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC074), + [1] = UINT32_C(0x413FC0F3), + }, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* Exynos 5433: 4x Cortex-A57 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = UINT16_C(5433), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD031), + [1] = UINT32_C(0x411FD070), + }, + }, + { + /* Exynos 7420: 4x Cortex-A57 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = UINT16_C(7420), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD032), + [1] = UINT32_C(0x411FD070), + }, + }, + { + /* Exynos 8890: 4x Exynos M1 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_samsung_exynos, + .model = UINT16_C(8890), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x531F0011), + }, + }, +#if CPUINFO_ARCH_ARM + { + /* Kirin 920: 4x Cortex-A15 + 4x Cortex-A7 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = UINT16_C(920), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC075), + [1] = UINT32_C(0x413FC0F3), + }, + }, + { + /* Kirin 925: 4x Cortex-A15 + 4x Cortex-A7 */ + .cores = 8, + .series = 
cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = UINT16_C(925), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC075), + [1] = UINT32_C(0x413FC0F3), + }, + }, + { + /* Kirin 928: 4x Cortex-A15 + 4x Cortex-A7 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = UINT16_C(928), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC075), + [1] = UINT32_C(0x413FC0F3), + }, + }, +#endif /* CPUINFO_ARCH_ARM */ + { + /* Kirin 950: 4x Cortex-A72 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = UINT16_C(950), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD080), + }, + }, + { + /* Kirin 955: 4x Cortex-A72 + 4x Cortex-A53 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_hisilicon_kirin, + .model = UINT16_C(955), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD080), + }, + }, +#if CPUINFO_ARCH_ARM + { + /* MediaTek MT8135: 2x Cortex-A7 + 2x Cortex-A15 */ + .cores = 4, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(8135), + .clusters = 2, + .cluster_cores = { + [0] = 2, + [1] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC073), + [1] = UINT32_C(0x413FC0F2), + }, + }, +#endif + { + /* MediaTek MT8173: 2x Cortex-A72 + 2x Cortex-A53 */ + .cores = 4, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(8173), + .clusters = 2, + .cluster_cores = { + [0] = 2, + [1] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD032), + [1] = UINT32_C(0x410FD080), + }, + }, + { + /* MediaTek MT8176: 2x Cortex-A72 + 4x Cortex-A53 */ + .cores = 6, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(8176), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD032), + [1] = UINT32_C(0x410FD080), + }, + }, +#if CPUINFO_ARCH_ARM64 + { + /* + * MediaTek MT8735: 4x Cortex-A53 + * Some AArch64 phones use non-standard /proc/cpuinfo format. + */ + .cores = 4, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(8735), + .clusters = 1, + .cluster_cores = { + [0] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + }, + }, +#endif +#if CPUINFO_ARCH_ARM + { + /* + * MediaTek MT6592: 4x Cortex-A7 + 4x Cortex-A7 + * Some phones use non-standard /proc/cpuinfo format. 
+ */ + .cores = 4, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(6592), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC074), + [1] = UINT32_C(0x410FC074), + }, + }, + { + /* MediaTek MT6595: 4x Cortex-A17 + 4x Cortex-A7 */ + .cores = 8, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(6595), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC075), + [1] = UINT32_C(0x410FC0E0), + }, + }, +#endif + { + /* MediaTek MT6797: 2x Cortex-A72 + 4x Cortex-A53 + 4x Cortex-A53 */ + .cores = 10, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(6797), + .clusters = 3, + .cluster_cores = { + [0] = 4, + [1] = 4, + [2] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD034), + [2] = UINT32_C(0x410FD081), + }, + }, + { + /* MediaTek MT6799: 2x Cortex-A73 + 4x Cortex-A53 + 4x Cortex-A35 */ + .cores = 10, + .series = cpuinfo_arm_chipset_series_mediatek_mt, + .model = UINT16_C(6799), + .clusters = 3, + .cluster_cores = { + [0] = 4, + [1] = 4, + [2] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD041), + [1] = UINT32_C(0x410FD034), + [2] = UINT32_C(0x410FD092), + }, + }, + { + /* Rockchip RK3399: 2x Cortex-A72 + 4x Cortex-A53 */ + .cores = 6, + .series = cpuinfo_arm_chipset_series_rockchip_rk, + .model = UINT16_C(3399), + .clusters = 2, + .cluster_cores = { + [0] = 4, + [1] = 2, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FD034), + [1] = UINT32_C(0x410FD082), + }, + }, +#if CPUINFO_ARCH_ARM + { + /* Actions ATM8029: 4x Cortex-A5 + * Most devices use non-standard /proc/cpuinfo format. + */ + .cores = 4, + .series = cpuinfo_arm_chipset_series_actions_atm, + .model = UINT16_C(7029), + .clusters = 1, + .cluster_cores = { + [0] = 4, + }, + .cluster_midr = { + [0] = UINT32_C(0x410FC051), + }, + }, +#endif +}; + +/* + * Searches chipset name in mapping of chipset name to cores' MIDR values. If match is successful, initializes MIDR + * for all clusters' leaders with tabulated values. + * + * @param[in] chipset - chipset (SoC) name information. + * @param clusters_count - number of CPU core clusters detected in the SoC. + * @param cluster_leaders - indices of core clusters' leaders in the @p processors array. + * @param processors_count - number of usable logical processors in the system. + * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency, + * and decoded core cluster (package_leader_id) information. + * Upon successful return, processors[i].midr for all clusters' leaders contains the + * tabulated MIDR values. + * @param verify_midr - indicated whether the function should check that the MIDR values to be assigned to leaders of + * core clusters are consistent with known parts of their parsed values. + * Set if to false if the only MIDR value parsed from /proc/cpuinfo is for the last processor + * reported in /proc/cpuinfo and thus can't be unambiguously attributed to that processor. + * + * @retval true if the chipset was found in the mapping and core clusters' leaders initialized with MIDR values. + * @retval false if the chipset was not found in the mapping, or any consistency check failed. 
+ */ +static bool cpuinfo_arm_linux_detect_cluster_midr_by_chipset( + const struct cpuinfo_arm_chipset chipset[restrict static 1], + uint32_t clusters_count, + const uint32_t cluster_leaders[restrict static CLUSTERS_MAX], + uint32_t processors_count, + struct cpuinfo_arm_linux_processor processors[restrict static processors_count], + bool verify_midr) +{ + if (clusters_count <= CLUSTERS_MAX) { + for (uint32_t c = 0; c < CPUINFO_COUNT_OF(cluster_configs); c++) { + if (cluster_configs[c].model == chipset->model && cluster_configs[c].series == chipset->series) { + /* Verify that the total number of cores and clusters of cores matches expectation */ + if (cluster_configs[c].cores != processors_count || cluster_configs[c].clusters != clusters_count) { + return false; + } + + /* Verify that core cluster configuration matches expectation */ + for (uint32_t cluster = 0; cluster < clusters_count; cluster++) { + const uint32_t cluster_leader = cluster_leaders[cluster]; + if (cluster_configs[c].cluster_cores[cluster] != processors[cluster_leader].package_processor_count) { + return false; + } + } + + if (verify_midr) { + /* Verify known parts of MIDR */ + for (uint32_t cluster = 0; cluster < clusters_count; cluster++) { + const uint32_t cluster_leader = cluster_leaders[cluster]; + + /* Create a mask of known midr bits */ + uint32_t midr_mask = 0; + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK; + } + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK; + } + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) { + midr_mask |= CPUINFO_ARM_MIDR_PART_MASK; + } + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK; + } + + /* Verify the bits under the mask */ + if ((processors[cluster_leader].midr ^ cluster_configs[c].cluster_midr[cluster]) & midr_mask) { + cpuinfo_log_debug("parsed MIDR of cluster %08"PRIu32" does not match tabulated value %08"PRIu32, + processors[cluster_leader].midr, cluster_configs[c].cluster_midr[cluster]); + return false; + } + } + } + + /* Assign MIDRs according to tabulated configurations */ + for (uint32_t cluster = 0; cluster < clusters_count; cluster++) { + const uint32_t cluster_leader = cluster_leaders[cluster]; + processors[cluster_leader].midr = cluster_configs[c].cluster_midr[cluster]; + processors[cluster_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR; + cpuinfo_log_debug("cluster %"PRIu32" MIDR = 0x%08"PRIx32, cluster, cluster_configs[c].cluster_midr[cluster]); + } + return true; + } + } + } + return false; +} + +/* + * Initializes MIDR for leaders of core clusters using a heuristic for big.LITTLE systems: + * - If the only known MIDR is for the big core cluster, guess the matching MIDR for the LITTLE cluster. + * - Estimate which of the clusters is big using maximum frequency, if known, otherwise using system processor ID. + * - Initialize the MIDR for big and LITTLE core clusters using the guesstimates values. + * + * @param clusters_count - number of CPU core clusters detected in the SoC. + * @param cluster_with_midr_count - number of CPU core clusters in the SoC with known MIDR values. + * @param last_processor_with_midr - index of the last logical processor with known MIDR in the @p processors array. + * @param cluster_leaders - indices of core clusters' leaders in the @p processors array. 
+ * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency, + * and decoded core cluster (package_leader_id) information. + * Upon successful return, processors[i].midr for all core clusters' leaders contains + * the heuristically detected MIDR value. + * @param verify_midr - indicated whether the function should check that the MIDR values to be assigned to leaders of + * core clusters are consistent with known parts of their parsed values. + * Set if to false if the only MIDR value parsed from /proc/cpuinfo is for the last processor + * reported in /proc/cpuinfo and thus can't be unambiguously attributed to that processor. + * + * @retval true if this is a big.LITTLE system with only one known MIDR and the CPU core clusters' leaders were + * initialized with MIDR values. + * @retval false if this is not a big.LITTLE system. + */ +static bool cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic( + uint32_t clusters_count, + uint32_t cluster_with_midr_count, + uint32_t last_processor_with_midr, + const uint32_t cluster_leaders[restrict static CLUSTERS_MAX], + struct cpuinfo_arm_linux_processor processors[restrict static last_processor_with_midr], + bool verify_midr) +{ + if (clusters_count != 2 || cluster_with_midr_count != 1) { + /* Not a big.LITTLE system, or MIDR is known for both/neither clusters */ + return false; + } + + const uint32_t midr_flags = + (processors[processors[last_processor_with_midr].package_leader_id].flags & CPUINFO_ARM_LINUX_VALID_MIDR); + const uint32_t big_midr = processors[processors[last_processor_with_midr].package_leader_id].midr; + const uint32_t little_midr = midr_little_core_for_big(big_midr); + + /* Default assumption: the first reported cluster is LITTLE cluster (this holds on most Linux kernels) */ + uint32_t little_cluster_leader = cluster_leaders[0]; + const uint32_t other_cluster_leader = cluster_leaders[1]; + /* If maximum frequency is known for both clusters, assume LITTLE cluster is the one with lower frequency */ + if (processors[little_cluster_leader].flags & processors[other_cluster_leader].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + if (processors[little_cluster_leader].max_frequency > processors[other_cluster_leader].max_frequency) { + little_cluster_leader = other_cluster_leader; + } + } + + if (verify_midr) { + /* Verify known parts of MIDR */ + for (uint32_t cluster = 0; cluster < clusters_count; cluster++) { + const uint32_t cluster_leader = cluster_leaders[cluster]; + + /* Create a mask of known midr bits */ + uint32_t midr_mask = 0; + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) { + midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK; + } + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) { + midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK; + } + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) { + midr_mask |= CPUINFO_ARM_MIDR_PART_MASK; + } + if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) { + midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK; + } + + /* Verify the bits under the mask */ + const uint32_t midr = (cluster_leader == little_cluster_leader) ? 
little_midr : big_midr; + if ((processors[cluster_leader].midr ^ midr) & midr_mask) { + cpuinfo_log_debug( + "parsed MIDR %08"PRIu32" of cluster leader %"PRIu32" is inconsistent with expected value %08"PRIu32, + processors[cluster_leader].midr, cluster_leader, midr); + return false; + } + } + } + + for (uint32_t c = 0; c < clusters_count; c++) { + /* Skip cluster with already assigned MIDR */ + const uint32_t cluster_leader = cluster_leaders[c]; + if (bitmask_all(processors[cluster_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) { + continue; + } + + const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr; + cpuinfo_log_info("assume processor %"PRIu32" to have MIDR %08"PRIx32, cluster_leader, midr); + /* To be consistent, we copy the MIDR entirely, rather than by parts */ + processors[cluster_leader].midr = midr; + processors[cluster_leader].flags |= midr_flags; + } + return true; +} + +/* + * Initializes MIDR for leaders of core clusters in a single sequential scan: + * - Clusters preceding the first reported MIDR value are assumed to have default MIDR value. + * - Clusters following any reported MIDR value to have that MIDR value. + * + * @param default_midr - MIDR value that will be assigned to cluster leaders preceding any reported MIDR value. + * @param processors_count - number of logical processor descriptions in the @p processors array. + * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency, + * and decoded core cluster (package_leader_id) information. + * Upon successful return, processors[i].midr for all core clusters' leaders contains + * the assigned MIDR value. + */ +static void cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan( + uint32_t default_midr, + uint32_t processors_count, + struct cpuinfo_arm_linux_processor processors[restrict static processors_count]) +{ + uint32_t midr = default_midr; + for (uint32_t i = 0; i < processors_count; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + if (processors[i].package_leader_id == i) { + if (bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) { + midr = processors[i].midr; + } else { + cpuinfo_log_info("assume processor %"PRIu32" to have MIDR %08"PRIx32, i, midr); + /* To be consistent, we copy the MIDR entirely, rather than by parts */ + processors[i].midr = midr; + processors[i].flags |= CPUINFO_ARM_LINUX_VALID_MIDR; + } + } + } + } +} + +/* + * Detects MIDR of each CPU core clusters' leader. + * + * @param[in] chipset - chipset (SoC) name information. + * @param max_processors - number of processor descriptions in the @p processors array. + * @param usable_processors - number of processor descriptions in the @p processors array with both POSSIBLE and + * PRESENT flags. + * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency, + * and decoded core cluster (package_leader_id) information. + * Upon return, processors[i].midr for all clusters' leaders contains the MIDR value. 
+ * + * @returns The number of core clusters + */ +uint32_t cpuinfo_arm_linux_detect_cluster_midr( + const struct cpuinfo_arm_chipset chipset[restrict static 1], + uint32_t max_processors, + uint32_t usable_processors, + struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) +{ + uint32_t clusters_count = 0; + uint32_t cluster_leaders[CLUSTERS_MAX]; + uint32_t last_processor_in_cpuinfo = max_processors; + uint32_t last_processor_with_midr = max_processors; + uint32_t processors_with_midr_count = 0; + for (uint32_t i = 0; i < max_processors; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR) { + last_processor_in_cpuinfo = i; + } + if (bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_IMPLEMENTER | CPUINFO_ARM_LINUX_VALID_PART)) { + last_processor_with_midr = i; + processors_with_midr_count += 1; + } + const uint32_t group_leader = processors[i].package_leader_id; + if (group_leader == i) { + if (clusters_count < CLUSTERS_MAX) { + cluster_leaders[clusters_count] = i; + } + clusters_count += 1; + } else { + /* Copy known bits of information to cluster leader */ + + if ((processors[i].flags & ~processors[group_leader].flags) & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) { + processors[group_leader].max_frequency = processors[i].max_frequency; + processors[group_leader].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY; + } + if (!bitmask_all(processors[group_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR) && + bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) + { + processors[group_leader].midr = processors[i].midr; + processors[group_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR; + } + } + } + } + cpuinfo_log_debug("detected %"PRIu32" core clusters", clusters_count); + + /* + * Two relations between reported /proc/cpuinfo information, and cores is possible: + * - /proc/cpuinfo reports information for all or some of the cores below the corresponding + * "processor : " lines. Information on offline cores may be missing. + * - /proc/cpuinfo reports information only once, after all "processor : " lines. + * The reported information may relate to processor #0 or to the processor which + * executed the system calls to read /proc/cpuinfo. It is also indistinguishable + * from /proc/cpuinfo reporting information only for the last core (e.g. if all other + * cores are offline). + * + * We detect the second case by checking if /proc/cpuinfo contains valid MIDR only for one, + * last reported, processor. Note, that the last reported core may be not the last + * present & possible processor, as /proc/cpuinfo may non-report high-index offline cores. + */ + if (processors_with_midr_count == 1 && last_processor_in_cpuinfo == last_processor_with_midr && clusters_count > 1) { + /* + * There are multiple core clusters, but /proc/cpuinfo reported MIDR only for one + * processor, and we don't even know which logical processor this information refers to. + * + * We make three attempts to detect MIDR for all clusters: + * 1. Search tabulated MIDR values for chipsets which have heterogeneous clusters and ship with Linux + * kernels which do not always report all cores in /proc/cpuinfo. If found, use the tabulated values. + * 2. For systems with 2 clusters and MIDR known for one cluster, assume big.LITTLE configuration, + * and estimate MIDR for the other cluster under assumption that MIDR for the big cluster is known. + * 3. Initialize MIDRs for all core clusters to the only parsed MIDR value. 
+ */ + cpuinfo_log_debug("the only reported MIDR can not be attributed to a particular processor"); + + if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset( + chipset, clusters_count, cluster_leaders, usable_processors, processors, false)) + { + return clusters_count; + } + + /* Try big.LITTLE heuristic */ + if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic( + clusters_count, 1, last_processor_with_midr, + cluster_leaders, processors, false)) + { + return clusters_count; + } + + /* Fall back to sequential initialization of MIDR values for core clusters */ + cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan( + processors[processors[last_processor_with_midr].package_leader_id].midr, + max_processors, processors); + } else if (processors_with_midr_count < usable_processors) { + /* + * /proc/cpuinfo reported MIDR only for some processors, and probably some core clusters do not have MIDR + * for any of the cores. Check if this is the case. + */ + uint32_t clusters_with_midr_count = 0; + for (uint32_t i = 0; i < max_processors; i++) { + if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_ARM_LINUX_VALID_MIDR)) { + if (processors[i].package_leader_id == i) { + clusters_with_midr_count += 1; + } + } + } + + if (clusters_with_midr_count < clusters_count) { + /* + * /proc/cpuinfo reported MIDR only for some clusters, need to reconstruct others. + * We make three attempts to detect MIDR for clusters without it: + * 1. Search tabulated MIDR values for chipsets which have heterogeneous clusters and ship with Linux + * kernels which do not always report all cores in /proc/cpuinfo. If found, use the tabulated values. + * 2. For systems with 2 clusters and MIDR known for one cluster, assume big.LITTLE configuration, + * and estimate MIDR for the other cluster under assumption that MIDR for the big cluster is known. + * 3. Initialize MIDRs for core clusters in a single sequential scan: + * - Clusters preceding the first reported MIDR value are assumed to have the last reported MIDR value. + * - Clusters following any reported MIDR value to have that MIDR value. 
+ */ + + if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset( + chipset, clusters_count, cluster_leaders, usable_processors, processors, true)) + { + return clusters_count; + } + + if (last_processor_with_midr != max_processors) { + /* Try big.LITTLE heuristic */ + if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic( + clusters_count, processors_with_midr_count, last_processor_with_midr, + cluster_leaders, processors, true)) + { + return clusters_count; + } + + /* Fall back to sequential initialization of MIDR values for core clusters */ + cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan( + processors[processors[last_processor_with_midr].package_leader_id].midr, + max_processors, processors); + } + } + } + return clusters_count; +} diff --git a/source/3rdparty/cpuinfo/src/arm/mach/init.c b/source/3rdparty/cpuinfo/src/arm/mach/init.c new file mode 100644 index 0000000..dbea578 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/mach/init.c @@ -0,0 +1,619 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +/* Polyfill recent CPUFAMILY_ARM_* values for older SDKs */ +#ifndef CPUFAMILY_ARM_MONSOON_MISTRAL + #define CPUFAMILY_ARM_MONSOON_MISTRAL 0xE81E7EF6 +#endif +#ifndef CPUFAMILY_ARM_VORTEX_TEMPEST + #define CPUFAMILY_ARM_VORTEX_TEMPEST 0x07D34B9F +#endif +#ifndef CPUFAMILY_ARM_LIGHTNING_THUNDER + #define CPUFAMILY_ARM_LIGHTNING_THUNDER 0x462504D2 +#endif +#ifndef CPUFAMILY_ARM_FIRESTORM_ICESTORM + #define CPUFAMILY_ARM_FIRESTORM_ICESTORM 0x1B588BB3 +#endif + +struct cpuinfo_arm_isa cpuinfo_isa = { +#if CPUINFO_ARCH_ARM + .thumb = true, + .thumb2 = true, + .thumbee = false, + .jazelle = false, + .armv5e = true, + .armv6 = true, + .armv6k = true, + .armv7 = true, + .vfpv2 = false, + .vfpv3 = true, + .d32 = true, + .wmmx = false, + .wmmx2 = false, + .neon = true, +#endif +#if CPUINFO_ARCH_ARM64 + .aes = true, + .sha1 = true, + .sha2 = true, + .pmull = true, + .crc32 = true, +#endif +}; + +static uint32_t get_sys_info(int type_specifier, const char* name) { + size_t size = 0; + uint32_t result = 0; + int mib[2] = { CTL_HW, type_specifier }; + if (sysctl(mib, 2, NULL, &size, NULL, 0) != 0) { + cpuinfo_log_info("sysctl(\"%s\") failed: %s", name, strerror(errno)); + } else if (size == sizeof(uint32_t)) { + sysctl(mib, 2, &result, &size, NULL, 0); + cpuinfo_log_debug("%s: %"PRIu32 ", size = %lu", name, result, size); + } else { + cpuinfo_log_info("sysctl does not support non-integer lookup for (\"%s\")", name); + } + return result; +} + +static uint32_t get_sys_info_by_name(const char* type_specifier) { + size_t size = 0; + uint32_t result = 0; + if (sysctlbyname(type_specifier, NULL, &size, NULL, 0) != 0) { + cpuinfo_log_info("sysctlbyname(\"%s\") failed: %s", type_specifier, strerror(errno)); + } else if (size == sizeof(uint32_t)) { + sysctlbyname(type_specifier, &result, &size, NULL, 0); + cpuinfo_log_debug("%s: %"PRIu32 ", size = %lu", type_specifier, result, size); + } else { + cpuinfo_log_info("sysctl does not support non-integer lookup for (\"%s\")", type_specifier); + } + return result; +} + +static enum cpuinfo_uarch decode_uarch(uint32_t cpu_family, uint32_t cpu_subtype, uint32_t core_index, uint32_t core_count) { + switch (cpu_family) { + case CPUFAMILY_ARM_SWIFT: + return cpuinfo_uarch_swift; + case CPUFAMILY_ARM_CYCLONE: + return cpuinfo_uarch_cyclone; + case CPUFAMILY_ARM_TYPHOON: + return cpuinfo_uarch_typhoon; + case CPUFAMILY_ARM_TWISTER: + return cpuinfo_uarch_twister; + 
case CPUFAMILY_ARM_HURRICANE: + return cpuinfo_uarch_hurricane; + case CPUFAMILY_ARM_MONSOON_MISTRAL: + /* 2x Monsoon + 4x Mistral cores */ + return core_index < 2 ? cpuinfo_uarch_monsoon : cpuinfo_uarch_mistral; + case CPUFAMILY_ARM_VORTEX_TEMPEST: + /* Hexa-core: 2x Vortex + 4x Tempest; Octa-core: 4x Cortex + 4x Tempest */ + return core_index + 4 < core_count ? cpuinfo_uarch_vortex : cpuinfo_uarch_tempest; + case CPUFAMILY_ARM_LIGHTNING_THUNDER: + /* Hexa-core: 2x Lightning + 4x Thunder; Octa-core (presumed): 4x Lightning + 4x Thunder */ + return core_index + 4 < core_count ? cpuinfo_uarch_lightning : cpuinfo_uarch_thunder; + case CPUFAMILY_ARM_FIRESTORM_ICESTORM: + /* Hexa-core: 2x Firestorm + 4x Icestorm; Octa-core: 4x Firestorm + 4x Icestorm */ + return core_index + 4 < core_count ? cpuinfo_uarch_firestorm : cpuinfo_uarch_icestorm; + default: + /* Use hw.cpusubtype for detection */ + break; + } + + #if CPUINFO_ARCH_ARM + switch (cpu_subtype) { + case CPU_SUBTYPE_ARM_V7: + return cpuinfo_uarch_cortex_a8; + case CPU_SUBTYPE_ARM_V7F: + return cpuinfo_uarch_cortex_a9; + case CPU_SUBTYPE_ARM_V7K: + return cpuinfo_uarch_cortex_a7; + default: + return cpuinfo_uarch_unknown; + } + #else + return cpuinfo_uarch_unknown; + #endif +} + +static void decode_package_name(char* package_name) { + size_t size; + if (sysctlbyname("hw.machine", NULL, &size, NULL, 0) != 0) { + cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno)); + return; + } + + char *machine_name = alloca(size); + if (sysctlbyname("hw.machine", machine_name, &size, NULL, 0) != 0) { + cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno)); + return; + } + cpuinfo_log_debug("hw.machine: %s", machine_name); + + char name[10]; + uint32_t major = 0, minor = 0; + if (sscanf(machine_name, "%9[^,0123456789]%"SCNu32",%"SCNu32, name, &major, &minor) != 3) { + cpuinfo_log_warning("parsing \"hw.machine\" failed: %s", strerror(errno)); + return; + } + + uint32_t chip_model = 0; + char suffix = '\0'; + if (strcmp(name, "iPhone") == 0) { + /* + * iPhone 4 and up are supported: + * - iPhone 4 [A4]: iPhone3,1, iPhone3,2, iPhone3,3 + * - iPhone 4S [A5]: iPhone4,1 + * - iPhone 5 [A6]: iPhone5,1, iPhone5,2 + * - iPhone 5c [A6]: iPhone5,3, iPhone5,4 + * - iPhone 5s [A7]: iPhone6,1, iPhone6,2 + * - iPhone 6 [A8]: iPhone7,2 + * - iPhone 6 Plus [A8]: iPhone7,1 + * - iPhone 6s [A9]: iPhone8,1 + * - iPhone 6s Plus [A9]: iPhone8,2 + * - iPhone SE [A9]: iPhone8,4 + * - iPhone 7 [A10]: iPhone9,1, iPhone9,3 + * - iPhone 7 Plus [A10]: iPhone9,2, iPhone9,4 + * - iPhone 8 [A11]: iPhone10,1, iPhone10,4 + * - iPhone 8 Plus [A11]: iPhone10,2, iPhone10,5 + * - iPhone X [A11]: iPhone10,3, iPhone10,6 + * - iPhone XS [A12]: iPhone11,2, + * - iPhone XS Max [A12]: iPhone11,4, iPhone11,6 + * - iPhone XR [A12]: iPhone11,8 + */ + chip_model = major + 1; + } else if (strcmp(name, "iPad") == 0) { + switch (major) { + /* iPad 2 and up are supported */ + case 2: + /* + * iPad 2 [A5]: iPad2,1, iPad2,2, iPad2,3, iPad2,4 + * iPad mini [A5]: iPad2,5, iPad2,6, iPad2,7 + */ + chip_model = major + 3; + break; + case 3: + /* + * iPad 3rd Gen [A5X]: iPad3,1, iPad3,2, iPad3,3 + * iPad 4th Gen [A6X]: iPad3,4, iPad3,5, iPad3,6 + */ + chip_model = (minor <= 3) ? 
5 : 6; + suffix = 'X'; + break; + case 4: + /* + * iPad Air [A7]: iPad4,1, iPad4,2, iPad4,3 + * iPad mini Retina [A7]: iPad4,4, iPad4,5, iPad4,6 + * iPad mini 3 [A7]: iPad4,7, iPad4,8, iPad4,9 + */ + chip_model = major + 3; + break; + case 5: + /* + * iPad mini 4 [A8]: iPad5,1, iPad5,2 + * iPad Air 2 [A8X]: iPad5,3, iPad5,4 + */ + chip_model = major + 3; + suffix = (minor <= 2) ? '\0' : 'X'; + break; + case 6: + /* + * iPad Pro 9.7" [A9X]: iPad6,3, iPad6,4 + * iPad Pro [A9X]: iPad6,7, iPad6,8 + * iPad 5th Gen [A9]: iPad6,11, iPad6,12 + */ + chip_model = major + 3; + suffix = minor <= 8 ? 'X' : '\0'; + break; + case 7: + /* + * iPad Pro 12.9" [A10X]: iPad7,1, iPad7,2 + * iPad Pro 10.5" [A10X]: iPad7,3, iPad7,4 + * iPad 6th Gen [A10]: iPad7,5, iPad7,6 + */ + chip_model = major + 3; + suffix = minor <= 4 ? 'X' : '\0'; + break; + default: + cpuinfo_log_info("unknown iPad: %s", machine_name); + break; + } + } else if (strcmp(name, "iPod") == 0) { + switch (major) { + case 5: + chip_model = 5; + break; + /* iPod touch (5th Gen) [A5]: iPod5,1 */ + case 7: + /* iPod touch (6th Gen, 2015) [A8]: iPod7,1 */ + chip_model = 8; + break; + default: + cpuinfo_log_info("unknown iPod: %s", machine_name); + break; + } + } else { + cpuinfo_log_info("unknown device: %s", machine_name); + } + if (chip_model != 0) { + snprintf(package_name, CPUINFO_PACKAGE_NAME_MAX, "Apple A%"PRIu32"%c", chip_model, suffix); + } +} + +void cpuinfo_arm_mach_init(void) { + struct cpuinfo_processor* processors = NULL; + struct cpuinfo_core* cores = NULL; + struct cpuinfo_cluster* clusters = NULL; + struct cpuinfo_package* packages = NULL; + struct cpuinfo_uarch_info* uarchs = NULL; + struct cpuinfo_cache* l1i = NULL; + struct cpuinfo_cache* l1d = NULL; + struct cpuinfo_cache* l2 = NULL; + struct cpuinfo_cache* l3 = NULL; + + struct cpuinfo_mach_topology mach_topology = cpuinfo_mach_detect_topology(); + processors = calloc(mach_topology.threads, sizeof(struct cpuinfo_processor)); + if (processors == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors", + mach_topology.threads * sizeof(struct cpuinfo_processor), mach_topology.threads); + goto cleanup; + } + cores = calloc(mach_topology.cores, sizeof(struct cpuinfo_core)); + if (cores == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores", + mach_topology.cores * sizeof(struct cpuinfo_core), mach_topology.cores); + goto cleanup; + } + packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package)); + if (packages == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" packages", + mach_topology.packages * sizeof(struct cpuinfo_package), mach_topology.packages); + goto cleanup; + } + + const uint32_t threads_per_core = mach_topology.threads / mach_topology.cores; + const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages; + const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages; + + for (uint32_t i = 0; i < mach_topology.packages; i++) { + packages[i] = (struct cpuinfo_package) { + .processor_start = i * threads_per_package, + .processor_count = threads_per_package, + .core_start = i * cores_per_package, + .core_count = cores_per_package, + }; + decode_package_name(packages[i].name); + } + + + const uint32_t cpu_family = get_sys_info_by_name("hw.cpufamily"); + const uint32_t cpu_type = get_sys_info_by_name("hw.cputype"); + const uint32_t cpu_subtype = get_sys_info_by_name("hw.cpusubtype"); + 
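+	/*
+	 * Derive ISA features from the reported CPU type and subtype: for 64-bit ARM the crypto
+	 * and CRC32 extensions are enabled unconditionally, while for 32-bit ARM the subtype
+	 * cases below intentionally fall through so that newer subtypes inherit the features
+	 * of the older ones.
+	 */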
switch (cpu_type) { + case CPU_TYPE_ARM64: + cpuinfo_isa.aes = true; + cpuinfo_isa.sha1 = true; + cpuinfo_isa.sha2 = true; + cpuinfo_isa.pmull = true; + cpuinfo_isa.crc32 = true; + break; +#if CPUINFO_ARCH_ARM + case CPU_TYPE_ARM: + switch (cpu_subtype) { + case CPU_SUBTYPE_ARM_V8: + cpuinfo_isa.armv8 = true; + cpuinfo_isa.aes = true; + cpuinfo_isa.sha1 = true; + cpuinfo_isa.sha2 = true; + cpuinfo_isa.pmull = true; + cpuinfo_isa.crc32 = true; + /* Fall-through to add ARMv7S features */ + case CPU_SUBTYPE_ARM_V7S: + case CPU_SUBTYPE_ARM_V7K: + cpuinfo_isa.fma = true; + /* Fall-through to add ARMv7F features */ + case CPU_SUBTYPE_ARM_V7F: + cpuinfo_isa.armv7mp = true; + cpuinfo_isa.fp16 = true; + /* Fall-through to add ARMv7 features */ + case CPU_SUBTYPE_ARM_V7: + break; + default: + break; + } + break; +#endif + } + /* + * Support for ARMv8.1 Atomics & FP16 arithmetic instructions is supposed to be detected via + * sysctlbyname calls with "hw.optional.armv8_1_atomics" and "hw.optional.neon_fp16" arguments + * (see https://devstreaming-cdn.apple.com/videos/wwdc/2018/409t8zw7rumablsh/409/409_whats_new_in_llvm.pdf), + * but on new iOS versions these calls just fail with EPERM. + * + * Thus, we whitelist CPUs known to support these instructions. + */ + switch (cpu_family) { + case CPUFAMILY_ARM_MONSOON_MISTRAL: + case CPUFAMILY_ARM_VORTEX_TEMPEST: + case CPUFAMILY_ARM_LIGHTNING_THUNDER: + case CPUFAMILY_ARM_FIRESTORM_ICESTORM: + #if CPUINFO_ARCH_ARM64 + cpuinfo_isa.atomics = true; + #endif + cpuinfo_isa.fp16arith = true; + } + + /* + * There does not yet seem to exist an OS mechanism to detect support for + * ARMv8.2 optional dot-product instructions, so we currently whitelist CPUs + * known to support these instruction. + */ + switch (cpu_family) { + case CPUFAMILY_ARM_LIGHTNING_THUNDER: + case CPUFAMILY_ARM_FIRESTORM_ICESTORM: + cpuinfo_isa.dot = true; + } + + uint32_t num_clusters = 1; + for (uint32_t i = 0; i < mach_topology.cores; i++) { + cores[i] = (struct cpuinfo_core) { + .processor_start = i * threads_per_core, + .processor_count = threads_per_core, + .core_id = i % cores_per_package, + .package = packages + i / cores_per_package, + .vendor = cpuinfo_vendor_apple, + .uarch = decode_uarch(cpu_family, cpu_subtype, i, mach_topology.cores), + }; + if (i != 0 && cores[i].uarch != cores[i - 1].uarch) { + num_clusters++; + } + } + for (uint32_t i = 0; i < mach_topology.threads; i++) { + const uint32_t smt_id = i % threads_per_core; + const uint32_t core_id = i / threads_per_core; + const uint32_t package_id = i / threads_per_package; + + processors[i].smt_id = smt_id; + processors[i].core = &cores[core_id]; + processors[i].package = &packages[package_id]; + } + + clusters = calloc(num_clusters, sizeof(struct cpuinfo_cluster)); + if (clusters == NULL) { + cpuinfo_log_error( + "failed to allocate %zu bytes for descriptions of %"PRIu32" clusters", + num_clusters * sizeof(struct cpuinfo_cluster), num_clusters); + goto cleanup; + } + uarchs = calloc(num_clusters, sizeof(struct cpuinfo_uarch_info)); + if (uarchs == NULL) { + cpuinfo_log_error( + "failed to allocate %zu bytes for descriptions of %"PRIu32" uarchs", + num_clusters * sizeof(enum cpuinfo_uarch), num_clusters); + goto cleanup; + } + uint32_t cluster_idx = UINT32_MAX; + for (uint32_t i = 0; i < mach_topology.cores; i++) { + if (i == 0 || cores[i].uarch != cores[i - 1].uarch) { + cluster_idx++; + uarchs[cluster_idx] = (struct cpuinfo_uarch_info) { + .uarch = cores[i].uarch, + .processor_count = 1, + .core_count = 1, + }; + 
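+			/* A change of microarchitecture between consecutive cores starts a new cluster,
+			 * so create the matching cluster record alongside the new uarch entry. */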
clusters[cluster_idx] = (struct cpuinfo_cluster) { + .processor_start = i * threads_per_core, + .processor_count = 1, + .core_start = i, + .core_count = 1, + .cluster_id = cluster_idx, + .package = cores[i].package, + .vendor = cores[i].vendor, + .uarch = cores[i].uarch, + }; + } else { + uarchs[cluster_idx].processor_count++; + uarchs[cluster_idx].core_count++; + clusters[cluster_idx].processor_count++; + clusters[cluster_idx].core_count++; + } + cores[i].cluster = &clusters[cluster_idx]; + } + + for (uint32_t i = 0; i < mach_topology.threads; i++) { + const uint32_t core_id = i / threads_per_core; + processors[i].cluster = cores[core_id].cluster; + } + + for (uint32_t i = 0; i < mach_topology.packages; i++) { + packages[i].cluster_start = 0; + packages[i].cluster_count = num_clusters; + } + + const uint32_t cacheline_size = get_sys_info(HW_CACHELINE, "HW_CACHELINE"); + const uint32_t l1d_cache_size = get_sys_info(HW_L1DCACHESIZE, "HW_L1DCACHESIZE"); + const uint32_t l1i_cache_size = get_sys_info(HW_L1ICACHESIZE, "HW_L1ICACHESIZE"); + const uint32_t l2_cache_size = get_sys_info(HW_L2CACHESIZE, "HW_L2CACHESIZE"); + const uint32_t l3_cache_size = get_sys_info(HW_L3CACHESIZE, "HW_L3CACHESIZE"); + const uint32_t l1_cache_associativity = 4; + const uint32_t l2_cache_associativity = 8; + const uint32_t l3_cache_associativity = 16; + const uint32_t cache_partitions = 1; + const uint32_t cache_flags = 0; + + uint32_t threads_per_l1 = 0, l1_count = 0; + if (l1i_cache_size != 0 || l1d_cache_size != 0) { + /* Assume L1 caches are private to each core */ + threads_per_l1 = 1; + l1_count = mach_topology.threads / threads_per_l1; + cpuinfo_log_debug("detected %"PRIu32" L1 caches", l1_count); + } + + uint32_t threads_per_l2 = 0, l2_count = 0; + if (l2_cache_size != 0) { + /* Assume L2 cache is shared between all cores */ + threads_per_l2 = mach_topology.cores; + l2_count = 1; + cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count); + } + + uint32_t threads_per_l3 = 0, l3_count = 0; + if (l3_cache_size != 0) { + /* Assume L3 cache is shared between all cores */ + threads_per_l3 = mach_topology.cores; + l3_count = 1; + cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count); + } + + if (l1i_cache_size != 0) { + l1i = calloc(l1_count, sizeof(struct cpuinfo_cache)); + if (l1i == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches", + l1_count * sizeof(struct cpuinfo_cache), l1_count); + goto cleanup; + } + for (uint32_t c = 0; c < l1_count; c++) { + l1i[c] = (struct cpuinfo_cache) { + .size = l1i_cache_size, + .associativity = l1_cache_associativity, + .sets = l1i_cache_size / (l1_cache_associativity * cacheline_size), + .partitions = cache_partitions, + .line_size = cacheline_size, + .flags = cache_flags, + .processor_start = c * threads_per_l1, + .processor_count = threads_per_l1, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l1i = &l1i[t / threads_per_l1]; + } + } + + if (l1d_cache_size != 0) { + l1d = calloc(l1_count, sizeof(struct cpuinfo_cache)); + if (l1d == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches", + l1_count * sizeof(struct cpuinfo_cache), l1_count); + goto cleanup; + } + for (uint32_t c = 0; c < l1_count; c++) { + l1d[c] = (struct cpuinfo_cache) { + .size = l1d_cache_size, + .associativity = l1_cache_associativity, + .sets = l1d_cache_size / (l1_cache_associativity * cacheline_size), + .partitions = cache_partitions, + .line_size = 
cacheline_size, + .flags = cache_flags, + .processor_start = c * threads_per_l1, + .processor_count = threads_per_l1, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l1d = &l1d[t / threads_per_l1]; + } + } + + if (l2_count != 0) { + l2 = calloc(l2_count, sizeof(struct cpuinfo_cache)); + if (l2 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches", + l2_count * sizeof(struct cpuinfo_cache), l2_count); + goto cleanup; + } + for (uint32_t c = 0; c < l2_count; c++) { + l2[c] = (struct cpuinfo_cache) { + .size = l2_cache_size, + .associativity = l2_cache_associativity, + .sets = l2_cache_size / (l2_cache_associativity * cacheline_size), + .partitions = cache_partitions, + .line_size = cacheline_size, + .flags = cache_flags, + .processor_start = c * threads_per_l2, + .processor_count = threads_per_l2, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l2 = &l2[0]; + } + } + + if (l3_count != 0) { + l3 = calloc(l3_count, sizeof(struct cpuinfo_cache)); + if (l3 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches", + l3_count * sizeof(struct cpuinfo_cache), l3_count); + goto cleanup; + } + for (uint32_t c = 0; c < l3_count; c++) { + l3[c] = (struct cpuinfo_cache) { + .size = l3_cache_size, + .associativity = l3_cache_associativity, + .sets = l3_cache_size / (l3_cache_associativity * cacheline_size), + .partitions = cache_partitions, + .line_size = cacheline_size, + .flags = cache_flags, + .processor_start = c * threads_per_l3, + .processor_count = threads_per_l3, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l3 = &l3[0]; + } + } + + /* Commit changes */ + cpuinfo_processors = processors; + cpuinfo_cores = cores; + cpuinfo_clusters = clusters; + cpuinfo_packages = packages; + cpuinfo_uarchs = uarchs; + cpuinfo_cache[cpuinfo_cache_level_1i] = l1i; + cpuinfo_cache[cpuinfo_cache_level_1d] = l1d; + cpuinfo_cache[cpuinfo_cache_level_2] = l2; + cpuinfo_cache[cpuinfo_cache_level_3] = l3; + + cpuinfo_processors_count = mach_topology.threads; + cpuinfo_cores_count = mach_topology.cores; + cpuinfo_clusters_count = num_clusters; + cpuinfo_packages_count = mach_topology.packages; + cpuinfo_uarchs_count = num_clusters; + cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count; + cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count; + cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count; + cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count; + cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]); + + __sync_synchronize(); + + cpuinfo_is_initialized = true; + + processors = NULL; + cores = NULL; + clusters = NULL; + packages = NULL; + uarchs = NULL; + l1i = l1d = l2 = l3 = NULL; + +cleanup: + free(processors); + free(cores); + free(clusters); + free(packages); + free(uarchs); + free(l1i); + free(l1d); + free(l2); + free(l3); +} diff --git a/source/3rdparty/cpuinfo/src/arm/midr.h b/source/3rdparty/cpuinfo/src/arm/midr.h new file mode 100644 index 0000000..6329783 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/midr.h @@ -0,0 +1,260 @@ +#pragma once +#include + + +#define CPUINFO_ARM_MIDR_IMPLEMENTER_MASK UINT32_C(0xFF000000) +#define CPUINFO_ARM_MIDR_VARIANT_MASK UINT32_C(0x00F00000) +#define CPUINFO_ARM_MIDR_ARCHITECTURE_MASK UINT32_C(0x000F0000) +#define CPUINFO_ARM_MIDR_PART_MASK UINT32_C(0x0000FFF0) +#define CPUINFO_ARM_MIDR_REVISION_MASK UINT32_C(0x0000000F) + +#define 
CPUINFO_ARM_MIDR_IMPLEMENTER_OFFSET 24 +#define CPUINFO_ARM_MIDR_VARIANT_OFFSET 20 +#define CPUINFO_ARM_MIDR_ARCHITECTURE_OFFSET 16 +#define CPUINFO_ARM_MIDR_PART_OFFSET 4 +#define CPUINFO_ARM_MIDR_REVISION_OFFSET 0 + +#define CPUINFO_ARM_MIDR_ARM1156 UINT32_C(0x410FB560) +#define CPUINFO_ARM_MIDR_CORTEX_A7 UINT32_C(0x410FC070) +#define CPUINFO_ARM_MIDR_CORTEX_A9 UINT32_C(0x410FC090) +#define CPUINFO_ARM_MIDR_CORTEX_A15 UINT32_C(0x410FC0F0) +#define CPUINFO_ARM_MIDR_CORTEX_A17 UINT32_C(0x410FC0E0) +#define CPUINFO_ARM_MIDR_CORTEX_A35 UINT32_C(0x410FD040) +#define CPUINFO_ARM_MIDR_CORTEX_A53 UINT32_C(0x410FD030) +#define CPUINFO_ARM_MIDR_CORTEX_A55 UINT32_C(0x410FD050) +#define CPUINFO_ARM_MIDR_CORTEX_A57 UINT32_C(0x410FD070) +#define CPUINFO_ARM_MIDR_CORTEX_A72 UINT32_C(0x410FD080) +#define CPUINFO_ARM_MIDR_CORTEX_A73 UINT32_C(0x410FD090) +#define CPUINFO_ARM_MIDR_CORTEX_A75 UINT32_C(0x410FD0A0) +#define CPUINFO_ARM_MIDR_KRYO280_GOLD UINT32_C(0x51AF8001) +#define CPUINFO_ARM_MIDR_KRYO280_SILVER UINT32_C(0x51AF8014) +#define CPUINFO_ARM_MIDR_KRYO385_GOLD UINT32_C(0x518F802D) +#define CPUINFO_ARM_MIDR_KRYO385_SILVER UINT32_C(0x518F803C) +#define CPUINFO_ARM_MIDR_KRYO_SILVER_821 UINT32_C(0x510F2010) +#define CPUINFO_ARM_MIDR_KRYO_GOLD UINT32_C(0x510F2050) +#define CPUINFO_ARM_MIDR_KRYO_SILVER_820 UINT32_C(0x510F2110) +#define CPUINFO_ARM_MIDR_EXYNOS_M1_M2 UINT32_C(0x530F0010) +#define CPUINFO_ARM_MIDR_DENVER2 UINT32_C(0x4E0F0030) + +inline static uint32_t midr_set_implementer(uint32_t midr, uint32_t implementer) { + return (midr & ~CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) | + ((implementer << CPUINFO_ARM_MIDR_IMPLEMENTER_OFFSET) & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK); +} + +inline static uint32_t midr_set_variant(uint32_t midr, uint32_t variant) { + return (midr & ~CPUINFO_ARM_MIDR_VARIANT_MASK) | + ((variant << CPUINFO_ARM_MIDR_VARIANT_OFFSET) & CPUINFO_ARM_MIDR_VARIANT_MASK); +} + +inline static uint32_t midr_set_architecture(uint32_t midr, uint32_t architecture) { + return (midr & ~CPUINFO_ARM_MIDR_ARCHITECTURE_MASK) | + ((architecture << CPUINFO_ARM_MIDR_ARCHITECTURE_OFFSET) & CPUINFO_ARM_MIDR_ARCHITECTURE_MASK); +} + +inline static uint32_t midr_set_part(uint32_t midr, uint32_t part) { + return (midr & ~CPUINFO_ARM_MIDR_PART_MASK) | + ((part << CPUINFO_ARM_MIDR_PART_OFFSET) & CPUINFO_ARM_MIDR_PART_MASK); +} + +inline static uint32_t midr_set_revision(uint32_t midr, uint32_t revision) { + return (midr & ~CPUINFO_ARM_MIDR_REVISION_MASK) | + ((revision << CPUINFO_ARM_MIDR_REVISION_OFFSET) & CPUINFO_ARM_MIDR_REVISION_MASK); +} + +inline static uint32_t midr_get_variant(uint32_t midr) { + return (midr & CPUINFO_ARM_MIDR_VARIANT_MASK) >> CPUINFO_ARM_MIDR_VARIANT_OFFSET; +} + +inline static uint32_t midr_get_implementer(uint32_t midr) { + return (midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) >> CPUINFO_ARM_MIDR_IMPLEMENTER_OFFSET; +} + +inline static uint32_t midr_get_part(uint32_t midr) { + return (midr & CPUINFO_ARM_MIDR_PART_MASK) >> CPUINFO_ARM_MIDR_PART_OFFSET; +} + +inline static uint32_t midr_get_revision(uint32_t midr) { + return (midr & CPUINFO_ARM_MIDR_REVISION_MASK) >> CPUINFO_ARM_MIDR_REVISION_OFFSET; +} + +inline static uint32_t midr_copy_implementer(uint32_t midr, uint32_t other_midr) { + return (midr & ~CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) | (other_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK); +} + +inline static uint32_t midr_copy_variant(uint32_t midr, uint32_t other_midr) { + return (midr & ~CPUINFO_ARM_MIDR_VARIANT_MASK) | (other_midr & CPUINFO_ARM_MIDR_VARIANT_MASK); +} + +inline static 
uint32_t midr_copy_architecture(uint32_t midr, uint32_t other_midr) { + return (midr & ~CPUINFO_ARM_MIDR_ARCHITECTURE_MASK) | (other_midr & CPUINFO_ARM_MIDR_ARCHITECTURE_MASK); +} + +inline static uint32_t midr_copy_part(uint32_t midr, uint32_t other_midr) { + return (midr & ~CPUINFO_ARM_MIDR_PART_MASK) | (other_midr & CPUINFO_ARM_MIDR_PART_MASK); +} + +inline static uint32_t midr_copy_revision(uint32_t midr, uint32_t other_midr) { + return (midr & ~CPUINFO_ARM_MIDR_REVISION_MASK) | (other_midr & CPUINFO_ARM_MIDR_REVISION_MASK); +} + +inline static bool midr_is_arm1156(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_ARM1156 & uarch_mask); +} + +inline static bool midr_is_arm11(uint32_t midr) { + return (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | 0x0000F000)) == UINT32_C(0x4100B000); +} + +inline static bool midr_is_cortex_a9(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_CORTEX_A9 & uarch_mask); +} + +inline static bool midr_is_scorpion(uint32_t midr) { + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x510000F0): + case UINT32_C(0x510002D0): + return true; + default: + return false; + } +} + +inline static bool midr_is_krait(uint32_t midr) { + switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case UINT32_C(0x510004D0): + case UINT32_C(0x510006F0): + return true; + default: + return false; + } +} + +inline static bool midr_is_cortex_a53(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_CORTEX_A53 & uarch_mask); +} + +inline static bool midr_is_qualcomm_cortex_a53_silver(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_KRYO280_SILVER & uarch_mask); +} + +inline static bool midr_is_qualcomm_cortex_a55_silver(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_KRYO385_SILVER & uarch_mask); +} + +inline static bool midr_is_kryo280_gold(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_KRYO280_GOLD & uarch_mask); +} + +inline static bool midr_is_kryo_silver(uint32_t midr) { + const uint32_t uarch_mask = + CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_ARCHITECTURE_MASK | CPUINFO_ARM_MIDR_PART_MASK; + switch (midr & uarch_mask) { + case CPUINFO_ARM_MIDR_KRYO_SILVER_820: + case CPUINFO_ARM_MIDR_KRYO_SILVER_821: + return true; + default: + return false; + } +} + +inline static bool midr_is_kryo_gold(uint32_t midr) { + const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + return (midr & uarch_mask) == (CPUINFO_ARM_MIDR_KRYO_GOLD & uarch_mask); +} + +inline static uint32_t midr_score_core(uint32_t midr) { + const uint32_t core_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK; + switch (midr & core_mask) { + case UINT32_C(0x53000030): /* Exynos M4 */ + case UINT32_C(0x53000040): /* Exynos M5 */ + case UINT32_C(0x4100D440): /* Cortex-X1 */ + /* These cores are in big role w.r.t 
Cortex-A75/-A76/-A77/-A78 */ + return 6; + case UINT32_C(0x4E000030): /* Denver 2 */ + case UINT32_C(0x53000010): /* Exynos M1 and Exynos M2 */ + case UINT32_C(0x53000020): /* Exynos M3 */ + case UINT32_C(0x51008040): /* Kryo 485 Gold / Gold Prime */ + case UINT32_C(0x51008020): /* Kryo 385 Gold */ + case UINT32_C(0x51008000): /* Kryo 260 / 280 Gold */ + case UINT32_C(0x51002050): /* Kryo Gold */ + case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */ + case UINT32_C(0x4100D490): /* Neoverse N2 */ + case UINT32_C(0x4100D410): /* Cortex-A78 */ + case UINT32_C(0x4100D400): /* Neoverse V1 */ + case UINT32_C(0x4100D0D0): /* Cortex-A77 */ + case UINT32_C(0x4100D0E0): /* Cortex-A76AE */ + case UINT32_C(0x4100D0C0): /* Neoverse-N1 */ + case UINT32_C(0x4100D0B0): /* Cortex-A76 */ + case UINT32_C(0x4100D0A0): /* Cortex-A75 */ + case UINT32_C(0x4100D090): /* Cortex-A73 */ + case UINT32_C(0x4100D080): /* Cortex-A72 */ +#if CPUINFO_ARCH_ARM + case UINT32_C(0x4100C0F0): /* Cortex-A15 */ + case UINT32_C(0x4100C0E0): /* Cortex-A17 */ + case UINT32_C(0x4100C0D0): /* Rockchip RK3288 cores */ + case UINT32_C(0x4100C0C0): /* Cortex-A12 */ +#endif /* CPUINFO_ARCH_ARM */ + /* These cores are always in big role */ + return 5; + case UINT32_C(0x4100D070): /* Cortex-A57 */ + /* Cortex-A57 can be in LITTLE role w.r.t. Denver 2, or in big role w.r.t. Cortex-A53 */ + return 4; +#if CPUINFO_ARCH_ARM64 + case UINT32_C(0x4100D060): /* Cortex-A65 */ +#endif /* CPUINFO_ARCH_ARM64 */ + case UINT32_C(0x4100D050): /* Cortex-A55 */ + case UINT32_C(0x4100D030): /* Cortex-A53 */ + /* Cortex-A53 is usually in LITTLE role, but can be in big role w.r.t. Cortex-A35 */ + return 2; + case UINT32_C(0x4100D040): /* Cortex-A35 */ +#if CPUINFO_ARCH_ARM + case UINT32_C(0x4100C070): /* Cortex-A7 */ +#endif /* CPUINFO_ARCH_ARM */ + case UINT32_C(0x51008050): /* Kryo 485 Silver */ + case UINT32_C(0x51008030): /* Kryo 385 Silver */ + case UINT32_C(0x51008010): /* Kryo 260 / 280 Silver */ + case UINT32_C(0x51002110): /* Kryo Silver (Snapdragon 820) */ + case UINT32_C(0x51002010): /* Kryo Silver (Snapdragon 821) */ + /* These cores are always in LITTLE core */ + return 1; + default: + /* + * Unknown cores, or cores which do not have big/LITTLE roles. + * To be future-proof w.r.t. cores not yet recognized in cpuinfo, assume position between + * Cortex-A57/A72/A73/A75 and Cortex-A53/A55. Then at least future cores paired with + * one of these known cores will be properly scored. 
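+ * For example (editorial illustration, not in the upstream comment): a future big core that
+ * cpuinfo does not yet recognize, paired with Cortex-A55 LITTLE cores, still receives a higher
+ * score (3) than the Cortex-A55 cores (2), so the big/LITTLE split is detected correctly.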
+ */ + return 3; + } +} + +inline static uint32_t midr_little_core_for_big(uint32_t midr) { + const uint32_t core_mask = + CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_ARCHITECTURE_MASK | CPUINFO_ARM_MIDR_PART_MASK; + switch (midr & core_mask) { + case CPUINFO_ARM_MIDR_CORTEX_A75: + return CPUINFO_ARM_MIDR_CORTEX_A55; + case CPUINFO_ARM_MIDR_CORTEX_A73: + case CPUINFO_ARM_MIDR_CORTEX_A72: + case CPUINFO_ARM_MIDR_CORTEX_A57: + case CPUINFO_ARM_MIDR_EXYNOS_M1_M2: + return CPUINFO_ARM_MIDR_CORTEX_A53; + case CPUINFO_ARM_MIDR_CORTEX_A17: + case CPUINFO_ARM_MIDR_CORTEX_A15: + return CPUINFO_ARM_MIDR_CORTEX_A7; + case CPUINFO_ARM_MIDR_KRYO280_GOLD: + return CPUINFO_ARM_MIDR_KRYO280_SILVER; + case CPUINFO_ARM_MIDR_KRYO_GOLD: + return CPUINFO_ARM_MIDR_KRYO_SILVER_820; + case CPUINFO_ARM_MIDR_DENVER2: + return CPUINFO_ARM_MIDR_CORTEX_A57; + default: + return midr; + } +} diff --git a/source/3rdparty/cpuinfo/src/arm/tlb.c b/source/3rdparty/cpuinfo/src/arm/tlb.c new file mode 100644 index 0000000..9beb832 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/tlb.c @@ -0,0 +1,133 @@ + + +switch (uarch) { + case cpuinfo_uarch_cortex_a5: + /* + * Cortex-A5 Technical Reference Manual: + * 6.3.1. Micro TLB + * The first level of caching for the page table information is a micro TLB of + * 10 entries that is implemented on each of the instruction and data sides. + * 6.3.2. Main TLB + * Misses from the instruction and data micro TLBs are handled by a unified main TLB. + * The main TLB is 128-entry two-way set-associative. + */ + break; + case cpuinfo_uarch_cortex_a7: + /* + * Cortex-A7 MPCore Technical Reference Manual: + * 5.3.1. Micro TLB + * The first level of caching for the page table information is a micro TLB of + * 10 entries that is implemented on each of the instruction and data sides. + * 5.3.2. Main TLB + * Misses from the micro TLBs are handled by a unified main TLB. This is a 256-entry 2-way + * set-associative structure. The main TLB supports all the VMSAv7 page sizes of + * 4KB, 64KB, 1MB and 16MB in addition to the LPAE page sizes of 2MB and 1G. + */ + break; + case cpuinfo_uarch_cortex_a8: + /* + * Cortex-A8 Technical Reference Manual: + * 6.1. About the MMU + * The MMU features include the following: + * - separate, fully-associative, 32-entry data and instruction TLBs + * - TLB entries that support 4KB, 64KB, 1MB, and 16MB pages + */ + break; + case cpuinfo_uarch_cortex_a9: + /* + * ARM Cortex‑A9 Technical Reference Manual: + * 6.2.1 Micro TLB + * The first level of caching for the page table information is a micro TLB of 32 entries on the data side, + * and configurable 32 or 64 entries on the instruction side. + * 6.2.2 Main TLB + * The main TLB is implemented as a combination of: + * - A fully-associative, lockable array of four elements. + * - A 2-way associative structure of 2x32, 2x64, 2x128 or 2x256 entries. + */ + break; + case cpuinfo_uarch_cortex_a15: + /* + * ARM Cortex-A15 MPCore Processor Technical Reference Manual: + * 5.2.1. L1 instruction TLB + * The L1 instruction TLB is a 32-entry fully-associative structure. This TLB caches entries at the 4KB + * granularity of Virtual Address (VA) to Physical Address (PA) mapping only. If the page tables map the + * memory region to a larger granularity than 4K, it only allocates one mapping for the particular 4K region + * to which the current access corresponds. + * 5.2.2. L1 data TLB + * There are two separate 32-entry fully-associative TLBs that are used for data loads and stores, + * respectively. 
Similar to the L1 instruction TLB, both of these cache entries at the 4KB granularity of + * VA to PA mappings only. At implementation time, the Cortex-A15 MPCore processor can be configured with + * the -l1tlb_1m option, to have the L1 data TLB cache entries at both the 4KB and 1MB granularity. + * With this configuration, any translation that results in a 1MB or larger page is cached in the L1 data + * TLB as a 1MB entry. Any translation that results in a page smaller than 1MB is cached in the L1 data TLB + * as a 4KB entry. By default, all translations are cached in the L1 data TLB as a 4KB entry. + * 5.2.3. L2 TLB + * Misses from the L1 instruction and data TLBs are handled by a unified L2 TLB. This is a 512-entry 4-way + * set-associative structure. The L2 TLB supports all the VMSAv7 page sizes of 4K, 64K, 1MB and 16MB in + * addition to the LPAE page sizes of 2MB and 1GB. + */ + break; + case cpuinfo_uarch_cortex_a17: + /* + * ARM Cortex-A17 MPCore Processor Technical Reference Manual: + * 5.2.1. Instruction micro TLB + * The instruction micro TLB is implemented as a 32, 48 or 64 entry, fully-associative structure. This TLB + * caches entries at the 4KB and 1MB granularity of Virtual Address (VA) to Physical Address (PA) mapping + * only. If the translation tables map the memory region to a larger granularity than 4KB or 1MB, it only + * allocates one mapping for the particular 4KB region to which the current access corresponds. + * 5.2.2. Data micro TLB + * The data micro TLB is a 32 entry fully-associative TLB that is used for data loads and stores. The cache + * entries have a 4KB and 1MB granularity of VA to PA mappings only. + * 5.2.3. Unified main TLB + * Misses from the instruction and data micro TLBs are handled by a unified main TLB. This is a 1024 entry + * 4-way set-associative structure. The main TLB supports all the VMSAv7 page sizes of 4K, 64K, 1MB and 16MB + * in addition to the LPAE page sizes of 2MB and 1GB. + */ + break; + case cpuinfo_uarch_cortex_a35: + /* + * ARM Cortex‑A35 Processor Technical Reference Manual: + * A6.2 TLB Organization + * Micro TLB + * The first level of caching for the translation table information is a micro TLB of ten entries that + * is implemented on each of the instruction and data sides. + * Main TLB + * A unified main TLB handles misses from the micro TLBs. It has a 512-entry, 2-way, set-associative + * structure and supports all VMSAv8 block sizes, except 1GB. If it fetches a 1GB block, the TLB splits + * it into 512MB blocks and stores the appropriate block for the lookup. + */ + break; + case cpuinfo_uarch_cortex_a53: + /* + * ARM Cortex-A53 MPCore Processor Technical Reference Manual: + * 5.2.1. Micro TLB + * The first level of caching for the translation table information is a micro TLB of ten entries that is + * implemented on each of the instruction and data sides. + * 5.2.2. Main TLB + * A unified main TLB handles misses from the micro TLBs. This is a 512-entry, 4-way, set-associative + * structure. The main TLB supports all VMSAv8 block sizes, except 1GB. If a 1GB block is fetched, it is + * split into 512MB blocks and the appropriate block for the lookup stored. + */ + break; + case cpuinfo_uarch_cortex_a57: + /* + * ARM® Cortex-A57 MPCore Processor Technical Reference Manual: + * 5.2.1 L1 instruction TLB + * The L1 instruction TLB is a 48-entry fully-associative structure. This TLB caches entries of three + * different page sizes, natively 4KB, 64KB, and 1MB, of VA to PA mappings. 
If the page tables map the memory + * region to a larger granularity than 1MB, it only allocates one mapping for the particular 1MB region to + * which the current access corresponds. + * 5.2.2 L1 data TLB + * The L1 data TLB is a 32-entry fully-associative TLB that is used for data loads and stores. This TLB + * caches entries of three different page sizes, natively 4KB, 64KB, and 1MB, of VA to PA mappings. + * 5.2.3 L2 TLB + * Misses from the L1 instruction and data TLBs are handled by a unified L2 TLB. This is a 1024-entry 4-way + * set-associative structure. The L2 TLB supports the page sizes of 4K, 64K, 1MB and 16MB. It also supports + * page sizes of 2MB and 1GB for the long descriptor format translation in AArch32 state and in AArch64 state + * when using the 4KB translation granule. In addition, the L2 TLB supports the 512MB page map size defined + * for the AArch64 translations that use a 64KB translation granule. + */ + break; +} + + diff --git a/source/3rdparty/cpuinfo/src/arm/uarch.c b/source/3rdparty/cpuinfo/src/arm/uarch.c new file mode 100644 index 0000000..346e1c1 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/arm/uarch.c @@ -0,0 +1,370 @@ +#include + +#include +#include +#include + + +void cpuinfo_arm_decode_vendor_uarch( + uint32_t midr, +#if CPUINFO_ARCH_ARM + bool has_vfpv4, +#endif /* CPUINFO_ARCH_ARM */ + enum cpuinfo_vendor vendor[restrict static 1], + enum cpuinfo_uarch uarch[restrict static 1]) +{ + switch (midr_get_implementer(midr)) { + case 'A': + *vendor = cpuinfo_vendor_arm; + switch (midr_get_part(midr)) { +#if CPUINFO_ARCH_ARM + case 0xC05: + *uarch = cpuinfo_uarch_cortex_a5; + break; + case 0xC07: + *uarch = cpuinfo_uarch_cortex_a7; + break; + case 0xC08: + *uarch = cpuinfo_uarch_cortex_a8; + break; + case 0xC09: + *uarch = cpuinfo_uarch_cortex_a9; + break; + case 0xC0C: + *uarch = cpuinfo_uarch_cortex_a12; + break; + case 0xC0E: + *uarch = cpuinfo_uarch_cortex_a17; + break; + case 0xC0D: + /* + * Rockchip RK3288 only. + * Core information is ambiguous: some sources specify Cortex-A12, others - Cortex-A17. + * Assume it is Cortex-A12. + */ + *uarch = cpuinfo_uarch_cortex_a12; + break; + case 0xC0F: + *uarch = cpuinfo_uarch_cortex_a15; + break; +#endif /* CPUINFO_ARCH_ARM */ + case 0xD01: + *uarch = cpuinfo_uarch_cortex_a32; + break; + case 0xD03: + *uarch = cpuinfo_uarch_cortex_a53; + break; + case 0xD04: + *uarch = cpuinfo_uarch_cortex_a35; + break; + case 0xD05: + // Note: use Variant, not Revision, field + *uarch = (midr & CPUINFO_ARM_MIDR_VARIANT_MASK) == 0 ? 
+ cpuinfo_uarch_cortex_a55r0 : cpuinfo_uarch_cortex_a55; + break; + case 0xD06: + *uarch = cpuinfo_uarch_cortex_a65; + break; + case 0xD07: + *uarch = cpuinfo_uarch_cortex_a57; + break; + case 0xD08: + *uarch = cpuinfo_uarch_cortex_a72; + break; + case 0xD09: + *uarch = cpuinfo_uarch_cortex_a73; + break; + case 0xD0A: + *uarch = cpuinfo_uarch_cortex_a75; + break; + case 0xD0B: + *uarch = cpuinfo_uarch_cortex_a76; + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 0xD0C: + *uarch = cpuinfo_uarch_neoverse_n1; + break; +#endif /* CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) */ + case 0xD0D: + *uarch = cpuinfo_uarch_cortex_a77; + break; + case 0xD0E: /* Cortex-A76AE */ + *uarch = cpuinfo_uarch_cortex_a76; + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 0xD40: + *uarch = cpuinfo_uarch_neoverse_v1; + break; +#endif /* CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) */ + case 0xD41: /* Cortex-A78 */ + *uarch = cpuinfo_uarch_cortex_a78; + break; + case 0xD44: /* Cortex-X1 */ + *uarch = cpuinfo_uarch_cortex_x1; + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 0xD49: + *uarch = cpuinfo_uarch_neoverse_n2; + break; + case 0xD4A: + *uarch = cpuinfo_uarch_neoverse_e1; + break; +#endif /* CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) */ + default: + switch (midr_get_part(midr) >> 8) { +#if CPUINFO_ARCH_ARM + case 7: + *uarch = cpuinfo_uarch_arm7; + break; + case 9: + *uarch = cpuinfo_uarch_arm9; + break; + case 11: + *uarch = cpuinfo_uarch_arm11; + break; +#endif /* CPUINFO_ARCH_ARM */ + default: + cpuinfo_log_warning("unknown ARM CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + } + break; + case 'B': + *vendor = cpuinfo_vendor_broadcom; + switch (midr_get_part(midr)) { + case 0x00F: + *uarch = cpuinfo_uarch_brahma_b15; + break; + case 0x100: + *uarch = cpuinfo_uarch_brahma_b53; + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 0x516: + /* Broadcom Vulkan was sold to Cavium before it reached the market, so we identify it as Cavium ThunderX2 */ + *vendor = cpuinfo_vendor_cavium; + *uarch = cpuinfo_uarch_thunderx2; + break; +#endif + default: + cpuinfo_log_warning("unknown Broadcom CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 'C': + *vendor = cpuinfo_vendor_cavium; + switch (midr_get_part(midr)) { + case 0x0A0: /* ThunderX */ + case 0x0A1: /* ThunderX 88XX */ + case 0x0A2: /* ThunderX 81XX */ + case 0x0A3: /* ThunderX 83XX */ + *uarch = cpuinfo_uarch_thunderx; + break; + case 0x0AF: /* ThunderX2 99XX */ + *uarch = cpuinfo_uarch_thunderx2; + break; + default: + cpuinfo_log_warning("unknown Cavium CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; +#endif + case 'H': + *vendor = cpuinfo_vendor_huawei; + switch (midr_get_part(midr)) { +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 0xD01: /* Kunpeng 920 series */ + *uarch = cpuinfo_uarch_taishan_v110; + break; +#endif + case 0xD40: /* Kirin 980 Big/Medium cores -> Cortex-A76 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a76; + break; + default: + cpuinfo_log_warning("unknown Huawei CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; +#if CPUINFO_ARCH_ARM + case 'i': + *vendor = cpuinfo_vendor_intel; + switch (midr_get_part(midr) >> 8) { + case 2: /* PXA 210/25X/26X */ + case 4: /* PXA 27X */ + case 6: /* PXA 3XX */ + *uarch = cpuinfo_uarch_xscale; + break; + default: + cpuinfo_log_warning("unknown Intel CPU part 0x%03"PRIx32" ignored", 
midr_get_part(midr)); + } + break; +#endif /* CPUINFO_ARCH_ARM */ + case 'N': + *vendor = cpuinfo_vendor_nvidia; + switch (midr_get_part(midr)) { + case 0x000: + *uarch = cpuinfo_uarch_denver; + break; + case 0x003: + *uarch = cpuinfo_uarch_denver2; + break; + case 0x004: + *uarch = cpuinfo_uarch_carmel; + break; + default: + cpuinfo_log_warning("unknown Nvidia CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; +#if !defined(__ANDROID__) + case 'P': + *vendor = cpuinfo_vendor_apm; + switch (midr_get_part(midr)) { + case 0x000: + *uarch = cpuinfo_uarch_xgene; + break; + default: + cpuinfo_log_warning("unknown Applied Micro CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; +#endif + case 'Q': + *vendor = cpuinfo_vendor_qualcomm; + switch (midr_get_part(midr)) { +#if CPUINFO_ARCH_ARM + case 0x00F: + /* Mostly Scorpions, but some Cortex A5 may report this value as well */ + if (has_vfpv4) { + /* Unlike Scorpion, Cortex-A5 comes with VFPv4 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a5; + } else { + *uarch = cpuinfo_uarch_scorpion; + } + break; + case 0x02D: /* Dual-core Scorpions */ + *uarch = cpuinfo_uarch_scorpion; + break; + case 0x04D: + /* + * Dual-core Krait: + * - r1p0 -> Krait 200 + * - r1p4 -> Krait 200 + * - r2p0 -> Krait 300 + */ + case 0x06F: + /* + * Quad-core Krait: + * - r0p1 -> Krait 200 + * - r0p2 -> Krait 200 + * - r1p0 -> Krait 300 + * - r2p0 -> Krait 400 (Snapdragon 800 MSMxxxx) + * - r2p1 -> Krait 400 (Snapdragon 801 MSMxxxxPRO) + * - r3p1 -> Krait 450 + */ + *uarch = cpuinfo_uarch_krait; + break; +#endif /* CPUINFO_ARCH_ARM */ + case 0x201: /* Qualcomm Snapdragon 821: Low-power Kryo "Silver" */ + case 0x205: /* Qualcomm Snapdragon 820 & 821: High-performance Kryo "Gold" */ + case 0x211: /* Qualcomm Snapdragon 820: Low-power Kryo "Silver" */ + *uarch = cpuinfo_uarch_kryo; + break; + case 0x800: /* High-performance Kryo 260 (r10p2) / Kryo 280 (r10p1) "Gold" -> Cortex-A73 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a73; + break; + case 0x801: /* Low-power Kryo 260 / 280 "Silver" -> Cortex-A53 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a53; + break; + case 0x802: /* High-performance Kryo 385 "Gold" -> Cortex-A75 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a75; + break; + case 0x803: /* Low-power Kryo 385 "Silver" -> Cortex-A55r0 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a55r0; + break; + case 0x804: /* High-performance Kryo 485 "Gold" / "Gold Prime" -> Cortex-A76 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a76; + break; + case 0x805: /* Low-performance Kryo 485 "Silver" -> Cortex-A55 */ + *vendor = cpuinfo_vendor_arm; + *uarch = cpuinfo_uarch_cortex_a55; + break; +#if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) + case 0xC00: + *uarch = cpuinfo_uarch_falkor; + break; + case 0xC01: + *uarch = cpuinfo_uarch_saphira; + break; +#endif /* CPUINFO_ARCH_ARM64 && !defined(__ANDROID__) */ + default: + cpuinfo_log_warning("unknown Qualcomm CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; + case 'S': + *vendor = cpuinfo_vendor_samsung; + switch (midr & (CPUINFO_ARM_MIDR_VARIANT_MASK | CPUINFO_ARM_MIDR_PART_MASK)) { + case 0x00100010: + /* + * Exynos 8890 MIDR = 0x531F0011, assume Exynos M1 has: + * - CPU variant 0x1 + * - CPU part 0x001 + */ + *uarch = cpuinfo_uarch_exynos_m1; + break; + case 0x00400010: + /* + * Exynos 8895 MIDR = 0x534F0010, assume Exynos M2 has: + * - CPU variant 0x4 + * - CPU 
part 0x001 + */ + *uarch = cpuinfo_uarch_exynos_m2; + break; + case 0x00100020: + /* + * Exynos 9810 MIDR = 0x531F0020, assume Exynos M3 has: + * - CPU variant 0x1 + * - CPU part 0x002 + */ + *uarch = cpuinfo_uarch_exynos_m3; + break; + case 0x00100030: + /* + * Exynos 9820 MIDR = 0x531F0030, assume Exynos M4 has: + * - CPU variant 0x1 + * - CPU part 0x003 + */ + *uarch = cpuinfo_uarch_exynos_m4; + break; + case 0x00100040: + /* + * Exynos 9820 MIDR = 0x531F0040, assume Exynos M5 has: + * - CPU variant 0x1 + * - CPU part 0x004 + */ + *uarch = cpuinfo_uarch_exynos_m5; + break; + default: + cpuinfo_log_warning("unknown Samsung CPU variant 0x%01"PRIx32" part 0x%03"PRIx32" ignored", + midr_get_variant(midr), midr_get_part(midr)); + } + break; +#if CPUINFO_ARCH_ARM + case 'V': + *vendor = cpuinfo_vendor_marvell; + switch (midr_get_part(midr)) { + case 0x581: /* PJ4 / PJ4B */ + case 0x584: /* PJ4B-MP / PJ4C */ + *uarch = cpuinfo_uarch_pj4; + break; + default: + cpuinfo_log_warning("unknown Marvell CPU part 0x%03"PRIx32" ignored", midr_get_part(midr)); + } + break; +#endif /* CPUINFO_ARCH_ARM */ + default: + cpuinfo_log_warning("unknown CPU implementer '%c' (0x%02"PRIx32") with CPU part 0x%03"PRIx32" ignored", + (char) midr_get_implementer(midr), midr_get_implementer(midr), midr_get_part(midr)); + } +} diff --git a/source/3rdparty/cpuinfo/src/cache.c b/source/3rdparty/cpuinfo/src/cache.c new file mode 100644 index 0000000..b976b87 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/cache.c @@ -0,0 +1,18 @@ +#include + +#include +#include + + +uint32_t cpuinfo_compute_max_cache_size(const struct cpuinfo_processor* processor) { + if (processor->cache.l4 != NULL) { + return processor->cache.l4->size; + } else if (processor->cache.l3 != NULL) { + return processor->cache.l3->size; + } else if (processor->cache.l2 != NULL) { + return processor->cache.l2->size; + } else if (processor->cache.l1d != NULL) { + return processor->cache.l1d->size; + } + return 0; +} diff --git a/source/3rdparty/cpuinfo/src/cpuinfo/common.h b/source/3rdparty/cpuinfo/src/cpuinfo/common.h new file mode 100644 index 0000000..b2b404d --- /dev/null +++ b/source/3rdparty/cpuinfo/src/cpuinfo/common.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + + +#define CPUINFO_COUNT_OF(array) (sizeof(array) / sizeof(0[array])) + +#if defined(__GNUC__) + #define CPUINFO_LIKELY(condition) (__builtin_expect(!!(condition), 1)) + #define CPUINFO_UNLIKELY(condition) (__builtin_expect(!!(condition), 0)) +#else + #define CPUINFO_LIKELY(condition) (!!(condition)) + #define CPUINFO_UNLIKELY(condition) (!!(condition)) +#endif + +#ifndef CPUINFO_INTERNAL + #if defined(__ELF__) + #define CPUINFO_INTERNAL __attribute__((__visibility__("internal"))) + #elif defined(__MACH__) + #define CPUINFO_INTERNAL __attribute__((__visibility__("hidden"))) + #else + #define CPUINFO_INTERNAL + #endif +#endif + +#ifndef CPUINFO_PRIVATE + #if defined(__ELF__) + #define CPUINFO_PRIVATE __attribute__((__visibility__("hidden"))) + #elif defined(__MACH__) + #define CPUINFO_PRIVATE __attribute__((__visibility__("hidden"))) + #else + #define CPUINFO_PRIVATE + #endif +#endif diff --git a/source/3rdparty/cpuinfo/src/cpuinfo/internal-api.h b/source/3rdparty/cpuinfo/src/cpuinfo/internal-api.h new file mode 100644 index 0000000..9c23d7c --- /dev/null +++ b/source/3rdparty/cpuinfo/src/cpuinfo/internal-api.h @@ -0,0 +1,62 @@ +#pragma once + +#include +#include + +#if defined(_WIN32) || defined(__CYGWIN__) + #include +#endif + +#include +#include + + +enum cpuinfo_cache_level { + cpuinfo_cache_level_1i = 0, + cpuinfo_cache_level_1d = 1, + cpuinfo_cache_level_2 = 2, + cpuinfo_cache_level_3 = 3, + cpuinfo_cache_level_4 = 4, + cpuinfo_cache_level_max = 5, +}; + +extern CPUINFO_INTERNAL bool cpuinfo_is_initialized; + +extern CPUINFO_INTERNAL struct cpuinfo_processor* cpuinfo_processors; +extern CPUINFO_INTERNAL struct cpuinfo_core* cpuinfo_cores; +extern CPUINFO_INTERNAL struct cpuinfo_cluster* cpuinfo_clusters; +extern CPUINFO_INTERNAL struct cpuinfo_package* cpuinfo_packages; +extern CPUINFO_INTERNAL struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max]; + +extern CPUINFO_INTERNAL uint32_t cpuinfo_processors_count; +extern CPUINFO_INTERNAL uint32_t cpuinfo_cores_count; +extern CPUINFO_INTERNAL uint32_t cpuinfo_clusters_count; +extern CPUINFO_INTERNAL uint32_t cpuinfo_packages_count; +extern CPUINFO_INTERNAL uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max]; +extern CPUINFO_INTERNAL uint32_t cpuinfo_max_cache_size; + +#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + extern CPUINFO_INTERNAL struct cpuinfo_uarch_info* cpuinfo_uarchs; + extern CPUINFO_INTERNAL uint32_t cpuinfo_uarchs_count; +#else + extern CPUINFO_INTERNAL struct cpuinfo_uarch_info cpuinfo_global_uarch; +#endif + +#ifdef __linux__ + extern CPUINFO_INTERNAL uint32_t cpuinfo_linux_cpu_max; + extern CPUINFO_INTERNAL const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map; + extern CPUINFO_INTERNAL const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map; +#endif + +CPUINFO_PRIVATE void cpuinfo_x86_mach_init(void); +CPUINFO_PRIVATE void cpuinfo_x86_linux_init(void); +#if defined(_WIN32) || defined(__CYGWIN__) + CPUINFO_PRIVATE BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context); +#endif +CPUINFO_PRIVATE void cpuinfo_arm_mach_init(void); +CPUINFO_PRIVATE void cpuinfo_arm_linux_init(void); +CPUINFO_PRIVATE void cpuinfo_emscripten_init(void); + +CPUINFO_PRIVATE uint32_t cpuinfo_compute_max_cache_size(const struct cpuinfo_processor* processor); + +typedef void (*cpuinfo_processor_callback)(uint32_t); diff --git a/source/3rdparty/cpuinfo/src/cpuinfo/log.h b/source/3rdparty/cpuinfo/src/cpuinfo/log.h new file mode 100644 index 0000000..dac8cdb --- 
/dev/null +++ b/source/3rdparty/cpuinfo/src/cpuinfo/log.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +#include + +#define CPUINFO_LOG_DEBUG_PARSERS 0 + +#ifndef CPUINFO_LOG_LEVEL + #define CPUINFO_LOG_LEVEL CLOG_ERROR +#endif + +CLOG_DEFINE_LOG_DEBUG(cpuinfo_log_debug, "cpuinfo", CPUINFO_LOG_LEVEL); +CLOG_DEFINE_LOG_INFO(cpuinfo_log_info, "cpuinfo", CPUINFO_LOG_LEVEL); +CLOG_DEFINE_LOG_WARNING(cpuinfo_log_warning, "cpuinfo", CPUINFO_LOG_LEVEL); +CLOG_DEFINE_LOG_ERROR(cpuinfo_log_error, "cpuinfo", CPUINFO_LOG_LEVEL); +CLOG_DEFINE_LOG_FATAL(cpuinfo_log_fatal, "cpuinfo", CPUINFO_LOG_LEVEL); diff --git a/source/3rdparty/cpuinfo/src/cpuinfo/utils.h b/source/3rdparty/cpuinfo/src/cpuinfo/utils.h new file mode 100644 index 0000000..157baad --- /dev/null +++ b/source/3rdparty/cpuinfo/src/cpuinfo/utils.h @@ -0,0 +1,19 @@ +#pragma once + +#include + + +inline static uint32_t bit_length(uint32_t n) { + const uint32_t n_minus_1 = n - 1; + if (n_minus_1 == 0) { + return 0; + } else { + #ifdef _MSC_VER + unsigned long bsr; + _BitScanReverse(&bsr, n_minus_1); + return bsr + 1; + #else + return 32 - __builtin_clz(n_minus_1); + #endif + } +} diff --git a/source/3rdparty/cpuinfo/src/emscripten/init.c b/source/3rdparty/cpuinfo/src/emscripten/init.c new file mode 100644 index 0000000..ce4bdea --- /dev/null +++ b/source/3rdparty/cpuinfo/src/emscripten/init.c @@ -0,0 +1,277 @@ +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + + +static const volatile float infinity = INFINITY; + +static struct cpuinfo_package static_package = { }; + +static struct cpuinfo_cache static_x86_l3 = { + .size = 2 * 1024 * 1024, + .associativity = 16, + .sets = 2048, + .partitions = 1, + .line_size = 64, +}; + +void cpuinfo_emscripten_init(void) { + struct cpuinfo_processor* processors = NULL; + struct cpuinfo_core* cores = NULL; + struct cpuinfo_cluster* clusters = NULL; + struct cpuinfo_cache* l1i = NULL; + struct cpuinfo_cache* l1d = NULL; + struct cpuinfo_cache* l2 = NULL; + + const bool is_x86 = signbit(infinity - infinity); + + int logical_cores_count = emscripten_num_logical_cores(); + if (logical_cores_count <= 0) { + logical_cores_count = 1; + } + uint32_t processor_count = (uint32_t) logical_cores_count; + uint32_t core_count = processor_count; + uint32_t cluster_count = 1; + uint32_t big_cluster_core_count = core_count; + uint32_t processors_per_core = 1; + if (is_x86) { + if (processor_count % 2 == 0) { + processors_per_core = 2; + core_count = processor_count / 2; + big_cluster_core_count = core_count; + } + } else { + /* Assume ARM/ARM64 */ + if (processor_count > 4) { + /* Assume big.LITTLE architecture */ + cluster_count = 2; + big_cluster_core_count = processor_count >= 8 ? 4 : 2; + } + } + uint32_t l2_count = is_x86 ? 
core_count : cluster_count; + + processors = calloc(processor_count, sizeof(struct cpuinfo_processor)); + if (processors == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors", + processor_count * sizeof(struct cpuinfo_processor), processor_count); + goto cleanup; + } + cores = calloc(processor_count, sizeof(struct cpuinfo_core)); + if (cores == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores", + processor_count * sizeof(struct cpuinfo_core), processor_count); + goto cleanup; + } + clusters = calloc(cluster_count, sizeof(struct cpuinfo_cluster)); + if (clusters == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" clusters", + cluster_count * sizeof(struct cpuinfo_cluster), cluster_count); + goto cleanup; + } + + l1i = calloc(core_count, sizeof(struct cpuinfo_cache)); + if (l1i == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches", + core_count * sizeof(struct cpuinfo_cache), core_count); + goto cleanup; + } + + l1d = calloc(core_count, sizeof(struct cpuinfo_cache)); + if (l1d == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches", + core_count * sizeof(struct cpuinfo_cache), core_count); + goto cleanup; + } + + l2 = calloc(l2_count, sizeof(struct cpuinfo_cache)); + if (l2 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches", + l2_count * sizeof(struct cpuinfo_cache), l2_count); + goto cleanup; + } + + static_package.processor_count = processor_count; + static_package.core_count = core_count; + static_package.cluster_count = cluster_count; + if (is_x86) { + strncpy(static_package.name, "x86 vCPU", CPUINFO_PACKAGE_NAME_MAX); + } else { + strncpy(static_package.name, "ARM vCPU", CPUINFO_PACKAGE_NAME_MAX); + } + + for (uint32_t i = 0; i < core_count; i++) { + for (uint32_t j = 0; j < processors_per_core; j++) { + processors[i * processors_per_core + j] = (struct cpuinfo_processor) { + .smt_id = j, + .core = cores + i, + .cluster = clusters + (uint32_t) (i >= big_cluster_core_count), + .package = &static_package, + .cache.l1i = l1i + i, + .cache.l1d = l1d + i, + .cache.l2 = is_x86 ? l2 + i : l2 + (uint32_t) (i >= big_cluster_core_count), + .cache.l3 = is_x86 ? 
&static_x86_l3 : NULL, + }; + } + + cores[i] = (struct cpuinfo_core) { + .processor_start = i * processors_per_core, + .processor_count = processors_per_core, + .core_id = i, + .cluster = clusters + (uint32_t) (i >= big_cluster_core_count), + .package = &static_package, + .vendor = cpuinfo_vendor_unknown, + .uarch = cpuinfo_uarch_unknown, + .frequency = 0, + }; + + l1i[i] = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .sets = 128, + .partitions = 1, + .line_size = 64, + .processor_start = i * processors_per_core, + .processor_count = processors_per_core, + }; + + l1d[i] = (struct cpuinfo_cache) { + .size = 32 * 1024, + .associativity = 4, + .sets = 128, + .partitions = 1, + .line_size = 64, + .processor_start = i * processors_per_core, + .processor_count = processors_per_core, + }; + + if (is_x86) { + l2[i] = (struct cpuinfo_cache) { + .size = 256 * 1024, + .associativity = 8, + .sets = 512, + .partitions = 1, + .line_size = 64, + .processor_start = i * processors_per_core, + .processor_count = processors_per_core, + }; + } + } + + if (is_x86) { + clusters[0] = (struct cpuinfo_cluster) { + .processor_start = 0, + .processor_count = processor_count, + .core_start = 0, + .core_count = core_count, + .cluster_id = 0, + .package = &static_package, + .vendor = cpuinfo_vendor_unknown, + .uarch = cpuinfo_uarch_unknown, + .frequency = 0, + }; + + static_x86_l3.processor_count = processor_count; + } else { + clusters[0] = (struct cpuinfo_cluster) { + .processor_start = 0, + .processor_count = big_cluster_core_count, + .core_start = 0, + .core_count = big_cluster_core_count, + .cluster_id = 0, + .package = &static_package, + .vendor = cpuinfo_vendor_unknown, + .uarch = cpuinfo_uarch_unknown, + .frequency = 0, + }; + + l2[0] = (struct cpuinfo_cache) { + .size = 1024 * 1024, + .associativity = 8, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .processor_start = 0, + .processor_count = big_cluster_core_count, + }; + + if (cluster_count > 1) { + l2[1] = (struct cpuinfo_cache) { + .size = 256 * 1024, + .associativity = 8, + .sets = 512, + .partitions = 1, + .line_size = 64, + .processor_start = big_cluster_core_count, + .processor_count = processor_count - big_cluster_core_count, + }; + + clusters[1] = (struct cpuinfo_cluster) { + .processor_start = big_cluster_core_count, + .processor_count = processor_count - big_cluster_core_count, + .core_start = big_cluster_core_count, + .core_count = processor_count - big_cluster_core_count, + .cluster_id = 1, + .package = &static_package, + .vendor = cpuinfo_vendor_unknown, + .uarch = cpuinfo_uarch_unknown, + .frequency = 0, + }; + } + } + + /* Commit changes */ + cpuinfo_cache[cpuinfo_cache_level_1i] = l1i; + cpuinfo_cache[cpuinfo_cache_level_1d] = l1d; + cpuinfo_cache[cpuinfo_cache_level_2] = l2; + if (is_x86) { + cpuinfo_cache[cpuinfo_cache_level_3] = &static_x86_l3; + } + + cpuinfo_processors = processors; + cpuinfo_cores = cores; + cpuinfo_clusters = clusters; + cpuinfo_packages = &static_package; + + cpuinfo_cache_count[cpuinfo_cache_level_1i] = processor_count; + cpuinfo_cache_count[cpuinfo_cache_level_1d] = processor_count; + cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count; + if (is_x86) { + cpuinfo_cache_count[cpuinfo_cache_level_3] = 1; + } + + cpuinfo_global_uarch = (struct cpuinfo_uarch_info) { + .uarch = cpuinfo_uarch_unknown, + .processor_count = processor_count, + .core_count = core_count, + }; + + cpuinfo_processors_count = processor_count; + cpuinfo_cores_count = processor_count; + cpuinfo_clusters_count = 
cluster_count; + cpuinfo_packages_count = 1; + + cpuinfo_max_cache_size = is_x86 ? 128 * 1024 * 1024 : 8 * 1024 * 1024; + + cpuinfo_is_initialized = true; + + processors = NULL; + cores = NULL; + clusters = NULL; + l1i = l1d = l2 = NULL; + +cleanup: + free(processors); + free(cores); + free(clusters); + free(l1i); + free(l1d); + free(l2); +} diff --git a/source/3rdparty/cpuinfo/src/init.c b/source/3rdparty/cpuinfo/src/init.c new file mode 100644 index 0000000..d61e7be --- /dev/null +++ b/source/3rdparty/cpuinfo/src/init.c @@ -0,0 +1,59 @@ +#if defined(_WIN32) || defined(__CYGWIN__) + #include +#elif !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__) + #include +#endif + +#include +#include +#include + +#ifdef __APPLE__ + #include "TargetConditionals.h" +#endif + + +#if defined(_WIN32) || defined(__CYGWIN__) + static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT; +#elif !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__) + static pthread_once_t init_guard = PTHREAD_ONCE_INIT; +#else + static bool init_guard = false; +#endif + +bool CPUINFO_ABI cpuinfo_initialize(void) { +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__MACH__) && defined(__APPLE__) + pthread_once(&init_guard, &cpuinfo_x86_mach_init); + #elif defined(__linux__) + pthread_once(&init_guard, &cpuinfo_x86_linux_init); + #elif defined(_WIN32) || defined(__CYGWIN__) + InitOnceExecuteOnce(&init_guard, &cpuinfo_x86_windows_init, NULL, NULL); + #else + cpuinfo_log_error("operating system is not supported in cpuinfo"); + #endif +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + #if defined(__linux__) + pthread_once(&init_guard, &cpuinfo_arm_linux_init); + #elif defined(__MACH__) && defined(__APPLE__) + pthread_once(&init_guard, &cpuinfo_arm_mach_init); + #else + cpuinfo_log_error("operating system is not supported in cpuinfo"); + #endif +#elif CPUINFO_ARCH_ASMJS || CPUINFO_ARCH_WASM || CPUINFO_ARCH_WASMSIMD + #if defined(__EMSCRIPTEN_PTHREADS__) + pthread_once(&init_guard, &cpuinfo_emscripten_init); + #else + if (!init_guard) { + cpuinfo_emscripten_init(); + } + init_guard = true; + #endif +#else + cpuinfo_log_error("processor architecture is not supported in cpuinfo"); +#endif + return cpuinfo_is_initialized; +} + +void CPUINFO_ABI cpuinfo_deinitialize(void) { +} diff --git a/source/3rdparty/cpuinfo/src/linux/api.h b/source/3rdparty/cpuinfo/src/linux/api.h new file mode 100644 index 0000000..f55b8ac --- /dev/null +++ b/source/3rdparty/cpuinfo/src/linux/api.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include + +#include +#include + + +#define CPUINFO_LINUX_FLAG_PRESENT UINT32_C(0x00000001) +#define CPUINFO_LINUX_FLAG_POSSIBLE UINT32_C(0x00000002) +#define CPUINFO_LINUX_FLAG_MAX_FREQUENCY UINT32_C(0x00000004) +#define CPUINFO_LINUX_FLAG_MIN_FREQUENCY UINT32_C(0x00000008) +#define CPUINFO_LINUX_FLAG_SMT_ID UINT32_C(0x00000010) +#define CPUINFO_LINUX_FLAG_CORE_ID UINT32_C(0x00000020) +#define CPUINFO_LINUX_FLAG_PACKAGE_ID UINT32_C(0x00000040) +#define CPUINFO_LINUX_FLAG_APIC_ID UINT32_C(0x00000080) +#define CPUINFO_LINUX_FLAG_SMT_CLUSTER UINT32_C(0x00000100) +#define CPUINFO_LINUX_FLAG_CORE_CLUSTER UINT32_C(0x00000200) +#define CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER UINT32_C(0x00000400) +#define CPUINFO_LINUX_FLAG_PROC_CPUINFO UINT32_C(0x00000800) +#define CPUINFO_LINUX_FLAG_VALID UINT32_C(0x00001000) + + +typedef bool (*cpuinfo_cpulist_callback)(uint32_t, uint32_t, void*); +CPUINFO_INTERNAL bool cpuinfo_linux_parse_cpulist(const char* filename, cpuinfo_cpulist_callback callback, void* context); 
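Editor's note (not part of the upstream patch): the declaration above, together with the parser implementation added later in this patch (src/linux/cpulist.c), implies that the callback is invoked once per list entry with a half-open [start, end) processor range. A minimal usage sketch follows; it assumes the internal linux/api.h header is included, and the helper names (count_cpus_callback, count_present_cpus) are hypothetical. The sysfs path used is the same PRESENT_CPULIST_FILENAME defined later in src/linux/processors.c.

    /* Editorial sketch -- illustrative only, helper names are hypothetical. */
    #include <stdint.h>
    #include <stdbool.h>

    static bool count_cpus_callback(uint32_t list_start, uint32_t list_end, void* context) {
    	uint32_t* count = (uint32_t*) context;
    	*count += list_end - list_start;   /* each entry covers the half-open range [list_start, list_end) */
    	return true;                       /* keep parsing the remaining entries */
    }

    static uint32_t count_present_cpus(void) {
    	uint32_t count = 0;
    	if (!cpuinfo_linux_parse_cpulist("/sys/devices/system/cpu/present", count_cpus_callback, &count)) {
    		return 0;   /* the parser logs the failure and returns false if the file cannot be read */
    	}
    	return count;
    }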
+typedef bool (*cpuinfo_smallfile_callback)(const char*, const char*, void*); +CPUINFO_INTERNAL bool cpuinfo_linux_parse_small_file(const char* filename, size_t buffer_size, cpuinfo_smallfile_callback, void* context); +typedef bool (*cpuinfo_line_callback)(const char*, const char*, void*, uint64_t); +CPUINFO_INTERNAL bool cpuinfo_linux_parse_multiline_file(const char* filename, size_t buffer_size, cpuinfo_line_callback, void* context); + +CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_max_processors_count(void); +CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_max_possible_processor(uint32_t max_processors_count); +CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_max_present_processor(uint32_t max_processors_count); +CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_processor_min_frequency(uint32_t processor); +CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_processor_max_frequency(uint32_t processor); +CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_package_id(uint32_t processor, uint32_t package_id[restrict static 1]); +CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_core_id(uint32_t processor, uint32_t core_id[restrict static 1]); + +CPUINFO_INTERNAL bool cpuinfo_linux_detect_possible_processors(uint32_t max_processors_count, + uint32_t* processor0_flags, uint32_t processor_struct_size, uint32_t possible_flag); +CPUINFO_INTERNAL bool cpuinfo_linux_detect_present_processors(uint32_t max_processors_count, + uint32_t* processor0_flags, uint32_t processor_struct_size, uint32_t present_flag); + +typedef bool (*cpuinfo_siblings_callback)(uint32_t, uint32_t, uint32_t, void*); +CPUINFO_INTERNAL bool cpuinfo_linux_detect_core_siblings( + uint32_t max_processors_count, + uint32_t processor, + cpuinfo_siblings_callback callback, + void* context); +CPUINFO_INTERNAL bool cpuinfo_linux_detect_thread_siblings( + uint32_t max_processors_count, + uint32_t processor, + cpuinfo_siblings_callback callback, + void* context); + +extern CPUINFO_INTERNAL const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map; +extern CPUINFO_INTERNAL const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map; diff --git a/source/3rdparty/cpuinfo/src/linux/cpulist.c b/source/3rdparty/cpuinfo/src/linux/cpulist.c new file mode 100644 index 0000000..2871986 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/linux/cpulist.c @@ -0,0 +1,214 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if CPUINFO_MOCK + #include +#endif +#include +#include + + +/* + * Size, in chars, of the on-stack buffer used for parsing cpu lists. + * This is also the limit on the length of a single entry + * ( or -) + * in the cpu list. 
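+ * An entry is either a single CPU number or a dash-separated range, so a list such as
+ * "0-3,5-7" consists of the entries "0-3" and "5-7" (editorial example, matching the
+ * parse_entry() logic below).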
+ */ +#define BUFFER_SIZE 256 + + +/* Locale-independent */ +inline static bool is_whitespace(char c) { + switch (c) { + case ' ': + case '\t': + case '\n': + case '\r': + return true; + default: + return false; + } +} + +inline static const char* parse_number(const char* string, const char* end, uint32_t number_ptr[restrict static 1]) { + uint32_t number = 0; + while (string != end) { + const uint32_t digit = (uint32_t) (*string) - (uint32_t) '0'; + if (digit >= 10) { + break; + } + number = number * UINT32_C(10) + digit; + string += 1; + } + *number_ptr = number; + return string; +} + +inline static bool parse_entry(const char* entry_start, const char* entry_end, cpuinfo_cpulist_callback callback, void* context) { + /* Skip whitespace at the beginning of an entry */ + for (; entry_start != entry_end; entry_start++) { + if (!is_whitespace(*entry_start)) { + break; + } + } + /* Skip whitespace at the end of an entry */ + for (; entry_end != entry_start; entry_end--) { + if (!is_whitespace(entry_end[-1])) { + break; + } + } + + const size_t entry_length = (size_t) (entry_end - entry_start); + if (entry_length == 0) { + cpuinfo_log_warning("unexpected zero-length cpu list entry ignored"); + return false; + } + + #if CPUINFO_LOG_DEBUG_PARSERS + cpuinfo_log_debug("parse cpu list entry \"%.*s\" (%zu chars)", (int) entry_length, entry_start, entry_length); + #endif + uint32_t first_cpu, last_cpu; + + const char* number_end = parse_number(entry_start, entry_end, &first_cpu); + if (number_end == entry_start) { + /* Failed to parse the number; ignore the entry */ + cpuinfo_log_warning("invalid character '%c' in the cpu list entry \"%.*s\": entry is ignored", + entry_start[0], (int) entry_length, entry_start); + return false; + } else if (number_end == entry_end) { + /* Completely parsed the entry */ + #if CPUINFO_LOG_DEBUG_PARSERS + cpuinfo_log_debug("cpulist: call callback with list_start = %"PRIu32", list_end = %"PRIu32, + first_cpu, first_cpu + 1); + #endif + return callback(first_cpu, first_cpu + 1, context); + } + + /* Parse the second part of the entry */ + if (*number_end != '-') { + cpuinfo_log_warning("invalid character '%c' in the cpu list entry \"%.*s\": entry is ignored", + *number_end, (int) entry_length, entry_start); + return false; + } + + const char* number_start = number_end + 1; + number_end = parse_number(number_start, entry_end, &last_cpu); + if (number_end == number_start) { + /* Failed to parse the second number; ignore the entry */ + cpuinfo_log_warning("invalid character '%c' in the cpu list entry \"%.*s\": entry is ignored", + *number_start, (int) entry_length, entry_start); + return false; + } + + if (number_end != entry_end) { + /* Partially parsed the entry; ignore unparsed characters and continue with the parsed part */ + cpuinfo_log_warning("ignored invalid characters \"%.*s\" at the end of cpu list entry \"%.*s\"", + (int) (entry_end - number_end), number_start, (int) entry_length, entry_start); + } + + if (last_cpu < first_cpu) { + cpuinfo_log_warning("ignored cpu list entry \"%.*s\": invalid range %"PRIu32"-%"PRIu32, + (int) entry_length, entry_start, first_cpu, last_cpu); + return false; + } + + /* Parsed both parts of the entry; update CPU set */ + #if CPUINFO_LOG_DEBUG_PARSERS + cpuinfo_log_debug("cpulist: call callback with list_start = %"PRIu32", list_end = %"PRIu32, + first_cpu, last_cpu + 1); + #endif + return callback(first_cpu, last_cpu + 1, context); +} + +bool cpuinfo_linux_parse_cpulist(const char* filename, cpuinfo_cpulist_callback callback, void* 
context) { + bool status = true; + int file = -1; + char buffer[BUFFER_SIZE]; + #if CPUINFO_LOG_DEBUG_PARSERS + cpuinfo_log_debug("parsing cpu list from file %s", filename); + #endif + +#if CPUINFO_MOCK + file = cpuinfo_mock_open(filename, O_RDONLY); +#else + file = open(filename, O_RDONLY); +#endif + if (file == -1) { + cpuinfo_log_info("failed to open %s: %s", filename, strerror(errno)); + status = false; + goto cleanup; + } + + size_t position = 0; + const char* buffer_end = &buffer[BUFFER_SIZE]; + char* data_start = buffer; + ssize_t bytes_read; + do { +#if CPUINFO_MOCK + bytes_read = cpuinfo_mock_read(file, data_start, (size_t) (buffer_end - data_start)); +#else + bytes_read = read(file, data_start, (size_t) (buffer_end - data_start)); +#endif + if (bytes_read < 0) { + cpuinfo_log_info("failed to read file %s at position %zu: %s", filename, position, strerror(errno)); + status = false; + goto cleanup; + } + + position += (size_t) bytes_read; + const char* data_end = data_start + (size_t) bytes_read; + const char* entry_start = buffer; + + if (bytes_read == 0) { + /* No more data in the file: process the remaining text in the buffer as a single entry */ + const char* entry_end = data_end; + const bool entry_status = parse_entry(entry_start, entry_end, callback, context); + status &= entry_status; + } else { + const char* entry_end; + do { + /* Find the end of the entry, as indicated by a comma (',') */ + for (entry_end = entry_start; entry_end != data_end; entry_end++) { + if (*entry_end == ',') { + break; + } + } + + /* + * If we located separator at the end of the entry, parse it. + * Otherwise, there may be more data at the end; read the file once again. + */ + if (entry_end != data_end) { + const bool entry_status = parse_entry(entry_start, entry_end, callback, context); + status &= entry_status; + entry_start = entry_end + 1; + } + } while (entry_end != data_end); + + /* Move remaining partial entry data at the end to the beginning of the buffer */ + const size_t entry_length = (size_t) (entry_end - entry_start); + memmove(buffer, entry_start, entry_length); + data_start = &buffer[entry_length]; + } + } while (bytes_read != 0); + +cleanup: + if (file != -1) { +#if CPUINFO_MOCK + cpuinfo_mock_close(file); +#else + close(file); +#endif + file = -1; + } + return status; +} diff --git a/source/3rdparty/cpuinfo/src/linux/mockfile.c b/source/3rdparty/cpuinfo/src/linux/mockfile.c new file mode 100644 index 0000000..138acfe --- /dev/null +++ b/source/3rdparty/cpuinfo/src/linux/mockfile.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if !CPUINFO_MOCK + #error This file should be built only in mock mode +#endif + +#include +#include +#include +#include + + +static struct cpuinfo_mock_file* cpuinfo_mock_files = NULL; +static uint32_t cpuinfo_mock_file_count = 0; + + +void CPUINFO_ABI cpuinfo_mock_filesystem(struct cpuinfo_mock_file* files) { + cpuinfo_log_info("filesystem mocking enabled"); + uint32_t file_count = 0; + while (files[file_count].path != NULL) { + /* Indicate that file is not opened */ + files[file_count].offset = SIZE_MAX; + file_count += 1; + } + cpuinfo_mock_files = files; + cpuinfo_mock_file_count = file_count; +} + +int CPUINFO_ABI cpuinfo_mock_open(const char* path, int oflag) { + if (cpuinfo_mock_files == NULL) { + cpuinfo_log_warning("cpuinfo_mock_open called without mock filesystem; redictering to open"); + return open(path, oflag); + } + + for (uint32_t i = 0; i < 
cpuinfo_mock_file_count; i++) { + if (strcmp(cpuinfo_mock_files[i].path, path) == 0) { + if (oflag != O_RDONLY) { + errno = EACCES; + return -1; + } + if (cpuinfo_mock_files[i].offset != SIZE_MAX) { + errno = ENFILE; + return -1; + } + cpuinfo_mock_files[i].offset = 0; + return (int) i; + } + } + errno = ENOENT; + return -1; +} + +int CPUINFO_ABI cpuinfo_mock_close(int fd) { + if (cpuinfo_mock_files == NULL) { + cpuinfo_log_warning("cpuinfo_mock_close called without mock filesystem; redictering to close"); + return close(fd); + } + + if ((unsigned int) fd >= cpuinfo_mock_file_count) { + errno = EBADF; + return -1; + } + if (cpuinfo_mock_files[fd].offset == SIZE_MAX) { + errno = EBADF; + return -1; + } + cpuinfo_mock_files[fd].offset = SIZE_MAX; + return 0; +} + +ssize_t CPUINFO_ABI cpuinfo_mock_read(int fd, void* buffer, size_t capacity) { + if (cpuinfo_mock_files == NULL) { + cpuinfo_log_warning("cpuinfo_mock_read called without mock filesystem; redictering to read"); + return read(fd, buffer, capacity); + } + + if ((unsigned int) fd >= cpuinfo_mock_file_count) { + errno = EBADF; + return -1; + } + if (cpuinfo_mock_files[fd].offset == SIZE_MAX) { + errno = EBADF; + return -1; + } + + const size_t offset = cpuinfo_mock_files[fd].offset; + size_t count = cpuinfo_mock_files[fd].size - offset; + if (count > capacity) { + count = capacity; + } + memcpy(buffer, (void*) cpuinfo_mock_files[fd].content + offset, count); + cpuinfo_mock_files[fd].offset += count; + return (ssize_t) count; +} diff --git a/source/3rdparty/cpuinfo/src/linux/multiline.c b/source/3rdparty/cpuinfo/src/linux/multiline.c new file mode 100644 index 0000000..1feeb9b --- /dev/null +++ b/source/3rdparty/cpuinfo/src/linux/multiline.c @@ -0,0 +1,106 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if CPUINFO_MOCK + #include +#endif +#include +#include + + +bool cpuinfo_linux_parse_multiline_file(const char* filename, size_t buffer_size, cpuinfo_line_callback callback, void* context) +{ + int file = -1; + bool status = false; + char* buffer = (char*) alloca(buffer_size); + +#if CPUINFO_MOCK + file = cpuinfo_mock_open(filename, O_RDONLY); +#else + file = open(filename, O_RDONLY); +#endif + if (file == -1) { + cpuinfo_log_info("failed to open %s: %s", filename, strerror(errno)); + goto cleanup; + } + + /* Only used for error reporting */ + size_t position = 0; + uint64_t line_number = 1; + const char* buffer_end = &buffer[buffer_size]; + char* data_start = buffer; + ssize_t bytes_read; + do { +#if CPUINFO_MOCK + bytes_read = cpuinfo_mock_read(file, data_start, (size_t) (buffer_end - data_start)); +#else + bytes_read = read(file, data_start, (size_t) (buffer_end - data_start)); +#endif + if (bytes_read < 0) { + cpuinfo_log_info("failed to read file %s at position %zu: %s", + filename, position, strerror(errno)); + goto cleanup; + } + + position += (size_t) bytes_read; + const char* data_end = data_start + (size_t) bytes_read; + const char* line_start = buffer; + + if (bytes_read == 0) { + /* No more data in the file: process the remaining text in the buffer as a single entry */ + const char* line_end = data_end; + if (!callback(line_start, line_end, context, line_number)) { + goto cleanup; + } + } else { + const char* line_end; + do { + /* Find the end of the entry, as indicated by newline character ('\n') */ + for (line_end = line_start; line_end != data_end; line_end++) { + if (*line_end == '\n') { + break; + } + } + + /* + * If we located separator at the end of the 
entry, parse it. + * Otherwise, there may be more data at the end; read the file once again. + */ + if (line_end != data_end) { + if (!callback(line_start, line_end, context, line_number++)) { + goto cleanup; + } + line_start = line_end + 1; + } + } while (line_end != data_end); + + /* Move remaining partial line data at the end to the beginning of the buffer */ + const size_t line_length = (size_t) (line_end - line_start); + memmove(buffer, line_start, line_length); + data_start = &buffer[line_length]; + } + } while (bytes_read != 0); + + /* Commit */ + status = true; + +cleanup: + if (file != -1) { +#if CPUINFO_MOCK + cpuinfo_mock_close(file); +#else + close(file); +#endif + file = -1; + } + return status; +} diff --git a/source/3rdparty/cpuinfo/src/linux/processors.c b/source/3rdparty/cpuinfo/src/linux/processors.c new file mode 100644 index 0000000..aedba74 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/linux/processors.c @@ -0,0 +1,406 @@ +#include +#include +#include +#include +#include + +#if !defined(__ANDROID__) + /* + * sched.h is only used for CPU_SETSIZE constant. + * Android NDK headers before platform 21 do have this constant in sched.h + */ + #include +#endif + +#include +#include + + +#define STRINGIFY(token) #token + +#define KERNEL_MAX_FILENAME "/sys/devices/system/cpu/kernel_max" +#define KERNEL_MAX_FILESIZE 32 +#define FREQUENCY_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/cpufreq/cpuinfo_max_freq")) +#define MAX_FREQUENCY_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/cpufreq/cpuinfo_max_freq" +#define MIN_FREQUENCY_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/cpufreq/cpuinfo_min_freq" +#define FREQUENCY_FILESIZE 32 +#define PACKAGE_ID_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/physical_package_id")) +#define PACKAGE_ID_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/physical_package_id" +#define PACKAGE_ID_FILESIZE 32 +#define CORE_ID_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/core_id")) +#define CORE_ID_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id" +#define CORE_ID_FILESIZE 32 + +#define CORE_SIBLINGS_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/core_siblings_list")) +#define CORE_SIBLINGS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_siblings_list" +#define THREAD_SIBLINGS_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/thread_siblings_list")) +#define THREAD_SIBLINGS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/thread_siblings_list" + +#define POSSIBLE_CPULIST_FILENAME "/sys/devices/system/cpu/possible" +#define PRESENT_CPULIST_FILENAME "/sys/devices/system/cpu/present" + + +inline static const char* parse_number(const char* start, const char* end, uint32_t number_ptr[restrict static 1]) { + uint32_t number = 0; + const char* parsed = start; + for (; parsed != end; parsed++) { + const uint32_t digit = (uint32_t) (uint8_t) (*parsed) - (uint32_t) '0'; + if (digit >= 10) { + break; + } + number = number * UINT32_C(10) + digit; + } + *number_ptr = number; + return parsed; +} + +/* Locale-independent */ +inline static bool is_whitespace(char c) { + switch (c) { + case ' ': + case '\t': + case '\n': + case '\r': + return true; + default: + return false; + } +} + +#if defined(__ANDROID__) && !defined(CPU_SETSIZE) + /* + * Android NDK headers before platform 21 do not define 
CPU_SETSIZE, + * so we hard-code its value, as defined in platform 21 headers + */ + #if defined(__LP64__) + static const uint32_t default_max_processors_count = 1024; + #else + static const uint32_t default_max_processors_count = 32; + #endif +#else + static const uint32_t default_max_processors_count = CPU_SETSIZE; +#endif + +static bool uint32_parser(const char* text_start, const char* text_end, void* context) { + if (text_start == text_end) { + cpuinfo_log_error("failed to parse file %s: file is empty", KERNEL_MAX_FILENAME); + return false; + } + + uint32_t kernel_max = 0; + const char* parsed_end = parse_number(text_start, text_end, &kernel_max); + if (parsed_end == text_start) { + cpuinfo_log_error("failed to parse file %s: \"%.*s\" is not an unsigned number", + KERNEL_MAX_FILENAME, (int) (text_end - text_start), text_start); + return false; + } else { + for (const char* char_ptr = parsed_end; char_ptr != text_end; char_ptr++) { + if (!is_whitespace(*char_ptr)) { + cpuinfo_log_warning("non-whitespace characters \"%.*s\" following number in file %s are ignored", + (int) (text_end - char_ptr), char_ptr, KERNEL_MAX_FILENAME); + break; + } + } + } + + uint32_t* kernel_max_ptr = (uint32_t*) context; + *kernel_max_ptr = kernel_max; + return true; +} + +uint32_t cpuinfo_linux_get_max_processors_count(void) { + uint32_t kernel_max; + if (cpuinfo_linux_parse_small_file(KERNEL_MAX_FILENAME, KERNEL_MAX_FILESIZE, uint32_parser, &kernel_max)) { + cpuinfo_log_debug("parsed kernel_max value of %"PRIu32" from %s", kernel_max, KERNEL_MAX_FILENAME); + + if (kernel_max >= default_max_processors_count) { + cpuinfo_log_warning("kernel_max value of %"PRIu32" parsed from %s exceeds platform-default limit %"PRIu32, + kernel_max, KERNEL_MAX_FILENAME, default_max_processors_count - 1); + } + + return kernel_max + 1; + } else { + cpuinfo_log_warning("using platform-default max processors count = %"PRIu32, default_max_processors_count); + return default_max_processors_count; + } +} + +uint32_t cpuinfo_linux_get_processor_max_frequency(uint32_t processor) { + char max_frequency_filename[FREQUENCY_FILENAME_SIZE]; + const int chars_formatted = snprintf( + max_frequency_filename, FREQUENCY_FILENAME_SIZE, MAX_FREQUENCY_FILENAME_FORMAT, processor); + if ((unsigned int) chars_formatted >= FREQUENCY_FILENAME_SIZE) { + cpuinfo_log_warning("failed to format filename for max frequency of processor %"PRIu32, processor); + return 0; + } + + uint32_t max_frequency; + if (cpuinfo_linux_parse_small_file(max_frequency_filename, FREQUENCY_FILESIZE, uint32_parser, &max_frequency)) { + cpuinfo_log_debug("parsed max frequency value of %"PRIu32" KHz for logical processor %"PRIu32" from %s", + max_frequency, processor, max_frequency_filename); + return max_frequency; + } else { + cpuinfo_log_warning("failed to parse max frequency for processor %"PRIu32" from %s", + processor, max_frequency_filename); + return 0; + } +} + +uint32_t cpuinfo_linux_get_processor_min_frequency(uint32_t processor) { + char min_frequency_filename[FREQUENCY_FILENAME_SIZE]; + const int chars_formatted = snprintf( + min_frequency_filename, FREQUENCY_FILENAME_SIZE, MIN_FREQUENCY_FILENAME_FORMAT, processor); + if ((unsigned int) chars_formatted >= FREQUENCY_FILENAME_SIZE) { + cpuinfo_log_warning("failed to format filename for min frequency of processor %"PRIu32, processor); + return 0; + } + + uint32_t min_frequency; + if (cpuinfo_linux_parse_small_file(min_frequency_filename, FREQUENCY_FILESIZE, uint32_parser, &min_frequency)) { + cpuinfo_log_debug("parsed min 
frequency value of %"PRIu32" KHz for logical processor %"PRIu32" from %s", + min_frequency, processor, min_frequency_filename); + return min_frequency; + } else { + /* + * This error is less severe than parsing max frequency, because min frequency is only useful for clustering, + * while max frequency is also needed for peak FLOPS calculation. + */ + cpuinfo_log_info("failed to parse min frequency for processor %"PRIu32" from %s", + processor, min_frequency_filename); + return 0; + } +} + +bool cpuinfo_linux_get_processor_core_id(uint32_t processor, uint32_t core_id_ptr[restrict static 1]) { + char core_id_filename[PACKAGE_ID_FILENAME_SIZE]; + const int chars_formatted = snprintf( + core_id_filename, CORE_ID_FILENAME_SIZE, CORE_ID_FILENAME_FORMAT, processor); + if ((unsigned int) chars_formatted >= CORE_ID_FILENAME_SIZE) { + cpuinfo_log_warning("failed to format filename for core id of processor %"PRIu32, processor); + return 0; + } + + uint32_t core_id; + if (cpuinfo_linux_parse_small_file(core_id_filename, CORE_ID_FILESIZE, uint32_parser, &core_id)) { + cpuinfo_log_debug("parsed core id value of %"PRIu32" for logical processor %"PRIu32" from %s", + core_id, processor, core_id_filename); + *core_id_ptr = core_id; + return true; + } else { + cpuinfo_log_info("failed to parse core id for processor %"PRIu32" from %s", + processor, core_id_filename); + return false; + } +} + +bool cpuinfo_linux_get_processor_package_id(uint32_t processor, uint32_t package_id_ptr[restrict static 1]) { + char package_id_filename[PACKAGE_ID_FILENAME_SIZE]; + const int chars_formatted = snprintf( + package_id_filename, PACKAGE_ID_FILENAME_SIZE, PACKAGE_ID_FILENAME_FORMAT, processor); + if ((unsigned int) chars_formatted >= PACKAGE_ID_FILENAME_SIZE) { + cpuinfo_log_warning("failed to format filename for package id of processor %"PRIu32, processor); + return 0; + } + + uint32_t package_id; + if (cpuinfo_linux_parse_small_file(package_id_filename, PACKAGE_ID_FILESIZE, uint32_parser, &package_id)) { + cpuinfo_log_debug("parsed package id value of %"PRIu32" for logical processor %"PRIu32" from %s", + package_id, processor, package_id_filename); + *package_id_ptr = package_id; + return true; + } else { + cpuinfo_log_info("failed to parse package id for processor %"PRIu32" from %s", + processor, package_id_filename); + return false; + } +} + +static bool max_processor_number_parser(uint32_t processor_list_start, uint32_t processor_list_end, void* context) { + uint32_t* processor_number_ptr = (uint32_t*) context; + const uint32_t processor_list_last = processor_list_end - 1; + if (*processor_number_ptr < processor_list_last) { + *processor_number_ptr = processor_list_last; + } + return true; +} + +uint32_t cpuinfo_linux_get_max_possible_processor(uint32_t max_processors_count) { + uint32_t max_possible_processor = 0; + if (!cpuinfo_linux_parse_cpulist(POSSIBLE_CPULIST_FILENAME, max_processor_number_parser, &max_possible_processor)) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + cpuinfo_log_error("failed to parse the list of possible processors in %s", POSSIBLE_CPULIST_FILENAME); + #else + cpuinfo_log_warning("failed to parse the list of possible processors in %s", POSSIBLE_CPULIST_FILENAME); + #endif + return UINT32_MAX; + } + if (max_possible_processor >= max_processors_count) { + cpuinfo_log_warning( + "maximum possible processor number %"PRIu32" exceeds system limit %"PRIu32": truncating to the latter", + max_possible_processor, max_processors_count - 1); + max_possible_processor = max_processors_count - 1; + } + 
return max_possible_processor; +} + +uint32_t cpuinfo_linux_get_max_present_processor(uint32_t max_processors_count) { + uint32_t max_present_processor = 0; + if (!cpuinfo_linux_parse_cpulist(PRESENT_CPULIST_FILENAME, max_processor_number_parser, &max_present_processor)) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + cpuinfo_log_error("failed to parse the list of present processors in %s", PRESENT_CPULIST_FILENAME); + #else + cpuinfo_log_warning("failed to parse the list of present processors in %s", PRESENT_CPULIST_FILENAME); + #endif + return UINT32_MAX; + } + if (max_present_processor >= max_processors_count) { + cpuinfo_log_warning( + "maximum present processor number %"PRIu32" exceeds system limit %"PRIu32": truncating to the latter", + max_present_processor, max_processors_count - 1); + max_present_processor = max_processors_count - 1; + } + return max_present_processor; +} + +struct detect_processors_context { + uint32_t max_processors_count; + uint32_t* processor0_flags; + uint32_t processor_struct_size; + uint32_t detected_flag; +}; + +static bool detect_processor_parser(uint32_t processor_list_start, uint32_t processor_list_end, void* context) { + const uint32_t max_processors_count = ((struct detect_processors_context*) context)->max_processors_count; + const uint32_t* processor0_flags = ((struct detect_processors_context*) context)->processor0_flags; + const uint32_t processor_struct_size = ((struct detect_processors_context*) context)->processor_struct_size; + const uint32_t detected_flag = ((struct detect_processors_context*) context)->detected_flag; + + for (uint32_t processor = processor_list_start; processor < processor_list_end; processor++) { + if (processor >= max_processors_count) { + break; + } + *((uint32_t*) ((uintptr_t) processor0_flags + processor_struct_size * processor)) |= detected_flag; + } + return true; +} + +bool cpuinfo_linux_detect_possible_processors(uint32_t max_processors_count, + uint32_t* processor0_flags, uint32_t processor_struct_size, uint32_t possible_flag) +{ + struct detect_processors_context context = { + .max_processors_count = max_processors_count, + .processor0_flags = processor0_flags, + .processor_struct_size = processor_struct_size, + .detected_flag = possible_flag, + }; + if (cpuinfo_linux_parse_cpulist(POSSIBLE_CPULIST_FILENAME, detect_processor_parser, &context)) { + return true; + } else { + cpuinfo_log_warning("failed to parse the list of possible processors in %s", POSSIBLE_CPULIST_FILENAME); + return false; + } +} + +bool cpuinfo_linux_detect_present_processors(uint32_t max_processors_count, + uint32_t* processor0_flags, uint32_t processor_struct_size, uint32_t present_flag) +{ + struct detect_processors_context context = { + .max_processors_count = max_processors_count, + .processor0_flags = processor0_flags, + .processor_struct_size = processor_struct_size, + .detected_flag = present_flag, + }; + if (cpuinfo_linux_parse_cpulist(PRESENT_CPULIST_FILENAME, detect_processor_parser, &context)) { + return true; + } else { + cpuinfo_log_warning("failed to parse the list of present processors in %s", PRESENT_CPULIST_FILENAME); + return false; + } +} + +struct siblings_context { + const char* group_name; + uint32_t max_processors_count; + uint32_t processor; + cpuinfo_siblings_callback callback; + void* callback_context; +}; + +static bool siblings_parser(uint32_t sibling_list_start, uint32_t sibling_list_end, struct siblings_context* context) { + const char* group_name = context->group_name; + const uint32_t max_processors_count = 
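/*
 * Annotation, not part of the patch: detect_processor_parser above ORs a flag
 * into a 32-bit field of each listed processor, addressing that field through
 * a byte stride (processor_struct_size) from the first processor's flags word.
 * A hedged sketch of a caller, with a made-up processor struct and flag value:
 *
 *   struct hypothetical_linux_processor {
 *       uint32_t flags;       // bit flags; the value below is illustrative
 *       uint32_t package_id;
 *   };
 *   #define HYPOTHETICAL_FLAG_POSSIBLE UINT32_C(0x00000001)
 *
 *   static void mark_possible(struct hypothetical_linux_processor* processors, uint32_t count) {
 *       cpuinfo_linux_detect_possible_processors(
 *           count,                                          // max_processors_count
 *           &processors[0].flags,                           // processor0_flags
 *           sizeof(struct hypothetical_linux_processor),    // stride between flags words
 *           HYPOTHETICAL_FLAG_POSSIBLE);                    // flag to OR into each listed processor
 *   }
 */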
context->max_processors_count; + const uint32_t processor = context->processor; + + if (sibling_list_end > max_processors_count) { + cpuinfo_log_warning("ignore %s siblings %"PRIu32"-%"PRIu32" of processor %"PRIu32, + group_name, max_processors_count, sibling_list_end - 1, processor); + sibling_list_end = max_processors_count; + } + + return context->callback(processor, sibling_list_start, sibling_list_end, context->callback_context); +} + +bool cpuinfo_linux_detect_core_siblings( + uint32_t max_processors_count, + uint32_t processor, + cpuinfo_siblings_callback callback, + void* context) +{ + char core_siblings_filename[CORE_SIBLINGS_FILENAME_SIZE]; + const int chars_formatted = snprintf( + core_siblings_filename, CORE_SIBLINGS_FILENAME_SIZE, CORE_SIBLINGS_FILENAME_FORMAT, processor); + if ((unsigned int) chars_formatted >= CORE_SIBLINGS_FILENAME_SIZE) { + cpuinfo_log_warning("failed to format filename for core siblings of processor %"PRIu32, processor); + return false; + } + + struct siblings_context siblings_context = { + .group_name = "package", + .max_processors_count = max_processors_count, + .processor = processor, + .callback = callback, + .callback_context = context, + }; + if (cpuinfo_linux_parse_cpulist(core_siblings_filename, + (cpuinfo_cpulist_callback) siblings_parser, &siblings_context)) + { + return true; + } else { + cpuinfo_log_info("failed to parse the list of core siblings for processor %"PRIu32" from %s", + processor, core_siblings_filename); + return false; + } +} + +bool cpuinfo_linux_detect_thread_siblings( + uint32_t max_processors_count, + uint32_t processor, + cpuinfo_siblings_callback callback, + void* context) +{ + char thread_siblings_filename[THREAD_SIBLINGS_FILENAME_SIZE]; + const int chars_formatted = snprintf( + thread_siblings_filename, THREAD_SIBLINGS_FILENAME_SIZE, THREAD_SIBLINGS_FILENAME_FORMAT, processor); + if ((unsigned int) chars_formatted >= THREAD_SIBLINGS_FILENAME_SIZE) { + cpuinfo_log_warning("failed to format filename for thread siblings of processor %"PRIu32, processor); + return false; + } + + struct siblings_context siblings_context = { + .group_name = "core", + .max_processors_count = max_processors_count, + .processor = processor, + .callback = callback, + .callback_context = context, + }; + if (cpuinfo_linux_parse_cpulist(thread_siblings_filename, + (cpuinfo_cpulist_callback) siblings_parser, &siblings_context)) + { + return true; + } else { + cpuinfo_log_info("failed to parse the list of thread siblings for processor %"PRIu32" from %s", + processor, thread_siblings_filename); + return false; + } +} + diff --git a/source/3rdparty/cpuinfo/src/linux/smallfile.c b/source/3rdparty/cpuinfo/src/linux/smallfile.c new file mode 100644 index 0000000..98cde00 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/linux/smallfile.c @@ -0,0 +1,70 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if CPUINFO_MOCK + #include +#endif +#include +#include + + +bool cpuinfo_linux_parse_small_file(const char* filename, size_t buffer_size, cpuinfo_smallfile_callback callback, void* context) { + int file = -1; + bool status = false; + char* buffer = (char*) alloca(buffer_size); + + #if CPUINFO_LOG_DEBUG_PARSERS + cpuinfo_log_debug("parsing small file %s", filename); + #endif + +#if CPUINFO_MOCK + file = cpuinfo_mock_open(filename, O_RDONLY); +#else + file = open(filename, O_RDONLY); +#endif + if (file == -1) { + cpuinfo_log_info("failed to open %s: %s", filename, strerror(errno)); + goto cleanup; + } + + 
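/*
 * Annotation, not part of the patch: siblings_parser above forwards each
 * sibling range to a cpuinfo_siblings_callback as a half-open interval
 * [siblings_start, siblings_end) together with the processor whose
 * core_siblings_list / thread_siblings_list file was read. A hedged sketch of
 * such a callback (names are illustrative):
 *
 *   static bool count_siblings(uint32_t processor, uint32_t siblings_start,
 *                              uint32_t siblings_end, void* context) {
 *       uint32_t* total = (uint32_t*) context;
 *       *total += siblings_end - siblings_start;
 *       (void) processor;
 *       return true;   // keep processing further ranges from the same file
 *   }
 *
 *   // usage: uint32_t n = 0;
 *   //        cpuinfo_linux_detect_core_siblings(max_cpus, cpu, count_siblings, &n);
 */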
size_t buffer_position = 0; + ssize_t bytes_read; + do { +#if CPUINFO_MOCK + bytes_read = cpuinfo_mock_read(file, &buffer[buffer_position], buffer_size - buffer_position); +#else + bytes_read = read(file, &buffer[buffer_position], buffer_size - buffer_position); +#endif + if (bytes_read < 0) { + cpuinfo_log_info("failed to read file %s at position %zu: %s", filename, buffer_position, strerror(errno)); + goto cleanup; + } + buffer_position += (size_t) bytes_read; + if (buffer_position >= buffer_size) { + cpuinfo_log_error("failed to read file %s: insufficient buffer of size %zu", filename, buffer_size); + goto cleanup; + } + } while (bytes_read != 0); + + status = callback(buffer, &buffer[buffer_position], context); + +cleanup: + if (file != -1) { +#if CPUINFO_MOCK + cpuinfo_mock_close(file); +#else + close(file); +#endif + file = -1; + } + return status; +} diff --git a/source/3rdparty/cpuinfo/src/mach/api.h b/source/3rdparty/cpuinfo/src/mach/api.h new file mode 100644 index 0000000..fdef5bd --- /dev/null +++ b/source/3rdparty/cpuinfo/src/mach/api.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +#define CPUINFO_MACH_MAX_CACHE_LEVELS 8 + + +struct cpuinfo_mach_topology { + uint32_t packages; + uint32_t cores; + uint32_t threads; + uint32_t threads_per_cache[CPUINFO_MACH_MAX_CACHE_LEVELS]; +}; + + +struct cpuinfo_mach_topology cpuinfo_mach_detect_topology(void); diff --git a/source/3rdparty/cpuinfo/src/mach/topology.c b/source/3rdparty/cpuinfo/src/mach/topology.c new file mode 100644 index 0000000..b56343b --- /dev/null +++ b/source/3rdparty/cpuinfo/src/mach/topology.c @@ -0,0 +1,73 @@ +#include +#include +#include + +#include +#include + +#include +#include + +#include + + +struct cpuinfo_mach_topology cpuinfo_mach_detect_topology(void) { + int cores = 1; + size_t sizeof_cores = sizeof(cores); + if (sysctlbyname("hw.physicalcpu_max", &cores, &sizeof_cores, NULL, 0) != 0) { + cpuinfo_log_error("sysctlbyname(\"hw.physicalcpu_max\") failed: %s", strerror(errno)); + } else if (cores <= 0) { + cpuinfo_log_error("sysctlbyname(\"hw.physicalcpu_max\") returned invalid value %d", cores); + cores = 1; + } + + int threads = 1; + size_t sizeof_threads = sizeof(threads); + if (sysctlbyname("hw.logicalcpu_max", &threads, &sizeof_threads, NULL, 0) != 0) { + cpuinfo_log_error("sysctlbyname(\"hw.logicalcpu_max\") failed: %s", strerror(errno)); + } else if (threads <= 0) { + cpuinfo_log_error("sysctlbyname(\"hw.logicalcpu_max\") returned invalid value %d", threads); + threads = cores; + } + + int packages = 1; +#if !TARGET_OS_IPHONE + size_t sizeof_packages = sizeof(packages); + if (sysctlbyname("hw.packages", &packages, &sizeof_packages, NULL, 0) != 0) { + cpuinfo_log_error("sysctlbyname(\"hw.packages\") failed: %s", strerror(errno)); + } else if (packages <= 0) { + cpuinfo_log_error("sysctlbyname(\"hw.packages\") returned invalid value %d", packages); + packages = 1; + } +#endif + + cpuinfo_log_debug("mach topology: packages = %d, cores = %d, threads = %d", packages, (int) cores, (int) threads); + struct cpuinfo_mach_topology topology = { + .packages = (uint32_t) packages, + .cores = (uint32_t) cores, + .threads = (uint32_t) threads + }; + +#if !TARGET_OS_IPHONE + size_t cacheconfig_size = 0; + if (sysctlbyname("hw.cacheconfig", NULL, &cacheconfig_size, NULL, 0) != 0) { + cpuinfo_log_error("sysctlbyname(\"hw.cacheconfig\") failed: %s", strerror(errno)); + } else { + uint64_t* cacheconfig = alloca(cacheconfig_size); + if (sysctlbyname("hw.cacheconfig", cacheconfig, &cacheconfig_size, NULL, 0) != 0) { + 
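/*
 * Annotation, not part of the patch: a hedged sketch of how the Mach topology
 * detected below is typically consumed. The concrete numbers are made up, and
 * the interpretation of hw.cacheconfig (entry N = logical CPUs sharing one
 * level-N cache, entry 0 covering shared memory) is an assumption about the
 * sysctl, not something this file asserts.
 *
 *   struct cpuinfo_mach_topology topo = cpuinfo_mach_detect_topology();
 *   // hypothetical 1-package, 4-core, 8-thread part:
 *   //   topo.packages == 1, topo.cores == 4, topo.threads == 8
 *   //   topo.threads_per_cache[1] == 2   -> 8 / 2 == 4 L1 caches
 *   //   topo.threads_per_cache[2] == 2   -> 8 / 2 == 4 L2 caches
 *   //   topo.threads_per_cache[3] == 8   -> 8 / 8 == 1 shared L3
 */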
cpuinfo_log_error("sysctlbyname(\"hw.cacheconfig\") failed: %s", strerror(errno)); + } else { + size_t cache_configs = cacheconfig_size / sizeof(uint64_t); + cpuinfo_log_debug("mach hw.cacheconfig count: %zu", cache_configs); + if (cache_configs > CPUINFO_MACH_MAX_CACHE_LEVELS) { + cache_configs = CPUINFO_MACH_MAX_CACHE_LEVELS; + } + for (size_t i = 0; i < cache_configs; i++) { + cpuinfo_log_debug("mach hw.cacheconfig[%zu]: %"PRIu64, i, cacheconfig[i]); + topology.threads_per_cache[i] = cacheconfig[i]; + } + } + } +#endif + return topology; +} diff --git a/source/3rdparty/cpuinfo/src/x86/api.h b/source/3rdparty/cpuinfo/src/x86/api.h new file mode 100644 index 0000000..213c2d8 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/api.h @@ -0,0 +1,159 @@ +#pragma once + +#include +#include + +#include +#include + + +struct cpuid_regs { + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; +}; + +struct cpuinfo_x86_cache { + uint32_t size; + uint32_t associativity; + uint32_t sets; + uint32_t partitions; + uint32_t line_size; + uint32_t flags; + uint32_t apic_bits; +}; + +struct cpuinfo_x86_caches { + struct cpuinfo_trace_cache trace; + struct cpuinfo_x86_cache l1i; + struct cpuinfo_x86_cache l1d; + struct cpuinfo_x86_cache l2; + struct cpuinfo_x86_cache l3; + struct cpuinfo_x86_cache l4; + uint32_t prefetch_size; +}; + +struct cpuinfo_x86_model_info { + uint32_t model; + uint32_t family; + + uint32_t base_model; + uint32_t base_family; + uint32_t stepping; + uint32_t extended_model; + uint32_t extended_family; + uint32_t processor_type; +}; + +struct cpuinfo_x86_topology { + uint32_t apic_id; + uint32_t thread_bits_offset; + uint32_t thread_bits_length; + uint32_t core_bits_offset; + uint32_t core_bits_length; +}; + +struct cpuinfo_x86_processor { + uint32_t cpuid; + enum cpuinfo_vendor vendor; + enum cpuinfo_uarch uarch; +#ifdef __linux__ + int linux_id; +#endif + struct cpuinfo_x86_caches cache; + struct { + struct cpuinfo_tlb itlb_4KB; + struct cpuinfo_tlb itlb_2MB; + struct cpuinfo_tlb itlb_4MB; + struct cpuinfo_tlb dtlb0_4KB; + struct cpuinfo_tlb dtlb0_2MB; + struct cpuinfo_tlb dtlb0_4MB; + struct cpuinfo_tlb dtlb_4KB; + struct cpuinfo_tlb dtlb_2MB; + struct cpuinfo_tlb dtlb_4MB; + struct cpuinfo_tlb dtlb_1GB; + struct cpuinfo_tlb stlb2_4KB; + struct cpuinfo_tlb stlb2_2MB; + struct cpuinfo_tlb stlb2_1GB; + } tlb; + struct cpuinfo_x86_topology topology; + char brand_string[CPUINFO_PACKAGE_NAME_MAX]; +}; + +CPUINFO_INTERNAL void cpuinfo_x86_init_processor(struct cpuinfo_x86_processor* processor); + +CPUINFO_INTERNAL enum cpuinfo_vendor cpuinfo_x86_decode_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx); +CPUINFO_INTERNAL struct cpuinfo_x86_model_info cpuinfo_x86_decode_model_info(uint32_t eax); +CPUINFO_INTERNAL enum cpuinfo_uarch cpuinfo_x86_decode_uarch( + enum cpuinfo_vendor vendor, + const struct cpuinfo_x86_model_info* model_info); + +CPUINFO_INTERNAL struct cpuinfo_x86_isa cpuinfo_x86_detect_isa( + const struct cpuid_regs basic_info, const struct cpuid_regs extended_info, + uint32_t max_base_index, uint32_t max_extended_index, + enum cpuinfo_vendor vendor, enum cpuinfo_uarch uarch); + +CPUINFO_INTERNAL void cpuinfo_x86_detect_topology( + uint32_t max_base_index, + uint32_t max_extended_index, + struct cpuid_regs leaf1, + struct cpuinfo_x86_topology* topology); + +CPUINFO_INTERNAL void cpuinfo_x86_detect_cache( + uint32_t max_base_index, uint32_t max_extended_index, + bool amd_topology_extensions, + enum cpuinfo_vendor vendor, + const struct cpuinfo_x86_model_info* 
model_info, + struct cpuinfo_x86_caches* cache, + struct cpuinfo_tlb* itlb_4KB, + struct cpuinfo_tlb* itlb_2MB, + struct cpuinfo_tlb* itlb_4MB, + struct cpuinfo_tlb* dtlb0_4KB, + struct cpuinfo_tlb* dtlb0_2MB, + struct cpuinfo_tlb* dtlb0_4MB, + struct cpuinfo_tlb* dtlb_4KB, + struct cpuinfo_tlb* dtlb_2MB, + struct cpuinfo_tlb* dtlb_4MB, + struct cpuinfo_tlb* dtlb_1GB, + struct cpuinfo_tlb* stlb2_4KB, + struct cpuinfo_tlb* stlb2_2MB, + struct cpuinfo_tlb* stlb2_1GB, + uint32_t* log2_package_cores_max); + +CPUINFO_INTERNAL void cpuinfo_x86_decode_cache_descriptor( + uint8_t descriptor, enum cpuinfo_vendor vendor, + const struct cpuinfo_x86_model_info* model_info, + struct cpuinfo_x86_caches* cache, + struct cpuinfo_tlb* itlb_4KB, + struct cpuinfo_tlb* itlb_2MB, + struct cpuinfo_tlb* itlb_4MB, + struct cpuinfo_tlb* dtlb0_4KB, + struct cpuinfo_tlb* dtlb0_2MB, + struct cpuinfo_tlb* dtlb0_4MB, + struct cpuinfo_tlb* dtlb_4KB, + struct cpuinfo_tlb* dtlb_2MB, + struct cpuinfo_tlb* dtlb_4MB, + struct cpuinfo_tlb* dtlb_1GB, + struct cpuinfo_tlb* stlb2_4KB, + struct cpuinfo_tlb* stlb2_2MB, + struct cpuinfo_tlb* stlb2_1GB, + uint32_t* prefetch_size); + +CPUINFO_INTERNAL bool cpuinfo_x86_decode_deterministic_cache_parameters( + struct cpuid_regs regs, + struct cpuinfo_x86_caches* cache, + uint32_t* package_cores_max); + +CPUINFO_INTERNAL bool cpuinfo_x86_decode_cache_properties( + struct cpuid_regs regs, + struct cpuinfo_x86_caches* cache); + +CPUINFO_INTERNAL uint32_t cpuinfo_x86_normalize_brand_string( + const char raw_name[48], + char normalized_name[48]); + +CPUINFO_INTERNAL uint32_t cpuinfo_x86_format_package_name( + enum cpuinfo_vendor vendor, + const char normalized_brand_string[48], + char package_name[CPUINFO_PACKAGE_NAME_MAX]); diff --git a/source/3rdparty/cpuinfo/src/x86/cache/descriptor.c b/source/3rdparty/cpuinfo/src/x86/cache/descriptor.c new file mode 100644 index 0000000..69d38cc --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/cache/descriptor.c @@ -0,0 +1,1726 @@ +#include + +#include +#include + + +void cpuinfo_x86_decode_cache_descriptor( + uint8_t descriptor, enum cpuinfo_vendor vendor, + const struct cpuinfo_x86_model_info* model_info, + struct cpuinfo_x86_caches* cache, + struct cpuinfo_tlb* itlb_4KB, + struct cpuinfo_tlb* itlb_2MB, + struct cpuinfo_tlb* itlb_4MB, + struct cpuinfo_tlb* dtlb0_4KB, + struct cpuinfo_tlb* dtlb0_2MB, + struct cpuinfo_tlb* dtlb0_4MB, + struct cpuinfo_tlb* dtlb_4KB, + struct cpuinfo_tlb* dtlb_2MB, + struct cpuinfo_tlb* dtlb_4MB, + struct cpuinfo_tlb* dtlb_1GB, + struct cpuinfo_tlb* stlb2_4KB, + struct cpuinfo_tlb* stlb2_2MB, + struct cpuinfo_tlb* stlb2_1GB, + uint32_t* prefetch_size) +{ + /* + * Descriptors are parsed according to: + * - Application Note 485: Intel Processor Indentification and CPUID Instruction, May 2012, Order Number 241618-039 + * - Intel 64 and IA-32 Architectures Software Developer’s Manual, Volume 2 (2A, 2B, 2C & 2D): Instruction Set + * Reference, A-Z, December 2016. 
Order Number: 325383-061US + * - Cyrix CPU Detection Guide, Preliminary Revision 1.01 + * - Geode(TM) GX1 Processor Series: Low Power Integrated x86 Solution + */ + switch (descriptor) { + case 0x01: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte pages, 4-way set associative, 32 entries" + * Application Note 485: + * "Instruction TLB: 4-KB Pages, 4-way set associative, 32 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x02: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 MByte pages, fully associative, 2 entries" + * Application Note 485: + * "Instruction TLB: 4-MB Pages, fully associative, 2 entries" + */ + *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 2, + .associativity = 2, + .pages = CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x03: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte pages, 4-way set associative, 64 entries" + * Application Note 485: + * "Data TLB: 4-KB Pages, 4-way set associative, 64 entries" + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 64, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x04: + /* + * Intel ISA Reference: + * "Data TLB: 4 MByte pages, 4-way set associative, 8 entries" + * Application Note 485: + * "Data TLB: 4-MB Pages, 4-way set associative, 8 entries" + */ + *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 8, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x05: + /* + * Intel ISA Reference: + * "Data TLB1: 4 MByte pages, 4-way set associative, 32 entries" + * Application Note 485: + * "Data TLB: 4-MB Pages, 4-way set associative, 32 entries" + */ + *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x06: + /* + * Intel ISA Reference: + * "1st-level instruction cache: 8 KBytes, 4-way set associative, 32 byte line size" + * Application Note 485: + * "1st-level instruction cache: 8-KB, 4-way set associative, 32-byte line size" + */ + cache->l1i = (struct cpuinfo_x86_cache) { + .size = 8 * 1024, + .associativity = 4, + .sets = 64, + .partitions = 1, + .line_size = 32, + }; + break; + case 0x08: + /* + * Intel ISA Reference: + * "1st-level instruction cache: 16 KBytes, 4-way set associative, 32 byte line size" + * Application Note 485: + * "1st-level instruction cache: 16-KB, 4-way set associative, 32-byte line size" + */ + cache->l1i = (struct cpuinfo_x86_cache) { + .size = 16 * 1024, + .associativity = 4, + .sets = 128, + .partitions = 1, + .line_size = 32, + }; + break; + case 0x09: + /* + * Intel ISA Reference: + * "1st-level instruction cache: 32KBytes, 4-way set associative, 64 byte line size" + * Application Note 485: + * "1st-level Instruction Cache: 32-KB, 4-way set associative, 64-byte line size" + */ + cache->l1i = (struct cpuinfo_x86_cache) { + .size = 32 * 1024, + .associativity = 4, + .sets = 128, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x0A: + /* + * Intel ISA Reference: + * "1st-level data cache: 8 KBytes, 2-way set associative, 32 byte line size" + * Application Note 485: + * "1st-level data cache: 8-KB, 2-way set associative, 32-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 8 * 1024, + .associativity = 2, + .sets = 128, + .partitions = 1, + .line_size = 32, + }; + break; + case 0x0B: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 MByte pages, 4-way set associative, 4 entries" + * Application Note 485: + * "Instruction 
TLB: 4-MB pages, 4-way set associative, 4 entries" + */ + *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 4, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x0C: + /* + * Intel ISA Reference: + * "1st-level data cache: 16 KBytes, 4-way set associative, 32 byte line size" + * Application Note 485: + * "1st-level data cache: 16-KB, 4-way set associative, 32-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 16 * 1024, + .associativity = 4, + .sets = 128, + .partitions = 1, + .line_size = 32, + }; + break; + case 0x0D: + /* + * Intel ISA Reference: + * "1st-level data cache: 16 KBytes, 4-way set associative, 64 byte line size" + * Application Note 485: + * "1st-level Data Cache: 16-KB, 4-way set associative, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 16 * 1024, + .associativity = 4, + .sets = 64, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x0E: + /* + * Intel ISA Reference: + * "1st-level data cache: 24 KBytes, 6-way set associative, 64 byte line size" + * Application Note 485: + * "1st-level Data Cache: 24-KB, 6-way set associative, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 24 * 1024, + .associativity = 6, + .sets = 64, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x1D: + /* + * Intel ISA Reference: + * "2nd-level cache: 128 KBytes, 2-way set associative, 64 byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 128 * 1024, + .associativity = 2, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + case 0x21: + /* + * Intel ISA Reference: + * "2nd-level cache: 256 KBytes, 8-way set associative, 64 byte line size" + * Application Note 485: + * "2nd-level cache: 256-KB, 8-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 256 * 1024, + .associativity = 8, + .sets = 512, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x22: + /* + * Intel ISA Reference: + * "3rd-level cache: 512 KBytes, 4-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "3rd-level cache: 512-KB, 4-way set associative, sectored cache, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 4, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x23: + /* + * Intel ISA Reference: + * "3rd-level cache: 1 MBytes, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "3rd-level cache: 1-MB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 8, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x24: + /* + * Intel ISA Reference: + * "2nd-level cache: 1 MBytes, 16-way set associative, 64 byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 16, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x25: + /* + * Intel ISA Reference: + * "3rd-level cache: 2 MBytes, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "3rd-level cache: 2-MB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { 
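/*
 * Annotation, not part of the patch: throughout this descriptor table the
 * .sets value is consistent with the other fields via
 *
 *     sets = size / (associativity * partitions * line_size)
 *
 * e.g. descriptor 0x21: 256 KB / (8 ways * 1 partition * 64 B) = 512 sets,
 * and descriptor 0x22: 512 KB / (4 * 1 * 64) = 2048 sets.
 */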
+ .size = 2 * 1024 * 1024, + .associativity = 8, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x29: + /* + * Intel ISA Reference: + * "3rd-level cache: 4 MBytes, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "3rd-level cache: 4-MB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 4 * 1024 * 1024, + .associativity = 8, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x2C: + /* + * Intel ISA Reference: + * "1st-level data cache: 32 KBytes, 8-way set associative, 64 byte line size" + * Application Note 485: + * "1st-level data cache: 32-KB, 8-way set associative, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 32 * 1024, + .associativity = 8, + .sets = 64, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x30: + /* + * Intel ISA Reference: + * "1st-level instruction cache: 32 KBytes, 8-way set associative, 64 byte line size" + * Application Note 485: + * "1st-level instruction cache: 32-KB, 8-way set associative, 64-byte line size" + */ + cache->l1i = (struct cpuinfo_x86_cache) { + .size = 32 * 1024, + .associativity = 8, + .sets = 64, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x39: + /* Where does this come from? */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 128 * 1024, + .associativity = 4, + .sets = 512, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x3A: + /* Where does this come from? */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 192 * 1024, + .associativity = 6, + .sets = 512, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x3B: + /* Where does this come from? */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 128 * 1024, + .associativity = 2, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x3C: + /* Where does this come from? */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 256 * 1024, + .associativity = 4, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x3D: + /* Where does this come from? */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 384 * 1024, + .associativity = 6, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x3E: + /* Where does this come from? 
*/ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 4, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x40: + /* + * Intel ISA Reference: + * "No 2nd-level cache or, if processor contains a valid 2nd-level cache, no 3rd-level cache" + * Application Note 485: + * "No 2nd-level cache or, if processor contains a valid 2nd-level cache, no 3rd-level cache" + */ + break; + case 0x41: + /* + * Intel ISA Reference: + * "2nd-level cache: 128 KBytes, 4-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 128-KB, 4-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 128 * 1024, + .associativity = 4, + .sets = 1024, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x42: + /* + * Intel ISA Reference: + * "2nd-level cache: 256 KBytes, 4-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 256-KB, 4-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 256 * 1024, + .associativity = 4, + .sets = 2048, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x43: + /* + * Intel ISA Reference: + * "2nd-level cache: 512 KBytes, 4-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 512-KB, 4-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 4, + .sets = 4096, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x44: + /* + * Intel ISA Reference: + * "2nd-level cache: 1 MByte, 4-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 1-MB, 4-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 4, + .sets = 8192, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x45: + /* + * Intel ISA Reference: + * "2nd-level cache: 2 MByte, 4-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 2-MB, 4-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 2 * 1024 * 1024, + .associativity = 4, + .sets = 16384, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x46: + /* + * Intel ISA Reference: + * "3rd-level cache: 4 MByte, 4-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 4-MB, 4-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 4 * 1024 * 1024, + .associativity = 4, + .sets = 16384, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x47: + /* + * Intel ISA Reference: + * "3rd-level cache: 8 MByte, 8-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 8-MB, 8-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 8 * 1024 * 1024, + .associativity = 8, + .sets = 16384, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x48: + /* + * Intel ISA Reference: + * "2nd-level cache: 3MByte, 12-way set associative, 64 byte line size" + * Application Note 485: + * "2nd-level cache: 3-MB, 12-way set associative, 
64-byte line size, unified on-die" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 3 * 1024 * 1024, + .associativity = 12, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x49: + /* + * Intel ISA Reference: + * "3rd-level cache: 4MB, 16-way set associative, 64-byte line size (Intel Xeon processor MP, + * Family 0FH, Model 06H); 2nd-level cache: 4 MByte, 16-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 4-MB, 16-way set associative, 64-byte line size (Intel Xeon processor MP, + * Family 0Fh, Model 06h) + * 2nd-level cache: 4-MB, 16-way set associative, 64-byte line size" + */ + if ((vendor == cpuinfo_vendor_intel) && (model_info->model == 0x06) && (model_info->family == 0x0F)) { + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 4 * 1024 * 1024, + .associativity = 16, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + } else { + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 4 * 1024 * 1024, + .associativity = 16, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + } + break; + case 0x4A: + /* + * Intel ISA Reference: + * "3rd-level cache: 6MByte, 12-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 6-MB, 12-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 6 * 1024 * 1024, + .associativity = 12, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x4B: + /* + * Intel ISA Reference: + * "3rd-level cache: 8MByte, 16-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 8-MB, 16-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 8 * 1024 * 1024, + .associativity = 16, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x4C: + /* + * Intel ISA Reference: + * "3rd-level cache: 12MByte, 12-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 12-MB, 12-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 12 * 1024 * 1024, + .associativity = 12, + .sets = 16384, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x4D: + /* + * Intel ISA Reference: + * "3rd-level cache: 16MByte, 16-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 16-MB, 16-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 16 * 1024 * 1024, + .associativity = 16, + .sets = 16384, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x4E: + /* + * Intel ISA Reference: + * "2nd-level cache: 6MByte, 24-way set associative, 64 byte line size" + * Application Note 485: + * "2nd-level cache: 6-MB, 24-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 6 * 1024 * 1024, + .associativity = 24, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x4F: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte pages, 32 entries" + * Application Note 485: + * "Instruction TLB: 4-KB pages, 32 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 32, + /* Assume full associativity from nearby 
entries: manual lacks detail */ + .associativity = 32, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x50: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages, 64 entries" + * Application Note 485: + * "Instruction TLB: 4-KB, 2-MB or 4-MB pages, fully associative, 64 entries" + */ + *itlb_4KB = *itlb_2MB = *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 64, + .associativity = 64, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x51: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages, 128 entries" + * Application Note 485: + * "Instruction TLB: 4-KB, 2-MB or 4-MB pages, fully associative, 128 entries" + */ + *itlb_4KB = *itlb_2MB = *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 128, + .associativity = 128, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x52: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages, 256 entries" + * Application Note 485: + * "Instruction TLB: 4-KB, 2-MB or 4-MB pages, fully associative, 256 entries" + */ + *itlb_4KB = *itlb_2MB = *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 256, + .associativity = 256, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x55: + /* + * Intel ISA Reference: + * "Instruction TLB: 2-MByte or 4-MByte pages, fully associative, 7 entries" + * Application Note 485: + * "Instruction TLB: 2-MB or 4-MB pages, fully associative, 7 entries" + */ + *itlb_2MB = *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 7, + .associativity = 7, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x56: + /* + * Intel ISA Reference: + * "Data TLB0: 4 MByte pages, 4-way set associative, 16 entries" + * Application Note 485: + * "L1 Data TLB: 4-MB pages, 4-way set associative, 16 entries" + */ + *dtlb0_4MB = (struct cpuinfo_tlb) { + .entries = 16, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x57: + /* + * Intel ISA Reference: + * "Data TLB0: 4 KByte pages, 4-way associative, 16 entries" + * Application Note 485: + * "L1 Data TLB: 4-KB pages, 4-way set associative, 16 entries" + */ + *dtlb0_4KB = (struct cpuinfo_tlb) { + .entries = 16, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x59: + /* + * Intel ISA Reference: + * "Data TLB0: 4 KByte pages, fully associative, 16 entries" + * Application Note 485: + * "Data TLB0: 4-KB pages, fully associative, 16 entries" + */ + *dtlb0_4KB = (struct cpuinfo_tlb) { + .entries = 16, + .associativity = 16, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x5A: + /* + * Intel ISA Reference: + * "Data TLB0: 2 MByte or 4 MByte pages, 4-way set associative, 32 entries" + * Application Note 485: + * "Data TLB0: 2-MB or 4-MB pages, 4-way associative, 32 entries" + */ + *dtlb0_2MB = *dtlb0_4MB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x5B: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte and 4 MByte pages, 64 entries" + * Application Note 485: + * "Data TLB: 4-KB or 4-MB pages, fully associative, 64 entries" + */ + *dtlb_4KB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 64, + .associativity = 64, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x5C: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte and 4 MByte 
pages, 128 entries" + * Application Note 485: + * "Data TLB: 4-KB or 4-MB pages, fully associative, 128 entries" + */ + *dtlb_4KB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 128, + .associativity = 128, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x5D: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte and 4 MByte pages, 256 entries" + * Application Note 485: + * "Data TLB: 4-KB or 4-MB pages, fully associative, 256 entries" + */ + *dtlb_4KB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 256, + .associativity = 256, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x60: + /* + * Application Note 485: + * "1st-level data cache: 16-KB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 16 * 1024, + .associativity = 8, + .sets = 32, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x61: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte pages, fully associative, 48 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 48, + .associativity = 48, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x63: + /* + * Intel ISA Reference: + * "Data TLB: 2 MByte or 4 MByte pages, 4-way set associative, 32 entries and + * a separate array with 1 GByte pages, 4-way set associative, 4 entries" + */ + *dtlb_2MB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + *dtlb_1GB = (struct cpuinfo_tlb) { + .entries = 4, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_1GB, + }; + break; + case 0x64: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte pages, 4-way set associative, 512 entries" + * + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 512, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x66: + /* + * Application Note 485: + * "1st-level data cache: 8-KB, 4-way set associative, sectored cache, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 8 * 1024, + .associativity = 4, + .sets = 32, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x67: + /* + * Application Note 485: + * "1st-level data cache: 16-KB, 4-way set associative, sectored cache, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 16 * 1024, + .associativity = 4, + .sets = 64, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x68: + /* + * Application Note 485: + * "1st-level data cache: 32-KB, 4 way set associative, sectored cache, 64-byte line size" + */ + cache->l1d = (struct cpuinfo_x86_cache) { + .size = 32 * 1024, + .associativity = 4, + .sets = 128, + .partitions = 1, + .line_size = 64, + }; + break; + case 0x6A: + /* + * Intel ISA Reference: + * "uTLB: 4 KByte pages, 8-way set associative, 64 entries" + */ + + /* uTLB is, an fact, a normal 1-level DTLB on Silvermont & Knoghts Landing */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 64, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x6B: + /* + * Intel ISA Reference: + * "DTLB: 4 KByte pages, 8-way set associative, 256 entries" + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 256, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0x6C: + /* + * Intel ISA Reference: + * "DTLB: 2M/4M pages, 8-way set associative, 128 entries" + */ + *dtlb_2MB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 128, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_2MB 
| CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x6D: + /* + * Intel ISA Reference: + * "DTLB: 1 GByte pages, fully associative, 16 entries" + */ + *dtlb_1GB = (struct cpuinfo_tlb) { + .entries = 16, + .associativity = 16, + .pages = CPUINFO_PAGE_SIZE_1GB, + }; + break; + case 0x70: + /* + * Intel ISA Reference: + * "Trace cache: 12 K-uop, 8-way set associative" + * Application Note 485: + * "Trace cache: 12K-uops, 8-way set associative" + * Cyrix CPU Detection Guide and Geode GX1 Processor Series: + * "TLB, 32 entries, 4-way set associative, 4K-Byte Pages" + */ + switch (vendor) { +#if CPUINFO_ARCH_X86 + case cpuinfo_vendor_cyrix: + case cpuinfo_vendor_nsc: + *dtlb_4KB = *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; +#endif /* CPUINFO_ARCH_X86 */ + default: + cache->trace = (struct cpuinfo_trace_cache) { + .uops = 12 * 1024, + .associativity = 8, + }; + } + break; + case 0x71: + /* + * Intel ISA Reference: + * "Trace cache: 16 K-uop, 8-way set associative" + * Application Note 485: + * "Trace cache: 16K-uops, 8-way set associative" + */ + cache->trace = (struct cpuinfo_trace_cache) { + .uops = 16 * 1024, + .associativity = 8, + }; + break; + case 0x72: + /* + * Intel ISA Reference: + * "Trace cache: 32 K-μop, 8-way set associative" + * Application Note 485: + * "Trace cache: 32K-uops, 8-way set associative" + */ + cache->trace = (struct cpuinfo_trace_cache) { + .uops = 32 * 1024, + .associativity = 8, + }; + break; + case 0x73: + /* Where does this come from? */ + cache->trace = (struct cpuinfo_trace_cache) { + .uops = 64 * 1024, + .associativity = 8, + }; + break; + case 0x76: + /* + * Intel ISA Reference: + * "Instruction TLB: 2M/4M pages, fully associative, 8 entries" + * Application Note 485: + * "Instruction TLB: 2M/4M pages, fully associative, 8 entries" + */ + *itlb_2MB = *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 8, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0x78: + /* + * Intel ISA Reference: + * "2nd-level cache: 1 MByte, 4-way set associative, 64byte line size" + * Application Note 485: + * "2nd-level cache: 1-MB, 4-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 4, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x79: + /* + * Intel ISA Reference: + * "2nd-level cache: 128 KByte, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "2nd-level cache: 128-KB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 128 * 1024, + .associativity = 8, + .sets = 256, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x7A: + /* + * Intel ISA Reference: + * "2nd-level cache: 256 KByte, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "2nd-level cache: 256-KB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 256 * 1024, + .associativity = 8, + .sets = 512, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x7B: + /* + * Intel ISA Reference: + * "2nd-level cache: 512 KByte, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "2nd-level cache: 512-KB, 8-way set associative, 
sectored cache, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 8, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x7C: + /* + * Intel ISA Reference: + * "2nd-level cache: 1 MByte, 8-way set associative, 64 byte line size, 2 lines per sector" + * Application Note 485: + * "2nd-level cache: 1-MB, 8-way set associative, sectored cache, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 8, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x7D: + /* + * Intel ISA Reference: + * "2nd-level cache: 2 MByte, 8-way set associative, 64byte line size" + * Application Note 485: + * "2nd-level cache: 2-MB, 8-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 2 * 1024 * 1024, + .associativity = 8, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x7F: + /* + * Intel ISA Reference: + * "2nd-level cache: 512 KByte, 2-way set associative, 64-byte line size" + * Application Note 485: + * "2nd-level cache: 512-KB, 2-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 2, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x80: + /* + * Intel ISA Reference: + * "2nd-level cache: 512 KByte, 8-way set associative, 64-byte line size" + * Application Note 485: + * "2nd-level cache: 512-KB, 8-way set associative, 64-byte line size" + * Cyrix CPU Detection Guide and Geode GX1 Processor Series: + * "Level 1 Cache, 16K, 4-way set associative, 16 Bytes/Line" + */ + switch (vendor) { +#if CPUINFO_ARCH_X86 && !defined(__ANDROID__) + case cpuinfo_vendor_cyrix: + case cpuinfo_vendor_nsc: + cache->l1i = cache->l1d = (struct cpuinfo_x86_cache) { + .size = 16 * 1024, + .associativity = 4, + .sets = 256, + .partitions = 1, + .line_size = 16, + .flags = CPUINFO_CACHE_UNIFIED, + }; + break; +#endif /* CPUINFO_ARCH_X86 */ + default: + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 8, + .sets = 1024, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + } + break; + case 0x82: + /* + * Intel ISA Reference: + * "2nd-level cache: 256 KByte, 8-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 256-KB, 8-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 256 * 1024, + .associativity = 4, + .sets = 2048, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x83: + /* + * Intel ISA Reference: + * "2nd-level cache: 512 KByte, 8-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 512-KB, 8-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 8, + .sets = 2048, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x84: + /* + * Intel ISA Reference: + * "2nd-level cache: 1 MByte, 8-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 1-MB, 8-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 8, + .sets = 4096, + 
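/*
 * Annotation, not part of the patch: in the TLB entries of this table a fully
 * associative TLB is encoded by setting .associativity equal to .entries
 * (e.g. descriptor 0x5B above: 64 entries, associativity 64), and .pages is a
 * bitmask of CPUINFO_PAGE_SIZE_* flags, which is why multi-page-size
 * descriptors such as 0x50 assign one struct to several TLB slots. A hedged
 * sketch of hypothetical helpers built on that encoding:
 *
 *   static bool tlb_is_fully_associative(const struct cpuinfo_tlb* tlb) {
 *       return tlb->entries != 0 && tlb->associativity == tlb->entries;
 *   }
 *
 *   static bool tlb_covers_4MB_pages(const struct cpuinfo_tlb* tlb) {
 *       return (tlb->pages & CPUINFO_PAGE_SIZE_4MB) != 0;
 *   }
 */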
.partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x85: + /* + * Intel ISA Reference: + * "2nd-level cache: 2 MByte, 8-way set associative, 32 byte line size" + * Application Note 485: + * "2nd-level cache: 2-MB, 8-way set associative, 32-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 2 * 1024 * 1024, + .associativity = 8, + .sets = 8192, + .partitions = 1, + .line_size = 32, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x86: + /* + * Intel ISA Reference: + * "2nd-level cache: 512 KByte, 4-way set associative, 64 byte line size" + * Application Note 485: + * "2nd-level cache: 512-KB, 4-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 4, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0x87: + /* + * Intel ISA Reference: + * "2nd-level cache: 1 MByte, 8-way set associative, 64 byte line size" + * Application Note 485: + * "2nd-level cache: 1-MB, 8-way set associative, 64-byte line size" + */ + cache->l2 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 8, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xA0: + /* + * Intel ISA Reference: + * "DTLB: 4k pages, fully associative, 32 entries" + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 32, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xB0: + /* + * Intel ISA Reference: + * "Instruction TLB: 4 KByte pages, 4-way set associative, 128 entries" + * Application Note 485: + * "Instruction TLB: 4-KB Pages, 4-way set associative, 128 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 128, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xB1: + /* + * Intel ISA Reference: + * "Instruction TLB: 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" + * Application Note 485: + * "Instruction TLB: 2-MB pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" + */ + *itlb_2MB = (struct cpuinfo_tlb) { + .entries = 8, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + *itlb_4MB = (struct cpuinfo_tlb) { + .entries = 4, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0xB2: + /* + * Intel ISA Reference: + * "Instruction TLB: 4KByte pages, 4-way set associative, 64 entries" + * Application Note 485: + * "Instruction TLB: 4-KB pages, 4-way set associative, 64 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 64, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xB3: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte pages, 4-way set associative, 128 entries" + * Application Note 485: + * "Data TLB: 4-KB Pages, 4-way set associative, 128 entries" + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 128, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xB4: + /* + * Intel ISA Reference: + * "Data TLB1: 4 KByte pages, 4-way associative, 256 entries" + * Application Note 485: + * "Data TLB: 4-KB Pages, 4-way set associative, 256 entries" + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 256, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xB5: + /* + * Intel ISA Reference: + * "Instruction TLB: 4KByte pages, 8-way set associative, 64 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { 
+ .entries = 64, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xB6: + /* + * Intel ISA Reference: + * "Instruction TLB: 4KByte pages, 8-way set associative, 128 entries" + */ + *itlb_4KB = (struct cpuinfo_tlb) { + .entries = 128, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xBA: + /* + * Intel ISA Reference: + * "Data TLB1: 4 KByte pages, 4-way associative, 64 entries" + * Application Note 485: + * "Data TLB: 4-KB Pages, 4-way set associative, 64 entries" + */ + *dtlb_4KB = (struct cpuinfo_tlb) { + .entries = 64, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xC0: + /* + * Intel ISA Reference: + * "Data TLB: 4 KByte and 4 MByte pages, 4-way associative, 8 entries" + * Application Note 485: + * "Data TLB: 4-KB or 4-MB Pages, 4-way set associative, 8 entries" + */ + *dtlb_4KB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 8, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0xC1: + /* + * Intel ISA Reference: + * "Shared 2nd-Level TLB: 4 KByte/2MByte pages, 8-way associative, 1024 entries" + */ + *stlb2_4KB = *stlb2_2MB = (struct cpuinfo_tlb) { + .entries = 1024, + .associativity = 8, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_2MB, + }; + break; + case 0xC2: + /* + * Intel ISA Reference: + * "DTLB: 4 KByte/2 MByte pages, 4-way associative, 16 entries" + */ + *dtlb_4KB = *dtlb_2MB = (struct cpuinfo_tlb) { + .entries = 16, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_2MB, + }; + break; + case 0xC3: + /* + * Intel ISA Reference: + * "Shared 2nd-Level TLB: 4 KByte/2 MByte pages, 6-way associative, 1536 entries. + * Also 1GBbyte pages, 4-way, 16 entries." + */ + *stlb2_4KB = *stlb2_2MB = (struct cpuinfo_tlb) { + .entries = 1536, + .associativity = 6, + .pages = CPUINFO_PAGE_SIZE_4KB | CPUINFO_PAGE_SIZE_2MB, + }; + *stlb2_1GB = (struct cpuinfo_tlb) { + .entries = 16, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_1GB, + }; + break; + case 0xC4: + /* + * Intel ISA Reference: + * "DTLB: 2M/4M Byte pages, 4-way associative, 32 entries" + */ + *dtlb_2MB = *dtlb_4MB = (struct cpuinfo_tlb) { + .entries = 32, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_2MB | CPUINFO_PAGE_SIZE_4MB, + }; + break; + case 0xCA: + /* + * Intel ISA Reference: + * "Shared 2nd-Level TLB: 4 KByte pages, 4-way associative, 512 entries" + * Application Note 485: + * "Shared 2nd-level TLB: 4 KB pages, 4-way set associative, 512 entries" + */ + *stlb2_4KB = (struct cpuinfo_tlb) { + .entries = 512, + .associativity = 4, + .pages = CPUINFO_PAGE_SIZE_4KB, + }; + break; + case 0xD0: + /* + * Intel ISA Reference: + * "3rd-level cache: 512 KByte, 4-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 512-kB, 4-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 512 * 1024, + .associativity = 4, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xD1: + /* + * Intel ISA Reference: + * "3rd-level cache: 1 MByte, 4-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 1-MB, 4-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 4, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xD2: + /* + * Intel ISA Reference: + * "3rd-level cache: 2
MByte, 4-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 2-MB, 4-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 2 * 1024 * 1024, + .associativity = 4, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xD6: + /* + * Intel ISA Reference: + * "3rd-level cache: 1 MByte, 8-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 1-MB, 8-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 1024 * 1024, + .associativity = 8, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xD7: + /* + * Intel ISA Reference: + * "3rd-level cache: 2 MByte, 8-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 2-MB, 8-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 2 * 1024 * 1024, + .associativity = 8, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xD8: + /* + * Intel ISA Reference: + * "3rd-level cache: 4 MByte, 8-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 4-MB, 8-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 4 * 1024 * 1024, + .associativity = 8, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xDC: + /* + * Intel ISA Reference: + * "3rd-level cache: 1.5 MByte, 12-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 1.5-MB, 12-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 3 * 512 * 1024, + .associativity = 12, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xDD: + /* + * Intel ISA Reference: + * "3rd-level cache: 3 MByte, 12-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 3-MB, 12-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 3 * 1024 * 1024, + .associativity = 12, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xDE: + /* + * Intel ISA Reference: + * "3rd-level cache: 6 MByte, 12-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 6-MB, 12-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 6 * 1024 * 1024, + .associativity = 12, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xE2: + /* + * Intel ISA Reference: + * "3rd-level cache: 2 MByte, 16-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 2-MB, 16-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 2 * 1024 * 1024, + .associativity = 16, + .sets = 2048, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xE3: + /* + * Intel ISA Reference: + * "3rd-level cache: 4 MByte, 16-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 4-MB, 16-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 4 * 1024 * 1024, + .associativity
= 16, + .sets = 4096, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xE4: + /* + * Intel ISA Reference: + * "3rd-level cache: 8 MByte, 16-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 8-MB, 16-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 8 * 1024 * 1024, + .associativity = 16, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xEA: + /* + * Intel ISA Reference: + * "3rd-level cache: 12MByte, 24-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 12-MB, 24-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 12 * 1024 * 1024, + .associativity = 24, + .sets = 8192, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xEB: + /* + * Intel ISA Reference: + * "3rd-level cache: 18MByte, 24-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 18-MB, 24-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 18 * 1024 * 1024, + .associativity = 24, + .sets = 12288, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xEC: + /* + * Intel ISA Reference: + * "3rd-level cache: 24MByte, 24-way set associative, 64 byte line size" + * Application Note 485: + * "3rd-level cache: 24-MB, 24-way set associative, 64-byte line size" + */ + cache->l3 = (struct cpuinfo_x86_cache) { + .size = 24 * 1024 * 1024, + .associativity = 24, + .sets = 16384, + .partitions = 1, + .line_size = 64, + .flags = CPUINFO_CACHE_INCLUSIVE, + }; + break; + case 0xF0: + /* + * Intel ISA Reference: + * "64-Byte prefetching" + * Application Note 485: + * "64-byte Prefetching" + */ + cache->prefetch_size = 64; + break; + case 0xF1: + /* + * Intel ISA Reference: + * "128-Byte prefetching" + * Application Note 485: + * "128-byte Prefetching" + */ + cache->prefetch_size = 128; + break; + } +} diff --git a/source/3rdparty/cpuinfo/src/x86/cache/deterministic.c b/source/3rdparty/cpuinfo/src/x86/cache/deterministic.c new file mode 100644 index 0000000..befd502 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/cache/deterministic.c @@ -0,0 +1,257 @@ +#include + +#include +#include +#include +#include + + +enum cache_type { + cache_type_none = 0, + cache_type_data = 1, + cache_type_instruction = 2, + cache_type_unified = 3, +}; + +bool cpuinfo_x86_decode_deterministic_cache_parameters( + struct cpuid_regs regs, + struct cpuinfo_x86_caches* cache, + uint32_t* package_cores_max) +{ + const uint32_t type = regs.eax & UINT32_C(0x1F); + if (type == cache_type_none) { + return false; + } + + /* Level starts at 1 */ + const uint32_t level = (regs.eax >> 5) & UINT32_C(0x7); + + const uint32_t sets = 1 + regs.ecx; + const uint32_t line_size = 1 + (regs.ebx & UINT32_C(0x00000FFF)); + const uint32_t partitions = 1 + ((regs.ebx >> 12) & UINT32_C(0x000003FF)); + const uint32_t associativity = 1 + (regs.ebx >> 22); + + *package_cores_max = 1 + (regs.eax >> 26); + const uint32_t processors = 1 + ((regs.eax >> 14) & UINT32_C(0x00000FFF)); + const uint32_t apic_bits = bit_length(processors); + + uint32_t flags = 0; + if (regs.edx & UINT32_C(0x00000002)) { + flags |= CPUINFO_CACHE_INCLUSIVE; + } + if (regs.edx & UINT32_C(0x00000004)) { + flags |= CPUINFO_CACHE_COMPLEX_INDEXING; + } + switch (level) { + case 1: + switch 
(type) { + case cache_type_unified: + cache->l1d = cache->l1i = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags | CPUINFO_CACHE_UNIFIED, + .apic_bits = apic_bits + }; + break; + case cache_type_data: + cache->l1d = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + case cache_type_instruction: + cache->l1i = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + case 2: + switch (type) { + case cache_type_instruction: + cpuinfo_log_warning("unexpected L2 instruction cache reported in leaf 0x00000004 is ignored"); + break; + case cache_type_unified: + flags |= CPUINFO_CACHE_UNIFIED; + case cache_type_data: + cache->l2 = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + case 3: + switch (type) { + case cache_type_instruction: + cpuinfo_log_warning("unexpected L3 instruction cache reported in leaf 0x00000004 is ignored"); + break; + case cache_type_unified: + flags |= CPUINFO_CACHE_UNIFIED; + case cache_type_data: + cache->l3 = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + case 4: + switch (type) { + case cache_type_instruction: + cpuinfo_log_warning("unexpected L4 instruction cache reported in leaf 0x00000004 is ignored"); + break; + case cache_type_unified: + flags |= CPUINFO_CACHE_UNIFIED; + case cache_type_data: + cache->l4 = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + default: + cpuinfo_log_warning("unexpected L%"PRIu32" cache reported in leaf 0x00000004 is ignored", level); + break; + } + return true; +} + + +bool cpuinfo_x86_decode_cache_properties( + struct cpuid_regs regs, + struct cpuinfo_x86_caches* cache) +{ + const uint32_t type = regs.eax & UINT32_C(0x1F); + if (type == cache_type_none) { + return false; + } + + const uint32_t level = (regs.eax >> 5) & UINT32_C(0x7); + const uint32_t cores = 1 + ((regs.eax >> 14) & UINT32_C(0x00000FFF)); + const uint32_t apic_bits = bit_length(cores); + + const uint32_t sets = 1 + regs.ecx; + const uint32_t line_size = 1 + (regs.ebx & UINT32_C(0x00000FFF)); + const uint32_t partitions = 1 + ((regs.ebx >> 12) & UINT32_C(0x000003FF)); + const uint32_t associativity = 1 + (regs.ebx >> 22); + + uint32_t flags = 0; + if (regs.edx & UINT32_C(0x00000002)) { + flags |= CPUINFO_CACHE_INCLUSIVE; + } + + switch (level) { + case 1: + switch (type) { + case cache_type_unified: + cache->l1d = cache->l1i = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * 
line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags | CPUINFO_CACHE_UNIFIED, + .apic_bits = apic_bits + }; + break; + case cache_type_data: + cache->l1d = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + case cache_type_instruction: + cache->l1i = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + case 2: + switch (type) { + case cache_type_instruction: + cpuinfo_log_warning("unexpected L2 instruction cache reported in leaf 0x8000001D is ignored"); + break; + case cache_type_unified: + flags |= CPUINFO_CACHE_UNIFIED; + case cache_type_data: + cache->l2 = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + case 3: + switch (type) { + case cache_type_instruction: + cpuinfo_log_warning("unexpected L3 instruction cache reported in leaf 0x8000001D is ignored"); + break; + case cache_type_unified: + flags |= CPUINFO_CACHE_UNIFIED; + case cache_type_data: + cache->l3 = (struct cpuinfo_x86_cache) { + .size = associativity * partitions * line_size * sets, + .associativity = associativity, + .sets = sets, + .partitions = partitions, + .line_size = line_size, + .flags = flags, + .apic_bits = apic_bits + }; + break; + } + break; + default: + cpuinfo_log_warning("unexpected L%"PRIu32" cache reported in leaf 0x8000001D is ignored", level); + break; + } + return true; +} diff --git a/source/3rdparty/cpuinfo/src/x86/cache/init.c b/source/3rdparty/cpuinfo/src/x86/cache/init.c new file mode 100644 index 0000000..dd1f1ea --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/cache/init.c @@ -0,0 +1,88 @@ +#include + +#include +#include +#include +#include +#include + + +union cpuinfo_x86_cache_descriptors { + struct cpuid_regs regs; + uint8_t as_bytes[16]; +}; + +enum cache_type { + cache_type_none = 0, + cache_type_data = 1, + cache_type_instruction = 2, + cache_type_unified = 3, +}; + +void cpuinfo_x86_detect_cache( + uint32_t max_base_index, uint32_t max_extended_index, + bool amd_topology_extensions, + enum cpuinfo_vendor vendor, + const struct cpuinfo_x86_model_info* model_info, + struct cpuinfo_x86_caches* cache, + struct cpuinfo_tlb* itlb_4KB, + struct cpuinfo_tlb* itlb_2MB, + struct cpuinfo_tlb* itlb_4MB, + struct cpuinfo_tlb* dtlb0_4KB, + struct cpuinfo_tlb* dtlb0_2MB, + struct cpuinfo_tlb* dtlb0_4MB, + struct cpuinfo_tlb* dtlb_4KB, + struct cpuinfo_tlb* dtlb_2MB, + struct cpuinfo_tlb* dtlb_4MB, + struct cpuinfo_tlb* dtlb_1GB, + struct cpuinfo_tlb* stlb2_4KB, + struct cpuinfo_tlb* stlb2_2MB, + struct cpuinfo_tlb* stlb2_1GB, + uint32_t* log2_package_cores_max) +{ + if (max_base_index >= 2) { + union cpuinfo_x86_cache_descriptors descriptors; + descriptors.regs = cpuid(2); + uint32_t iterations = (uint8_t) descriptors.as_bytes[0]; + if (iterations != 0) { +iterate_descriptors: + for (uint32_t i = 1 /* note: not 0 */; i < 16; i++) { + const uint8_t descriptor = descriptors.as_bytes[i]; + if (descriptor != 0) { 
+ cpuinfo_x86_decode_cache_descriptor( + descriptor, vendor, model_info, + cache, + itlb_4KB, itlb_2MB, itlb_4MB, + dtlb0_4KB, dtlb0_2MB, dtlb0_4MB, + dtlb_4KB, dtlb_2MB, dtlb_4MB, dtlb_1GB, + stlb2_4KB, stlb2_2MB, stlb2_1GB, + &cache->prefetch_size); + } + } + if (--iterations != 0) { + descriptors.regs = cpuid(2); + goto iterate_descriptors; + } + } + + if (vendor != cpuinfo_vendor_amd && vendor != cpuinfo_vendor_hygon && max_base_index >= 4) { + struct cpuid_regs leaf4; + uint32_t input_ecx = 0; + uint32_t package_cores_max = 0; + do { + leaf4 = cpuidex(4, input_ecx++); + } while (cpuinfo_x86_decode_deterministic_cache_parameters( + leaf4, cache, &package_cores_max)); + if (package_cores_max != 0) { + *log2_package_cores_max = bit_length(package_cores_max); + } + } + } + if (amd_topology_extensions && max_extended_index >= UINT32_C(0x8000001D)) { + struct cpuid_regs leaf0x8000001D; + uint32_t input_ecx = 0; + do { + leaf0x8000001D = cpuidex(UINT32_C(0x8000001D), input_ecx++); + } while (cpuinfo_x86_decode_cache_properties(leaf0x8000001D, cache)); + } +} diff --git a/source/3rdparty/cpuinfo/src/x86/cpuid.h b/source/3rdparty/cpuinfo/src/x86/cpuid.h new file mode 100644 index 0000000..9e9e013 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/cpuid.h @@ -0,0 +1,79 @@ +#pragma once +#include + +#if defined(__GNUC__) + #include +#elif defined(_MSC_VER) + #include +#endif + +#if CPUINFO_MOCK + #include +#endif +#include + + +#if defined(__GNUC__) || defined(_MSC_VER) + static inline struct cpuid_regs cpuid(uint32_t eax) { + #if CPUINFO_MOCK + uint32_t regs_array[4]; + cpuinfo_mock_get_cpuid(eax, regs_array); + return (struct cpuid_regs) { + .eax = regs_array[0], + .ebx = regs_array[1], + .ecx = regs_array[2], + .edx = regs_array[3], + }; + #else + struct cpuid_regs regs; + #if defined(__GNUC__) + __cpuid(eax, regs.eax, regs.ebx, regs.ecx, regs.edx); + #else + int regs_array[4]; + __cpuid(regs_array, (int) eax); + regs.eax = regs_array[0]; + regs.ebx = regs_array[1]; + regs.ecx = regs_array[2]; + regs.edx = regs_array[3]; + #endif + return regs; + #endif + } + + static inline struct cpuid_regs cpuidex(uint32_t eax, uint32_t ecx) { + #if CPUINFO_MOCK + uint32_t regs_array[4]; + cpuinfo_mock_get_cpuidex(eax, ecx, regs_array); + return (struct cpuid_regs) { + .eax = regs_array[0], + .ebx = regs_array[1], + .ecx = regs_array[2], + .edx = regs_array[3], + }; + #else + struct cpuid_regs regs; + #if defined(__GNUC__) + __cpuid_count(eax, ecx, regs.eax, regs.ebx, regs.ecx, regs.edx); + #else + int regs_array[4]; + __cpuidex(regs_array, (int) eax, (int) ecx); + regs.eax = regs_array[0]; + regs.ebx = regs_array[1]; + regs.ecx = regs_array[2]; + regs.edx = regs_array[3]; + #endif + return regs; + #endif + } +#endif + +static inline uint64_t xgetbv(uint32_t ext_ctrl_reg) { + #ifdef _MSC_VER + return (uint64_t)_xgetbv((unsigned int)ext_ctrl_reg); + #else + uint32_t lo, hi; + __asm__(".byte 0x0F, 0x01, 0xD0" : "=a" (lo), "=d" (hi) : "c" (ext_ctrl_reg)); + return ((uint64_t) hi << 32) | (uint64_t) lo; + #endif +} + diff --git a/source/3rdparty/cpuinfo/src/x86/info.c b/source/3rdparty/cpuinfo/src/x86/info.c new file mode 100644 index 0000000..ceb6b84 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/info.c @@ -0,0 +1,19 @@ +#include + +#include +#include + + +struct cpuinfo_x86_model_info cpuinfo_x86_decode_model_info(uint32_t eax) { + struct cpuinfo_x86_model_info model_info; + model_info.stepping = eax & 0xF; + model_info.base_model = (eax >> 4) & 0xF; + model_info.base_family = (eax >> 8) & 0xF; + 
model_info.processor_type = (eax >> 12) & 0x3; + model_info.extended_model = (eax >> 16) & 0xF; + model_info.extended_family = (eax >> 20) & 0xFF; + + model_info.family = model_info.base_family + model_info.extended_family; + model_info.model = model_info.base_model + (model_info.extended_model << 4); + return model_info; +} diff --git a/source/3rdparty/cpuinfo/src/x86/init.c b/source/3rdparty/cpuinfo/src/x86/init.c new file mode 100644 index 0000000..244359c --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/init.c @@ -0,0 +1,75 @@ +#include +#include + +#include +#include +#include +#include +#include +#include + + +struct cpuinfo_x86_isa cpuinfo_isa = { 0 }; +CPUINFO_INTERNAL uint32_t cpuinfo_x86_clflush_size = 0; + +void cpuinfo_x86_init_processor(struct cpuinfo_x86_processor* processor) { + const struct cpuid_regs leaf0 = cpuid(0); + const uint32_t max_base_index = leaf0.eax; + const enum cpuinfo_vendor vendor = processor->vendor = + cpuinfo_x86_decode_vendor(leaf0.ebx, leaf0.ecx, leaf0.edx); + + const struct cpuid_regs leaf0x80000000 = cpuid(UINT32_C(0x80000000)); + const uint32_t max_extended_index = + leaf0x80000000.eax >= UINT32_C(0x80000000) ? leaf0x80000000.eax : 0; + + const struct cpuid_regs leaf0x80000001 = max_extended_index >= UINT32_C(0x80000001) ? + cpuid(UINT32_C(0x80000001)) : (struct cpuid_regs) { 0, 0, 0, 0 }; + + if (max_base_index >= 1) { + const struct cpuid_regs leaf1 = cpuid(1); + processor->cpuid = leaf1.eax; + + const struct cpuinfo_x86_model_info model_info = cpuinfo_x86_decode_model_info(leaf1.eax); + const enum cpuinfo_uarch uarch = processor->uarch = + cpuinfo_x86_decode_uarch(vendor, &model_info); + + cpuinfo_x86_clflush_size = ((leaf1.ebx >> 8) & UINT32_C(0x000000FF)) * 8; + + /* + * Topology extensions support: + * - AMD: ecx[bit 22] in extended info (reserved bit on Intel CPUs). 
+ */ + const bool amd_topology_extensions = !!(leaf0x80000001.ecx & UINT32_C(0x00400000)); + + cpuinfo_x86_detect_cache( + max_base_index, max_extended_index, amd_topology_extensions, vendor, &model_info, + &processor->cache, + &processor->tlb.itlb_4KB, + &processor->tlb.itlb_2MB, + &processor->tlb.itlb_4MB, + &processor->tlb.dtlb0_4KB, + &processor->tlb.dtlb0_2MB, + &processor->tlb.dtlb0_4MB, + &processor->tlb.dtlb_4KB, + &processor->tlb.dtlb_2MB, + &processor->tlb.dtlb_4MB, + &processor->tlb.dtlb_1GB, + &processor->tlb.stlb2_4KB, + &processor->tlb.stlb2_2MB, + &processor->tlb.stlb2_1GB, + &processor->topology.core_bits_length); + + cpuinfo_x86_detect_topology(max_base_index, max_extended_index, leaf1, &processor->topology); + + cpuinfo_isa = cpuinfo_x86_detect_isa(leaf1, leaf0x80000001, + max_base_index, max_extended_index, vendor, uarch); + } + if (max_extended_index >= UINT32_C(0x80000004)) { + struct cpuid_regs brand_string[3]; + for (uint32_t i = 0; i < 3; i++) { + brand_string[i] = cpuid(UINT32_C(0x80000002) + i); + } + memcpy(processor->brand_string, brand_string, sizeof(processor->brand_string)); + cpuinfo_log_debug("raw CPUID brand string: \"%48s\"", processor->brand_string); + } +} diff --git a/source/3rdparty/cpuinfo/src/x86/isa.c b/source/3rdparty/cpuinfo/src/x86/isa.c new file mode 100644 index 0000000..f2e5a28 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/isa.c @@ -0,0 +1,724 @@ +#include +#include +#include + +#include +#include + + +#if CPUINFO_ARCH_X86 + #ifdef _MSC_VER + #pragma pack(push, 2) + #endif + struct fxsave_region { + uint16_t fpu_control_word; + uint16_t fpu_status_word; + uint16_t fpu_tag_word; + uint16_t fpu_opcode; + uint32_t fpu_instruction_pointer_offset; + uint32_t fpu_instruction_pointer_selector; + uint32_t fpu_operand_pointer_offset; + uint32_t fpu_operand_pointer_selector; + uint32_t mxcsr_state; + uint32_t mxcsr_mask; + uint64_t fpu_registers[8 * 2]; + uint64_t xmm_registers[8 * 2]; + uint64_t padding[28]; + } + #ifndef _MSC_VER + __attribute__((__aligned__(16), __packed__)) + #endif + ; /* end of fxsave_region structure */ + #ifdef _MSC_VER + #pragma pack(pop, 2) + #endif +#endif + + +struct cpuinfo_x86_isa cpuinfo_x86_detect_isa( + const struct cpuid_regs basic_info, const struct cpuid_regs extended_info, + uint32_t max_base_index, uint32_t max_extended_index, + enum cpuinfo_vendor vendor, enum cpuinfo_uarch uarch) +{ + struct cpuinfo_x86_isa isa = { 0 }; + + const struct cpuid_regs structured_feature_info0 = + (max_base_index >= 7) ? cpuidex(7, 0) : (struct cpuid_regs) { 0, 0, 0, 0}; + const struct cpuid_regs structured_feature_info1 = + (max_base_index >= 7) ? cpuidex(7, 1) : (struct cpuid_regs) { 0, 0, 0, 0}; + + const uint32_t processor_capacity_info_index = UINT32_C(0x80000008); + const struct cpuid_regs processor_capacity_info = + (max_extended_index >= processor_capacity_info_index) ? + cpuid(processor_capacity_info_index) : (struct cpuid_regs) { 0, 0, 0, 0 }; + + bool avx_regs = false, avx512_regs = false, mpx_regs = false; + /* + * OSXSAVE: Operating system enabled XSAVE instructions for application use: + * - Intel, AMD: ecx[bit 26] in basic info = XSAVE/XRSTOR instructions supported by a chip. + * - Intel, AMD: ecx[bit 27] in basic info = XSAVE/XRSTOR instructions enabled by OS. 
+ */ + const uint32_t osxsave_mask = UINT32_C(0x0C000000); + if ((basic_info.ecx & osxsave_mask) == osxsave_mask) { + uint64_t xcr0_valid_bits = 0; + if (max_base_index >= 0xD) { + const struct cpuid_regs regs = cpuidex(0xD, 0); + xcr0_valid_bits = ((uint64_t) regs.edx << 32) | regs.eax; + } + + const uint64_t xfeature_enabled_mask = xgetbv(0); + + /* + * AVX registers: + * - Intel, AMD: XFEATURE_ENABLED_MASK[bit 1] for low 128 bits of ymm registers + * - Intel, AMD: XFEATURE_ENABLED_MASK[bit 2] for high 128 bits of ymm registers + */ + const uint64_t avx_regs_mask = UINT64_C(0x0000000000000006); + if ((xcr0_valid_bits & avx_regs_mask) == avx_regs_mask) { + avx_regs = (xfeature_enabled_mask & avx_regs_mask) == avx_regs_mask; + } + + /* + * AVX512 registers: + * - Intel, AMD: XFEATURE_ENABLED_MASK[bit 1] for low 128 bits of zmm registers + * - Intel, AMD: XFEATURE_ENABLED_MASK[bit 2] for bits 128-255 of zmm registers + * - Intel: XFEATURE_ENABLED_MASK[bit 5] for 8 64-bit OpMask registers (k0-k7) + * - Intel: XFEATURE_ENABLED_MASK[bit 6] for the high 256 bits of the zmm registers zmm0-zmm15 + * - Intel: XFEATURE_ENABLED_MASK[bit 7] for the 512-bit zmm registers zmm16-zmm31 + */ + const uint64_t avx512_regs_mask = UINT64_C(0x00000000000000E6); + if ((xcr0_valid_bits & avx512_regs_mask) == avx512_regs_mask) { + avx512_regs = (xfeature_enabled_mask & avx512_regs_mask) == avx512_regs_mask; + } + + /* + * MPX registers: + * - Intel: XFEATURE_ENABLED_MASK[bit 3] for BNDREGS + * - Intel: XFEATURE_ENABLED_MASK[bit 4] for BNDCSR + */ + const uint64_t mpx_regs_mask = UINT64_C(0x0000000000000018); + if ((xcr0_valid_bits & mpx_regs_mask) == mpx_regs_mask) { + mpx_regs = (xfeature_enabled_mask & mpx_regs_mask) == mpx_regs_mask; + } + } + +#if CPUINFO_ARCH_X86 + /* + * RDTSC instruction: + * - Intel, AMD: edx[bit 4] in basic info. + * - AMD: edx[bit 4] in extended info (reserved bit on Intel CPUs). + */ + isa.rdtsc = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x00000010)); +#endif + + /* + * SYSENTER/SYSEXIT instructions: + * - Intel, AMD: edx[bit 11] in basic info. + */ + isa.sysenter = !!(basic_info.edx & UINT32_C(0x00000800)); + +#if CPUINFO_ARCH_X86 + /* + * SYSCALL/SYSRET instructions: + * - Intel, AMD: edx[bit 11] in extended info. + */ + isa.syscall = !!(extended_info.edx & UINT32_C(0x00000800)); +#endif + + /* + * RDMSR/WRMSR instructions: + * - Intel, AMD: edx[bit 5] in basic info. + * - AMD: edx[bit 5] in extended info (reserved bit on Intel CPUs). + */ + isa.msr = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x00000020)); + + /* + * CLZERO instruction: + * - AMD: ebx[bit 0] in processor capacity info (reserved bit on Intel CPUs). + */ + isa.clzero = !!(processor_capacity_info.ebx & UINT32_C(0x00000001)); + + /* + * CLFLUSH instruction: + * - Intel, AMD: edx[bit 19] in basic info. + */ + isa.clflush = !!(basic_info.edx & UINT32_C(0x00080000)); + + /* + * CLFLUSHOPT instruction: + * - Intel: ebx[bit 23] in structured feature info (ecx = 0). + */ + isa.clflushopt = !!(structured_feature_info0.ebx & UINT32_C(0x00800000)); + + /* + * MWAIT/MONITOR instructions: + * - Intel, AMD: ecx[bit 3] in basic info. + */ + isa.mwait = !!(basic_info.ecx & UINT32_C(0x00000008)); + + /* + * MWAITX/MONITORX instructions: + * - AMD: ecx[bit 29] in extended info. + */ + isa.mwaitx = !!(extended_info.ecx & UINT32_C(0x20000000)); + + /* + * FXSAVE/FXRSTOR instructions: + * - Intel, AMD: edx[bit 24] in basic info. + * - AMD: edx[bit 24] in extended info (zero bit on Intel CPUs, EMMX bit on Cyrix CPUs). 
+ */ + switch (vendor) { +#if CPUINFO_ARCH_X86 + case cpuinfo_vendor_cyrix: + case cpuinfo_vendor_nsc: + isa.emmx = !!(extended_info.edx & UINT32_C(0x01000000)); + break; +#endif + default: + isa.fxsave = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x01000000)); + break; + } + + /* + * XSAVE/XRSTOR instructions: + * - Intel, AMD: ecx[bit 26] in basic info. + */ + isa.xsave = !!(basic_info.ecx & UINT32_C(0x04000000)); + +#if CPUINFO_ARCH_X86 + /* + * x87 FPU instructions: + * - Intel, AMD: edx[bit 0] in basic info. + * - AMD: edx[bit 0] in extended info (reserved bit on Intel CPUs). + */ + isa.fpu = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x00000001)); + + /* + * MMX instructions: + * - Intel, AMD: edx[bit 23] in basic info. + * - AMD: edx[bit 23] in extended info (zero bit on Intel CPUs). + */ + isa.mmx = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x00800000)); + + /* + * MMX+/Integer SSE instructions: + * - Intel, AMD: edx[bit 25] in basic info (SSE feature flag). + * - Pre-SSE AMD: edx[bit 22] in extended info (zero bit on Intel CPUs). + */ + isa.mmx_plus = !!((basic_info.edx & UINT32_C(0x02000000)) | (extended_info.edx & UINT32_C(0x00400000))); +#endif + + /* + * 3dnow! instructions: + * - AMD: edx[bit 31] of extended info (zero bit on Intel CPUs). + */ + isa.three_d_now = !!(extended_info.edx & UINT32_C(0x80000000)); + + /* + * 3dnow!+ instructions: + * - AMD: edx[bit 30] of extended info (zero bit on Intel CPUs). + */ + isa.three_d_now_plus = !!(extended_info.edx & UINT32_C(0x40000000)); + +#if CPUINFO_ARCH_X86 + /* + * 3dnow! Geode instructions: + * - No CPUID bit, detect as Geode microarchitecture + 3dnow!+ support + */ + isa.three_d_now_geode = isa.three_d_now_plus && (uarch == cpuinfo_uarch_geode); +#endif + + /* + * PREFETCH instruction: + * - AMD: ecx[bit 8] of extended info (one of 3dnow! prefetch instructions). + * On Intel this bit indicates PREFETCHW, but not PREFETCH support. + * - AMD: edx[bit 31] of extended info (implied by 3dnow! support). Reserved bit on Intel CPUs. + * - AMD: edx[bit 30] of extended info (implied by 3dnow!+ support). Reserved bit on Intel CPUs. + * - AMD: edx[bit 29] of extended info (x86-64 support). Does not imply PREFETCH support on non-AMD CPUs!!! + */ + switch (vendor) { + case cpuinfo_vendor_intel: + /* + * Instruction is not documented in the manual, + * and the 3dnow! prefetch CPUID bit indicates PREFETCHW instruction. + */ + break; + case cpuinfo_vendor_amd: + case cpuinfo_vendor_hygon: + isa.prefetch = !!((extended_info.ecx & UINT32_C(0x00000100)) | (extended_info.edx & UINT32_C(0xE0000000))); + break; + default: + /* + * Conservatively assume, that 3dnow!/3dnow!+ support implies PREFETCH support, but + * 3dnow! prefetch CPUID bit follows Intel spec (PREFETCHW, but not PREFETCH). + */ + isa.prefetch = !!(extended_info.edx & UINT32_C(0xC0000000)); + break; + } + + /* + * PREFETCHW instruction: + * - AMD: ecx[bit 8] of extended info (one of 3dnow! prefetch instructions). + * - Intel: ecx[bit 8] of extended info (PREFETCHW instruction only). + * - AMD: edx[bit 31] of extended info (implied by 3dnow! support). Reserved bit on Intel CPUs. + * - AMD: edx[bit 30] of extended info (implied by 3dnow!+ support). Reserved bit on Intel CPUs. + * - AMD: edx[bit 29] of extended info (x86-64 support). Does not imply PREFETCHW support on non-AMD CPUs!!! 
+ */ + switch (vendor) { + case cpuinfo_vendor_amd: + case cpuinfo_vendor_hygon: + isa.prefetchw = !!((extended_info.ecx & UINT32_C(0x00000100)) | (extended_info.edx & UINT32_C(0xE0000000))); + break; + default: + /* Assume, that 3dnow!/3dnow!+ support implies PREFETCHW support, not implications from x86-64 support */ + isa.prefetchw = !!((extended_info.ecx & UINT32_C(0x00000100)) | (extended_info.edx & UINT32_C(0xC0000000))); + break; + } + + /* + * PREFETCHWT1 instruction: + * - Intel: ecx[bit 0] of structured feature info (ecx = 0). Reserved bit on AMD. + */ + isa.prefetchwt1 = !!(structured_feature_info0.ecx & UINT32_C(0x00000001)); + +#if CPUINFO_ARCH_X86 + /* + * SSE instructions: + * - Intel, AMD: edx[bit 25] in basic info. + */ + isa.sse = !!(basic_info.edx & UINT32_C(0x02000000)); + + /* + * SSE2 instructions: + * - Intel, AMD: edx[bit 26] in basic info. + */ + isa.sse2 = !!(basic_info.edx & UINT32_C(0x04000000)); +#endif + + /* + * SSE3 instructions: + * - Intel, AMD: ecx[bit 0] in basic info. + */ + isa.sse3 = !!(basic_info.ecx & UINT32_C(0x00000001)); + +#if CPUINFO_ARCH_X86 + /* + * CPUs with x86-64 or SSE3 always support DAZ (denormals-as-zero) mode. + * Only early Pentium 4 models may not support it. + */ + if (isa.sse3) { + isa.daz = true; + } else { + /* Detect DAZ support from masked MXCSR bits */ + if (isa.sse && isa.fxsave) { + struct fxsave_region region = { 0 }; + #ifdef _MSC_VER + _fxsave(®ion); + #else + __asm__ __volatile__ ("fxsave %[region];" : [region] "+m" (region)); + #endif + + /* + * Denormals-as-zero (DAZ) flag: + * - Intel, AMD: MXCSR[bit 6] + */ + isa.daz = !!(region.mxcsr_mask & UINT32_C(0x00000040)); + } + } +#endif + + /* + * SSSE3 instructions: + * - Intel, AMD: ecx[bit 9] in basic info. + */ + isa.ssse3 = !!(basic_info.ecx & UINT32_C(0x0000200)); + + + /* + * SSE4.1 instructions: + * - Intel, AMD: ecx[bit 19] in basic info. + */ + isa.sse4_1 = !!(basic_info.ecx & UINT32_C(0x00080000)); + + /* + * SSE4.2 instructions: + * - Intel: ecx[bit 20] in basic info (reserved bit on AMD CPUs). + */ + isa.sse4_2 = !!(basic_info.ecx & UINT32_C(0x00100000)); + + /* + * SSE4A instructions: + * - AMD: ecx[bit 6] in extended info (reserved bit on Intel CPUs). + */ + isa.sse4a = !!(extended_info.ecx & UINT32_C(0x00000040)); + + /* + * Misaligned memory operands in SSE instructions: + * - AMD: ecx[bit 7] in extended info (reserved bit on Intel CPUs). + */ + isa.misaligned_sse = !!(extended_info.ecx & UINT32_C(0x00000080)); + + /* + * AVX instructions: + * - Intel, AMD: ecx[bit 28] in basic info. + */ + isa.avx = avx_regs && !!(basic_info.ecx & UINT32_C(0x10000000)); + + /* + * FMA3 instructions: + * - Intel: ecx[bit 12] in basic info (reserved bit on AMD CPUs). + */ + isa.fma3 = avx_regs && !!(basic_info.ecx & UINT32_C(0x00001000)); + + /* + * FMA4 instructions: + * - AMD: ecx[bit 16] in extended info (reserved bit on Intel CPUs). + */ + isa.fma4 = avx_regs && !!(extended_info.ecx & UINT32_C(0x00010000)); + + /* + * XOP instructions: + * - AMD: ecx[bit 11] in extended info (reserved bit on Intel CPUs). + */ + isa.xop = avx_regs && !!(extended_info.ecx & UINT32_C(0x00000800)); + + /* + * F16C instructions: + * - Intel, AMD: ecx[bit 29] in basic info. + */ + isa.f16c = avx_regs && !!(basic_info.ecx & UINT32_C(0x20000000)); + + /* + * AVX2 instructions: + * - Intel: ebx[bit 5] in structured feature info (ecx = 0). 
+ */ + isa.avx2 = avx_regs && !!(structured_feature_info0.ebx & UINT32_C(0x00000020)); + + /* + * AVX512F instructions: + * - Intel: ebx[bit 16] in structured feature info (ecx = 0). + */ + isa.avx512f = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x00010000)); + + /* + * AVX512PF instructions: + * - Intel: ebx[bit 26] in structured feature info (ecx = 0). + */ + isa.avx512pf = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x04000000)); + + /* + * AVX512ER instructions: + * - Intel: ebx[bit 27] in structured feature info (ecx = 0). + */ + isa.avx512er = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x08000000)); + + /* + * AVX512CD instructions: + * - Intel: ebx[bit 28] in structured feature info (ecx = 0). + */ + isa.avx512cd = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x10000000)); + + /* + * AVX512DQ instructions: + * - Intel: ebx[bit 17] in structured feature info (ecx = 0). + */ + isa.avx512dq = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x00020000)); + + /* + * AVX512BW instructions: + * - Intel: ebx[bit 30] in structured feature info (ecx = 0). + */ + isa.avx512bw = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x40000000)); + + /* + * AVX512VL instructions: + * - Intel: ebx[bit 31] in structured feature info (ecx = 0). + */ + isa.avx512vl = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x80000000)); + + /* + * AVX512IFMA instructions: + * - Intel: ebx[bit 21] in structured feature info (ecx = 0). + */ + isa.avx512ifma = avx512_regs && !!(structured_feature_info0.ebx & UINT32_C(0x00200000)); + + /* + * AVX512VBMI instructions: + * - Intel: ecx[bit 1] in structured feature info (ecx = 0). + */ + isa.avx512vbmi = avx512_regs && !!(structured_feature_info0.ecx & UINT32_C(0x00000002)); + + /* + * AVX512VBMI2 instructions: + * - Intel: ecx[bit 6] in structured feature info (ecx = 0). + */ + isa.avx512vbmi2 = avx512_regs && !!(structured_feature_info0.ecx & UINT32_C(0x00000040)); + + /* + * AVX512BITALG instructions: + * - Intel: ecx[bit 12] in structured feature info (ecx = 0). + */ + isa.avx512bitalg = avx512_regs && !!(structured_feature_info0.ecx & UINT32_C(0x00001000)); + + /* + * AVX512VPOPCNTDQ instructions: + * - Intel: ecx[bit 14] in structured feature info (ecx = 0). + */ + isa.avx512vpopcntdq = avx512_regs && !!(structured_feature_info0.ecx & UINT32_C(0x00004000)); + + /* + * AVX512VNNI instructions: + * - Intel: ecx[bit 11] in structured feature info (ecx = 0). + */ + isa.avx512vnni = avx512_regs && !!(structured_feature_info0.ecx & UINT32_C(0x00000800)); + + /* + * AVX512_4VNNIW instructions: + * - Intel: edx[bit 2] in structured feature info (ecx = 0). + */ + isa.avx512_4vnniw = avx512_regs && !!(structured_feature_info0.edx & UINT32_C(0x00000004)); + + /* + * AVX512_4FMAPS instructions: + * - Intel: edx[bit 3] in structured feature info (ecx = 0). + */ + isa.avx512_4fmaps = avx512_regs && !!(structured_feature_info0.edx & UINT32_C(0x00000008)); + + /* + * AVX512_VP2INTERSECT instructions: + * - Intel: edx[bit 8] in structured feature info (ecx = 0). + */ + isa.avx512vp2intersect = avx512_regs && !!(structured_feature_info0.edx & UINT32_C(0x00000100)); + + /* + * AVX512_BF16 instructions: + * - Intel: eax[bit 5] in structured feature info (ecx = 1). + */ + isa.avx512bf16 = avx512_regs && !!(structured_feature_info1.eax & UINT32_C(0x00000020)); + + /* + * HLE instructions: + * - Intel: ebx[bit 4] in structured feature info (ecx = 0). 
+ */ + isa.hle = !!(structured_feature_info0.ebx & UINT32_C(0x00000010)); + + /* + * RTM instructions: + * - Intel: ebx[bit 11] in structured feature info (ecx = 0). + */ + isa.rtm = !!(structured_feature_info0.ebx & UINT32_C(0x00000800)); + + /* + * XTEST instruction: + * - Intel: either HLE or RTM is supported + */ + isa.xtest = isa.hle || isa.rtm; + + /* + * MPX registers and instructions: + * - Intel: ebx[bit 14] in structured feature info (ecx = 0). + */ + isa.mpx = mpx_regs && !!(structured_feature_info0.ebx & UINT32_C(0x00004000)); + +#if CPUINFO_ARCH_X86 + /* + * CMOV instructions: + * - Intel, AMD: edx[bit 15] in basic info. + * - AMD: edx[bit 15] in extended info (zero bit on Intel CPUs). + */ + isa.cmov = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x00008000)); + + /* + * CMPXCHG8B instruction: + * - Intel, AMD: edx[bit 8] in basic info. + * - AMD: edx[bit 8] in extended info (reserved bit on Intel CPUs). + */ + isa.cmpxchg8b = !!((basic_info.edx | extended_info.edx) & UINT32_C(0x00000100)); +#endif + + /* + * CMPXCHG16B instruction: + * - Intel, AMD: ecx[bit 13] in basic info. + */ + isa.cmpxchg16b = !!(basic_info.ecx & UINT32_C(0x00002000)); + + /* + * CLWB instruction: + * - Intel: ebx[bit 24] in structured feature info (ecx = 0). + */ + isa.clwb = !!(structured_feature_info0.ebx & UINT32_C(0x01000000)); + + /* + * MOVBE instruction: + * - Intel: ecx[bit 22] in basic info. + */ + isa.movbe = !!(basic_info.ecx & UINT32_C(0x00400000)); + +#if CPUINFO_ARCH_X86_64 + /* + * Some early x86-64 CPUs lack LAHF & SAHF instructions. + * A special CPU feature bit must be checked to ensure their availability: + * - Intel, AMD: ecx[bit 0] in extended info. + */ + isa.lahf_sahf = !!(extended_info.ecx & UINT32_C(0x00000001)); +#endif + + /* + * RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE instructions. + * - Intel: ebx[bit 0] in structured feature info (ecx = 0). + */ + isa.fs_gs_base = !!(structured_feature_info0.ebx & UINT32_C(0x00000001)); + + /* + * LZCNT instruction: + * - Intel, AMD: ecx[bit 5] in extended info. + */ + isa.lzcnt = !!(extended_info.ecx & UINT32_C(0x00000020)); + + /* + * POPCNT instruction: + * - Intel, AMD: ecx[bit 23] in basic info. + */ + isa.popcnt = !!(basic_info.ecx & UINT32_C(0x00800000)); + + /* + * TBM instructions: + * - AMD: ecx[bit 21] in extended info (reserved bit on Intel CPUs). + */ + isa.tbm = !!(extended_info.ecx & UINT32_C(0x00200000)); + + /* + * BMI instructions: + * - Intel, AMD: ebx[bit 3] in structured feature info (ecx = 0). + */ + isa.bmi = !!(structured_feature_info0.ebx & UINT32_C(0x00000008)); + + /* + * BMI2 instructions: + * - Intel: ebx[bit 8] in structured feature info (ecx = 0). + */ + isa.bmi2 = !!(structured_feature_info0.ebx & UINT32_C(0x00000100)); + + /* + * ADCX/ADOX instructions: + * - Intel: ebx[bit 19] in structured feature info (ecx = 0). + */ + isa.adx = !!(structured_feature_info0.ebx & UINT32_C(0x00080000)); + + /* + * AES instructions: + * - Intel: ecx[bit 25] in basic info (reserved bit on AMD CPUs). + */ + isa.aes = !!(basic_info.ecx & UINT32_C(0x02000000)); + + /* + * VAES instructions: + * - Intel: ecx[bit 9] in structured feature info (ecx = 0). + */ + isa.vaes = !!(structured_feature_info0.ecx & UINT32_C(0x00000200)); + + /* + * PCLMULQDQ instruction: + * - Intel: ecx[bit 1] in basic info (reserved bit on AMD CPUs). + */ + isa.pclmulqdq = !!(basic_info.ecx & UINT32_C(0x00000002)); + + /* + * VPCLMULQDQ instruction: + * - Intel: ecx[bit 10] in structured feature info (ecx = 0). 
+ */ + isa.vpclmulqdq = !!(structured_feature_info0.ecx & UINT32_C(0x00000400)); + + /* + * GFNI instructions: + * - Intel: ecx[bit 8] in structured feature info (ecx = 0). + */ + isa.gfni = !!(structured_feature_info0.ecx & UINT32_C(0x00000100)); + + /* + * RDRAND instruction: + * - Intel: ecx[bit 30] in basic info (reserved bit on AMD CPUs). + */ + isa.rdrand = !!(basic_info.ecx & UINT32_C(0x40000000)); + + /* + * RDSEED instruction: + * - Intel: ebx[bit 18] in structured feature info (ecx = 0). + */ + isa.rdseed = !!(structured_feature_info0.ebx & UINT32_C(0x00040000)); + + /* + * SHA instructions: + * - Intel: ebx[bit 29] in structured feature info (ecx = 0). + */ + isa.sha = !!(structured_feature_info0.ebx & UINT32_C(0x20000000)); + + if (vendor == cpuinfo_vendor_via) { + const struct cpuid_regs padlock_meta_info = cpuid(UINT32_C(0xC0000000)); + const uint32_t max_padlock_index = padlock_meta_info.eax; + const uint32_t padlock_info_index = UINT32_C(0xC0000001); + if (max_padlock_index >= padlock_info_index) { + const struct cpuid_regs padlock_info = cpuid(padlock_info_index); + + /* + * Padlock RNG extension: + * - VIA: edx[bit 2] in padlock info = RNG exists on chip flag. + * - VIA: edx[bit 3] in padlock info = RNG enabled by OS. + */ + const uint32_t padlock_rng_mask = UINT32_C(0x0000000C); + isa.rng = (padlock_info.edx & padlock_rng_mask) == padlock_rng_mask; + + /* + * Padlock ACE extension: + * - VIA: edx[bit 6] in padlock info = ACE exists on chip flag. + * - VIA: edx[bit 7] in padlock info = ACE enabled by OS. + */ + const uint32_t padlock_ace_mask = UINT32_C(0x000000C0); + isa.ace = (padlock_info.edx & padlock_ace_mask) == padlock_ace_mask; + + /* + * Padlock ACE 2 extension: + * - VIA: edx[bit 8] in padlock info = ACE2 exists on chip flag. + * - VIA: edx[bit 9] in padlock info = ACE 2 enabled by OS. + */ + const uint32_t padlock_ace2_mask = UINT32_C(0x00000300); + isa.ace2 = (padlock_info.edx & padlock_ace2_mask) == padlock_ace2_mask; + + /* + * Padlock PHE extension: + * - VIA: edx[bit 10] in padlock info = PHE exists on chip flag. + * - VIA: edx[bit 11] in padlock info = PHE enabled by OS. + */ + const uint32_t padlock_phe_mask = UINT32_C(0x00000C00); + isa.phe = (padlock_info.edx & padlock_phe_mask) == padlock_phe_mask; + + /* + * Padlock PMM extension: + * - VIA: edx[bit 12] in padlock info = PMM exists on chip flag. + * - VIA: edx[bit 13] in padlock info = PMM enabled by OS. + */ + const uint32_t padlock_pmm_mask = UINT32_C(0x00003000); + isa.pmm = (padlock_info.edx & padlock_pmm_mask) == padlock_pmm_mask; + } + } + + /* + * LWP instructions: + * - AMD: ecx[bit 15] in extended info (reserved bit on Intel CPUs). + */ + isa.lwp = !!(extended_info.ecx & UINT32_C(0x00008000)); + + /* + * RDTSCP instruction: + * - Intel, AMD: edx[bit 27] in extended info. + */ + isa.rdtscp = !!(extended_info.edx & UINT32_C(0x08000000)); + + /* + * RDPID instruction: + * - Intel: ecx[bit 22] in structured feature info (ecx = 0). 
+ */ + isa.rdpid = !!(structured_feature_info0.ecx & UINT32_C(0x00400000)); + + return isa; +} diff --git a/source/3rdparty/cpuinfo/src/x86/linux/api.h b/source/3rdparty/cpuinfo/src/x86/linux/api.h new file mode 100644 index 0000000..1c9485b --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/linux/api.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + + +struct cpuinfo_x86_linux_processor { + uint32_t apic_id; + uint32_t linux_id; + uint32_t flags; +}; + +CPUINFO_INTERNAL bool cpuinfo_x86_linux_parse_proc_cpuinfo( + uint32_t max_processors_count, + struct cpuinfo_x86_linux_processor processors[restrict static max_processors_count]); diff --git a/source/3rdparty/cpuinfo/src/x86/linux/cpuinfo.c b/source/3rdparty/cpuinfo/src/x86/linux/cpuinfo.c new file mode 100644 index 0000000..90ff814 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/linux/cpuinfo.c @@ -0,0 +1,207 @@ +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Size, in chars, of the on-stack buffer used for parsing lines of /proc/cpuinfo. + * This is also the limit on the length of a single line. + */ +#define BUFFER_SIZE 2048 + + +static uint32_t parse_processor_number( + const char* processor_start, + const char* processor_end) +{ + const size_t processor_length = (size_t) (processor_end - processor_start); + + if (processor_length == 0) { + cpuinfo_log_warning("Processor number in /proc/cpuinfo is ignored: string is empty"); + return 0; + } + + uint32_t processor_number = 0; + for (const char* digit_ptr = processor_start; digit_ptr != processor_end; digit_ptr++) { + const uint32_t digit = (uint32_t) (*digit_ptr - '0'); + if (digit >= 10) { + cpuinfo_log_warning("non-decimal suffix %.*s in /proc/cpuinfo processor number is ignored", + (int) (processor_end - digit_ptr), digit_ptr); + break; + } + + processor_number = processor_number * 10 + digit; + } + + return processor_number; +} + +/* + * Decode APIC ID reported by Linux kernel for x86/x86-64 architecture. + * Example of APIC ID reported in /proc/cpuinfo: + * + * apicid : 2 + */ +static void parse_apic_id( + const char* apic_start, + const char* apic_end, + struct cpuinfo_x86_linux_processor processor[restrict static 1]) +{ + uint32_t apic_id = 0; + for (const char* digit_ptr = apic_start; digit_ptr != apic_end; digit_ptr++) { + const uint32_t digit = *digit_ptr - '0'; + if (digit >= 10) { + cpuinfo_log_warning("APIC ID %.*s in /proc/cpuinfo is ignored due to unexpected non-digit character '%c' at offset %zu", + (int) (apic_end - apic_start), apic_start, + *digit_ptr, (size_t) (digit_ptr - apic_start)); + return; + } + + apic_id = apic_id * 10 + digit; + } + + processor->apic_id = apic_id; + processor->flags |= CPUINFO_LINUX_FLAG_APIC_ID; +} + +struct proc_cpuinfo_parser_state { + uint32_t processor_index; + uint32_t max_processors_count; + struct cpuinfo_x86_linux_processor* processors; + struct cpuinfo_x86_linux_processor dummy_processor; +}; + +/* + * Decode a single line of /proc/cpuinfo information. + * Lines have format [ ]*:[ ] + */ +static bool parse_line( + const char* line_start, + const char* line_end, + struct proc_cpuinfo_parser_state state[restrict static 1], + uint64_t line_number) +{ + /* Empty line. Skip. */ + if (line_start == line_end) { + return true; + } + + /* Search for ':' on the line. */ + const char* separator = line_start; + for (; separator != line_end; separator++) { + if (*separator == ':') { + break; + } + } + /* Skip line if no ':' separator was found.
*/ + if (separator == line_end) { + cpuinfo_log_info("Line %.*s in /proc/cpuinfo is ignored: key/value separator ':' not found", + (int) (line_end - line_start), line_start); + return true; + } + + /* Skip trailing spaces in key part. */ + const char* key_end = separator; + for (; key_end != line_start; key_end--) { + if (key_end[-1] != ' ' && key_end[-1] != '\t') { + break; + } + } + /* Skip line if key contains nothing but spaces. */ + if (key_end == line_start) { + cpuinfo_log_info("Line %.*s in /proc/cpuinfo is ignored: key contains only spaces", + (int) (line_end - line_start), line_start); + return true; + } + + /* Skip leading spaces in value part. */ + const char* value_start = separator + 1; + for (; value_start != line_end; value_start++) { + if (*value_start != ' ') { + break; + } + } + /* Value part contains nothing but spaces. Skip line. */ + if (value_start == line_end) { + cpuinfo_log_info("Line %.*s in /proc/cpuinfo is ignored: value contains only spaces", + (int) (line_end - line_start), line_start); + return true; + } + + /* Skip trailing spaces in value part (if any) */ + const char* value_end = line_end; + for (; value_end != value_start; value_end--) { + if (value_end[-1] != ' ') { + break; + } + } + + const uint32_t processor_index = state->processor_index; + const uint32_t max_processors_count = state->max_processors_count; + struct cpuinfo_x86_linux_processor* processors = state->processors; + struct cpuinfo_x86_linux_processor* processor = &state->dummy_processor; + if (processor_index < max_processors_count) { + processor = &processors[processor_index]; + } + + const size_t key_length = key_end - line_start; + switch (key_length) { + case 6: + if (memcmp(line_start, "apicid", key_length) == 0) { + parse_apic_id(value_start, value_end, processor); + } else { + goto unknown; + } + break; + case 9: + if (memcmp(line_start, "processor", key_length) == 0) { + const uint32_t new_processor_index = parse_processor_number(value_start, value_end); + if (new_processor_index < processor_index) { + /* Strange: decreasing processor number */ + cpuinfo_log_warning( + "unexpectedly low processor number %"PRIu32" following processor %"PRIu32" in /proc/cpuinfo", + new_processor_index, processor_index); + } else if (new_processor_index > processor_index + 1) { + /* Strange, but common: skipped processor $(processor_index + 1) */ + cpuinfo_log_info( + "unexpectedly high processor number %"PRIu32" following processor %"PRIu32" in /proc/cpuinfo", + new_processor_index, processor_index); + } + if (new_processor_index >= max_processors_count) { + /* Log and ignore processor */ + cpuinfo_log_warning("processor %"PRIu32" in /proc/cpuinfo is ignored: index exceeds system limit %"PRIu32, + new_processor_index, max_processors_count - 1); + } else { + processors[new_processor_index].flags |= CPUINFO_LINUX_FLAG_PROC_CPUINFO; + } + state->processor_index = new_processor_index; + return true; + } else { + goto unknown; + } + break; + default: + unknown: + cpuinfo_log_debug("unknown /proc/cpuinfo key: %.*s", (int) key_length, line_start); + + } + return true; +} + +bool cpuinfo_x86_linux_parse_proc_cpuinfo( + uint32_t max_processors_count, + struct cpuinfo_x86_linux_processor processors[restrict static max_processors_count]) +{ + struct proc_cpuinfo_parser_state state = { + .processor_index = 0, + .max_processors_count = max_processors_count, + .processors = processors, + }; + return cpuinfo_linux_parse_multiline_file("/proc/cpuinfo", BUFFER_SIZE, + (cpuinfo_line_callback) parse_line, &state); +} 
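The parser above treats each /proc/cpuinfo line as a key/value pair split at the first ':', trims the surrounding spaces and tabs, and only acts on the "processor" and "apicid" keys; every other key is logged and skipped. A minimal standalone sketch of that split convention follows (split_key_value is an illustrative helper name, not part of the cpuinfo API):

#include <stdio.h>
#include <string.h>

/* Illustrative helper mirroring the key/value split done by parse_line() above:
 * locate the first ':', trim trailing spaces/tabs from the key and leading/trailing
 * spaces from the value. Returns 0 on success, -1 when the line has no separator
 * (such lines are ignored by the parser). */
static int split_key_value(const char* line, char* key, size_t key_size, char* value, size_t value_size)
{
	const char* separator = strchr(line, ':');
	if (separator == NULL) {
		return -1;
	}
	const char* key_end = separator;
	while (key_end != line && (key_end[-1] == ' ' || key_end[-1] == '\t')) {
		key_end--;
	}
	const char* value_start = separator + 1;
	while (*value_start == ' ') {
		value_start++;
	}
	const char* value_end = value_start + strlen(value_start);
	while (value_end != value_start && value_end[-1] == ' ') {
		value_end--;
	}
	snprintf(key, key_size, "%.*s", (int) (key_end - line), line);
	snprintf(value, value_size, "%.*s", (int) (value_end - value_start), value_start);
	return 0;
}

int main(void)
{
	/* Lines in the format reported by the Linux kernel on x86/x86-64 */
	const char* lines[] = { "processor\t: 3", "apicid\t\t: 6" };
	for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
		char key[64], value[64];
		if (split_key_value(lines[i], key, sizeof(key), value, sizeof(value)) == 0) {
			/* Prints key='processor' value='3' and key='apicid' value='6' */
			printf("key='%s' value='%s'\n", key, value);
		}
	}
	return 0;
}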
diff --git a/source/3rdparty/cpuinfo/src/x86/linux/init.c b/source/3rdparty/cpuinfo/src/x86/linux/init.c new file mode 100644 index 0000000..f565789 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/linux/init.c @@ -0,0 +1,629 @@ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + + +static inline uint32_t bit_mask(uint32_t bits) { + return (UINT32_C(1) << bits) - UINT32_C(1); +} + +static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) { + return (bitfield & mask) == mask; +} + +static inline uint32_t min(uint32_t a, uint32_t b) { + return a < b ? a : b; +} + +static inline int cmp(uint32_t a, uint32_t b) { + return (a > b) - (a < b); +} + +static int cmp_x86_linux_processor(const void* ptr_a, const void* ptr_b) { + const struct cpuinfo_x86_linux_processor* processor_a = (const struct cpuinfo_x86_linux_processor*) ptr_a; + const struct cpuinfo_x86_linux_processor* processor_b = (const struct cpuinfo_x86_linux_processor*) ptr_b; + + /* Move usable processors towards the start of the array */ + const bool usable_a = bitmask_all(processor_a->flags, CPUINFO_LINUX_FLAG_VALID); + const bool usable_b = bitmask_all(processor_b->flags, CPUINFO_LINUX_FLAG_VALID); + if (usable_a != usable_b) { + return (int) usable_b - (int) usable_a; + } + + /* Compare based on APIC ID (i.e. processor 0 < processor 1) */ + const uint32_t id_a = processor_a->apic_id; + const uint32_t id_b = processor_b->apic_id; + return cmp(id_a, id_b); +} + +static void cpuinfo_x86_count_objects( + uint32_t linux_processors_count, + const struct cpuinfo_x86_linux_processor linux_processors[restrict static linux_processors_count], + const struct cpuinfo_x86_processor processor[restrict static 1], + uint32_t valid_processor_mask, + uint32_t llc_apic_bits, + uint32_t cores_count_ptr[restrict static 1], + uint32_t clusters_count_ptr[restrict static 1], + uint32_t packages_count_ptr[restrict static 1], + uint32_t l1i_count_ptr[restrict static 1], + uint32_t l1d_count_ptr[restrict static 1], + uint32_t l2_count_ptr[restrict static 1], + uint32_t l3_count_ptr[restrict static 1], + uint32_t l4_count_ptr[restrict static 1]) +{ + const uint32_t core_apic_mask = + ~(bit_mask(processor->topology.thread_bits_length) << processor->topology.thread_bits_offset); + const uint32_t package_apic_mask = + core_apic_mask & ~(bit_mask(processor->topology.core_bits_length) << processor->topology.core_bits_offset); + const uint32_t llc_apic_mask = ~bit_mask(llc_apic_bits); + const uint32_t cluster_apic_mask = package_apic_mask | llc_apic_mask; + + uint32_t cores_count = 0, clusters_count = 0, packages_count = 0; + uint32_t l1i_count = 0, l1d_count = 0, l2_count = 0, l3_count = 0, l4_count = 0; + uint32_t last_core_id = UINT32_MAX, last_cluster_id = UINT32_MAX, last_package_id = UINT32_MAX; + uint32_t last_l1i_id = UINT32_MAX, last_l1d_id = UINT32_MAX; + uint32_t last_l2_id = UINT32_MAX, last_l3_id = UINT32_MAX, last_l4_id = UINT32_MAX; + for (uint32_t i = 0; i < linux_processors_count; i++) { + if (bitmask_all(linux_processors[i].flags, valid_processor_mask)) { + const uint32_t apic_id = linux_processors[i].apic_id; + cpuinfo_log_debug("APID ID %"PRIu32": system processor %"PRIu32, apic_id, linux_processors[i].linux_id); + + /* All bits of APIC ID except thread ID mask */ + const uint32_t core_id = apic_id & core_apic_mask; + if (core_id != last_core_id) { + last_core_id = core_id; + cores_count++; + } + /* All bits of APIC ID except thread ID and core ID masks */ + const uint32_t package_id 
= apic_id & package_apic_mask; + if (package_id != last_package_id) { + last_package_id = package_id; + packages_count++; + } + /* Bits of APIC ID which are part of either LLC or package ID mask */ + const uint32_t cluster_id = apic_id & cluster_apic_mask; + if (cluster_id != last_cluster_id) { + last_cluster_id = cluster_id; + clusters_count++; + } + if (processor->cache.l1i.size != 0) { + const uint32_t l1i_id = apic_id & ~bit_mask(processor->cache.l1i.apic_bits); + if (l1i_id != last_l1i_id) { + last_l1i_id = l1i_id; + l1i_count++; + } + } + if (processor->cache.l1d.size != 0) { + const uint32_t l1d_id = apic_id & ~bit_mask(processor->cache.l1d.apic_bits); + if (l1d_id != last_l1d_id) { + last_l1d_id = l1d_id; + l1d_count++; + } + } + if (processor->cache.l2.size != 0) { + const uint32_t l2_id = apic_id & ~bit_mask(processor->cache.l2.apic_bits); + if (l2_id != last_l2_id) { + last_l2_id = l2_id; + l2_count++; + } + } + if (processor->cache.l3.size != 0) { + const uint32_t l3_id = apic_id & ~bit_mask(processor->cache.l3.apic_bits); + if (l3_id != last_l3_id) { + last_l3_id = l3_id; + l3_count++; + } + } + if (processor->cache.l4.size != 0) { + const uint32_t l4_id = apic_id & ~bit_mask(processor->cache.l4.apic_bits); + if (l4_id != last_l4_id) { + last_l4_id = l4_id; + l4_count++; + } + } + } + } + *cores_count_ptr = cores_count; + *clusters_count_ptr = clusters_count; + *packages_count_ptr = packages_count; + *l1i_count_ptr = l1i_count; + *l1d_count_ptr = l1d_count; + *l2_count_ptr = l2_count; + *l3_count_ptr = l3_count; + *l4_count_ptr = l4_count; +} + +void cpuinfo_x86_linux_init(void) { + struct cpuinfo_x86_linux_processor* x86_linux_processors = NULL; + struct cpuinfo_processor* processors = NULL; + struct cpuinfo_core* cores = NULL; + struct cpuinfo_cluster* clusters = NULL; + struct cpuinfo_package* packages = NULL; + const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL; + const struct cpuinfo_core** linux_cpu_to_core_map = NULL; + struct cpuinfo_cache* l1i = NULL; + struct cpuinfo_cache* l1d = NULL; + struct cpuinfo_cache* l2 = NULL; + struct cpuinfo_cache* l3 = NULL; + struct cpuinfo_cache* l4 = NULL; + + const uint32_t max_processors_count = cpuinfo_linux_get_max_processors_count(); + cpuinfo_log_debug("system maximum processors count: %"PRIu32, max_processors_count); + + const uint32_t max_possible_processors_count = 1 + + cpuinfo_linux_get_max_possible_processor(max_processors_count); + cpuinfo_log_debug("maximum possible processors count: %"PRIu32, max_possible_processors_count); + const uint32_t max_present_processors_count = 1 + + cpuinfo_linux_get_max_present_processor(max_processors_count); + cpuinfo_log_debug("maximum present processors count: %"PRIu32, max_present_processors_count); + + uint32_t valid_processor_mask = 0; + uint32_t x86_linux_processors_count = max_processors_count; + if (max_present_processors_count != 0) { + x86_linux_processors_count = min(x86_linux_processors_count, max_present_processors_count); + valid_processor_mask = CPUINFO_LINUX_FLAG_PRESENT; + } else { + valid_processor_mask = CPUINFO_LINUX_FLAG_PROC_CPUINFO; + } + if (max_possible_processors_count != 0) { + x86_linux_processors_count = min(x86_linux_processors_count, max_possible_processors_count); + valid_processor_mask |= CPUINFO_LINUX_FLAG_POSSIBLE; + } + + x86_linux_processors = calloc(x86_linux_processors_count, sizeof(struct cpuinfo_x86_linux_processor)); + if (x86_linux_processors == NULL) { + cpuinfo_log_error( + "failed to allocate %zu bytes for descriptions of 
%"PRIu32" x86 logical processors", + x86_linux_processors_count * sizeof(struct cpuinfo_x86_linux_processor), + x86_linux_processors_count); + return; + } + + if (max_possible_processors_count != 0) { + cpuinfo_linux_detect_possible_processors( + x86_linux_processors_count, &x86_linux_processors->flags, + sizeof(struct cpuinfo_x86_linux_processor), + CPUINFO_LINUX_FLAG_POSSIBLE); + } + + if (max_present_processors_count != 0) { + cpuinfo_linux_detect_present_processors( + x86_linux_processors_count, &x86_linux_processors->flags, + sizeof(struct cpuinfo_x86_linux_processor), + CPUINFO_LINUX_FLAG_PRESENT); + } + + if (!cpuinfo_x86_linux_parse_proc_cpuinfo(x86_linux_processors_count, x86_linux_processors)) { + cpuinfo_log_error("failed to parse processor information from /proc/cpuinfo"); + return; + } + + for (uint32_t i = 0; i < x86_linux_processors_count; i++) { + if (bitmask_all(x86_linux_processors[i].flags, valid_processor_mask)) { + x86_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_VALID; + } + } + + struct cpuinfo_x86_processor x86_processor; + memset(&x86_processor, 0, sizeof(x86_processor)); + cpuinfo_x86_init_processor(&x86_processor); + char brand_string[48]; + cpuinfo_x86_normalize_brand_string(x86_processor.brand_string, brand_string); + + uint32_t processors_count = 0; + for (uint32_t i = 0; i < x86_linux_processors_count; i++) { + if (bitmask_all(x86_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + x86_linux_processors[i].linux_id = i; + processors_count++; + } + } + + qsort(x86_linux_processors, x86_linux_processors_count, sizeof(struct cpuinfo_x86_linux_processor), + cmp_x86_linux_processor); + + processors = calloc(processors_count, sizeof(struct cpuinfo_processor)); + if (processors == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors", + processors_count * sizeof(struct cpuinfo_processor), processors_count); + goto cleanup; + } + + uint32_t llc_apic_bits = 0; + if (x86_processor.cache.l4.size != 0) { + llc_apic_bits = x86_processor.cache.l4.apic_bits; + } else if (x86_processor.cache.l3.size != 0) { + llc_apic_bits = x86_processor.cache.l3.apic_bits; + } else if (x86_processor.cache.l2.size != 0) { + llc_apic_bits = x86_processor.cache.l2.apic_bits; + } else if (x86_processor.cache.l1d.size != 0) { + llc_apic_bits = x86_processor.cache.l1d.apic_bits; + } + uint32_t packages_count = 0, clusters_count = 0, cores_count = 0; + uint32_t l1i_count = 0, l1d_count = 0, l2_count = 0, l3_count = 0, l4_count = 0; + cpuinfo_x86_count_objects( + x86_linux_processors_count, x86_linux_processors, &x86_processor, valid_processor_mask, llc_apic_bits, + &cores_count, &clusters_count, &packages_count, &l1i_count, &l1d_count, &l2_count, &l3_count, &l4_count); + + cpuinfo_log_debug("detected %"PRIu32" cores", cores_count); + cpuinfo_log_debug("detected %"PRIu32" clusters", clusters_count); + cpuinfo_log_debug("detected %"PRIu32" packages", packages_count); + cpuinfo_log_debug("detected %"PRIu32" L1I caches", l1i_count); + cpuinfo_log_debug("detected %"PRIu32" L1D caches", l1d_count); + cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count); + cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count); + cpuinfo_log_debug("detected %"PRIu32" L4 caches", l4_count); + + linux_cpu_to_processor_map = calloc(x86_linux_processors_count, sizeof(struct cpuinfo_processor*)); + if (linux_cpu_to_processor_map == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for mapping entries of %"PRIu32" logical processors", + 
x86_linux_processors_count * sizeof(struct cpuinfo_processor*), + x86_linux_processors_count); + goto cleanup; + } + + linux_cpu_to_core_map = calloc(x86_linux_processors_count, sizeof(struct cpuinfo_core*)); + if (linux_cpu_to_core_map == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for mapping entries of %"PRIu32" cores", + x86_linux_processors_count * sizeof(struct cpuinfo_core*), + x86_linux_processors_count); + goto cleanup; + } + + cores = calloc(cores_count, sizeof(struct cpuinfo_core)); + if (cores == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores", + cores_count * sizeof(struct cpuinfo_core), cores_count); + goto cleanup; + } + + clusters = calloc(clusters_count, sizeof(struct cpuinfo_cluster)); + if (clusters == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters", + clusters_count * sizeof(struct cpuinfo_cluster), clusters_count); + goto cleanup; + } + + packages = calloc(packages_count, sizeof(struct cpuinfo_package)); + if (packages == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages", + packages_count * sizeof(struct cpuinfo_package), packages_count); + goto cleanup; + } + + if (l1i_count != 0) { + l1i = calloc(l1i_count, sizeof(struct cpuinfo_cache)); + if (l1i == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches", + l1i_count * sizeof(struct cpuinfo_cache), l1i_count); + goto cleanup; + } + } + if (l1d_count != 0) { + l1d = calloc(l1d_count, sizeof(struct cpuinfo_cache)); + if (l1d == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches", + l1d_count * sizeof(struct cpuinfo_cache), l1d_count); + goto cleanup; + } + } + if (l2_count != 0) { + l2 = calloc(l2_count, sizeof(struct cpuinfo_cache)); + if (l2 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches", + l2_count * sizeof(struct cpuinfo_cache), l2_count); + goto cleanup; + } + } + if (l3_count != 0) { + l3 = calloc(l3_count, sizeof(struct cpuinfo_cache)); + if (l3 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches", + l3_count * sizeof(struct cpuinfo_cache), l3_count); + goto cleanup; + } + } + if (l4_count != 0) { + l4 = calloc(l4_count, sizeof(struct cpuinfo_cache)); + if (l4 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L4 caches", + l4_count * sizeof(struct cpuinfo_cache), l4_count); + goto cleanup; + } + } + + const uint32_t core_apic_mask = + ~(bit_mask(x86_processor.topology.thread_bits_length) << x86_processor.topology.thread_bits_offset); + const uint32_t package_apic_mask = + core_apic_mask & ~(bit_mask(x86_processor.topology.core_bits_length) << x86_processor.topology.core_bits_offset); + const uint32_t llc_apic_mask = ~bit_mask(llc_apic_bits); + const uint32_t cluster_apic_mask = package_apic_mask | llc_apic_mask; + + uint32_t processor_index = UINT32_MAX, core_index = UINT32_MAX, cluster_index = UINT32_MAX, package_index = UINT32_MAX; + uint32_t l1i_index = UINT32_MAX, l1d_index = UINT32_MAX, l2_index = UINT32_MAX, l3_index = UINT32_MAX, l4_index = UINT32_MAX; + uint32_t cluster_id = 0, core_id = 0, smt_id = 0; + uint32_t last_apic_core_id = UINT32_MAX, last_apic_cluster_id = UINT32_MAX, last_apic_package_id = UINT32_MAX; + uint32_t last_l1i_id = UINT32_MAX, last_l1d_id = UINT32_MAX; + uint32_t last_l2_id = 
UINT32_MAX, last_l3_id = UINT32_MAX, last_l4_id = UINT32_MAX; + for (uint32_t i = 0; i < x86_linux_processors_count; i++) { + if (bitmask_all(x86_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) { + const uint32_t apic_id = x86_linux_processors[i].apic_id; + processor_index++; + smt_id++; + + /* All bits of APIC ID except thread ID mask */ + const uint32_t apid_core_id = apic_id & core_apic_mask; + if (apid_core_id != last_apic_core_id) { + core_index++; + core_id++; + smt_id = 0; + } + /* Bits of APIC ID which are part of either LLC or package ID mask */ + const uint32_t apic_cluster_id = apic_id & cluster_apic_mask; + if (apic_cluster_id != last_apic_cluster_id) { + cluster_index++; + cluster_id++; + } + /* All bits of APIC ID except thread ID and core ID masks */ + const uint32_t apic_package_id = apic_id & package_apic_mask; + if (apic_package_id != last_apic_package_id) { + package_index++; + core_id = 0; + cluster_id = 0; + } + + /* Initialize logical processor object */ + processors[processor_index].smt_id = smt_id; + processors[processor_index].core = cores + core_index; + processors[processor_index].cluster = clusters + cluster_index; + processors[processor_index].package = packages + package_index; + processors[processor_index].linux_id = x86_linux_processors[i].linux_id; + processors[processor_index].apic_id = x86_linux_processors[i].apic_id; + + if (apid_core_id != last_apic_core_id) { + /* new core */ + cores[core_index] = (struct cpuinfo_core) { + .processor_start = processor_index, + .processor_count = 1, + .core_id = core_id, + .cluster = clusters + cluster_index, + .package = packages + package_index, + .vendor = x86_processor.vendor, + .uarch = x86_processor.uarch, + .cpuid = x86_processor.cpuid, + }; + clusters[cluster_index].core_count += 1; + packages[package_index].core_count += 1; + last_apic_core_id = apid_core_id; + } else { + /* another logical processor on the same core */ + cores[core_index].processor_count++; + } + + if (apic_cluster_id != last_apic_cluster_id) { + /* new cluster */ + clusters[cluster_index].processor_start = processor_index; + clusters[cluster_index].processor_count = 1; + clusters[cluster_index].core_start = core_index; + clusters[cluster_index].cluster_id = cluster_id; + clusters[cluster_index].package = packages + package_index; + clusters[cluster_index].vendor = x86_processor.vendor; + clusters[cluster_index].uarch = x86_processor.uarch; + clusters[cluster_index].cpuid = x86_processor.cpuid; + packages[package_index].cluster_count += 1; + last_apic_cluster_id = apic_cluster_id; + } else { + /* another logical processor on the same cluster */ + clusters[cluster_index].processor_count++; + } + + if (apic_package_id != last_apic_package_id) { + /* new package */ + packages[package_index].processor_start = processor_index; + packages[package_index].processor_count = 1; + packages[package_index].core_start = core_index; + packages[package_index].cluster_start = cluster_index; + cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[package_index].name); + last_apic_package_id = apic_package_id; + } else { + /* another logical processor on the same package */ + packages[package_index].processor_count++; + } + + linux_cpu_to_processor_map[x86_linux_processors[i].linux_id] = processors + processor_index; + linux_cpu_to_core_map[x86_linux_processors[i].linux_id] = cores + core_index; + + if (x86_processor.cache.l1i.size != 0) { + const uint32_t l1i_id = apic_id & ~bit_mask(x86_processor.cache.l1i.apic_bits); + 
processors[i].cache.l1i = &l1i[l1i_index]; + if (l1i_id != last_l1i_id) { + /* new cache */ + last_l1i_id = l1i_id; + l1i[++l1i_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l1i.size, + .associativity = x86_processor.cache.l1i.associativity, + .sets = x86_processor.cache.l1i.sets, + .partitions = x86_processor.cache.l1i.partitions, + .line_size = x86_processor.cache.l1i.line_size, + .flags = x86_processor.cache.l1i.flags, + .processor_start = processor_index, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l1i[l1i_index].processor_count += 1; + } + processors[i].cache.l1i = &l1i[l1i_index]; + } else { + /* reset cache id */ + last_l1i_id = UINT32_MAX; + } + if (x86_processor.cache.l1d.size != 0) { + const uint32_t l1d_id = apic_id & ~bit_mask(x86_processor.cache.l1d.apic_bits); + processors[i].cache.l1d = &l1d[l1d_index]; + if (l1d_id != last_l1d_id) { + /* new cache */ + last_l1d_id = l1d_id; + l1d[++l1d_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l1d.size, + .associativity = x86_processor.cache.l1d.associativity, + .sets = x86_processor.cache.l1d.sets, + .partitions = x86_processor.cache.l1d.partitions, + .line_size = x86_processor.cache.l1d.line_size, + .flags = x86_processor.cache.l1d.flags, + .processor_start = processor_index, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l1d[l1d_index].processor_count += 1; + } + processors[i].cache.l1d = &l1d[l1d_index]; + } else { + /* reset cache id */ + last_l1d_id = UINT32_MAX; + } + if (x86_processor.cache.l2.size != 0) { + const uint32_t l2_id = apic_id & ~bit_mask(x86_processor.cache.l2.apic_bits); + processors[i].cache.l2 = &l2[l2_index]; + if (l2_id != last_l2_id) { + /* new cache */ + last_l2_id = l2_id; + l2[++l2_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l2.size, + .associativity = x86_processor.cache.l2.associativity, + .sets = x86_processor.cache.l2.sets, + .partitions = x86_processor.cache.l2.partitions, + .line_size = x86_processor.cache.l2.line_size, + .flags = x86_processor.cache.l2.flags, + .processor_start = processor_index, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l2[l2_index].processor_count += 1; + } + processors[i].cache.l2 = &l2[l2_index]; + } else { + /* reset cache id */ + last_l2_id = UINT32_MAX; + } + if (x86_processor.cache.l3.size != 0) { + const uint32_t l3_id = apic_id & ~bit_mask(x86_processor.cache.l3.apic_bits); + processors[i].cache.l3 = &l3[l3_index]; + if (l3_id != last_l3_id) { + /* new cache */ + last_l3_id = l3_id; + l3[++l3_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l3.size, + .associativity = x86_processor.cache.l3.associativity, + .sets = x86_processor.cache.l3.sets, + .partitions = x86_processor.cache.l3.partitions, + .line_size = x86_processor.cache.l3.line_size, + .flags = x86_processor.cache.l3.flags, + .processor_start = processor_index, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l3[l3_index].processor_count += 1; + } + processors[i].cache.l3 = &l3[l3_index]; + } else { + /* reset cache id */ + last_l3_id = UINT32_MAX; + } + if (x86_processor.cache.l4.size != 0) { + const uint32_t l4_id = apic_id & ~bit_mask(x86_processor.cache.l4.apic_bits); + processors[i].cache.l4 = &l4[l4_index]; + if (l4_id != last_l4_id) { + /* new cache */ + last_l4_id = l4_id; + l4[++l4_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l4.size, + 
.associativity = x86_processor.cache.l4.associativity, + .sets = x86_processor.cache.l4.sets, + .partitions = x86_processor.cache.l4.partitions, + .line_size = x86_processor.cache.l4.line_size, + .flags = x86_processor.cache.l4.flags, + .processor_start = processor_index, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l4[l4_index].processor_count += 1; + } + processors[i].cache.l4 = &l4[l4_index]; + } else { + /* reset cache id */ + last_l4_id = UINT32_MAX; + } + } + } + + /* Commit changes */ + cpuinfo_processors = processors; + cpuinfo_cores = cores; + cpuinfo_clusters = clusters; + cpuinfo_packages = packages; + cpuinfo_cache[cpuinfo_cache_level_1i] = l1i; + cpuinfo_cache[cpuinfo_cache_level_1d] = l1d; + cpuinfo_cache[cpuinfo_cache_level_2] = l2; + cpuinfo_cache[cpuinfo_cache_level_3] = l3; + cpuinfo_cache[cpuinfo_cache_level_4] = l4; + + cpuinfo_processors_count = processors_count; + cpuinfo_cores_count = cores_count; + cpuinfo_clusters_count = clusters_count; + cpuinfo_packages_count = packages_count; + cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1i_count; + cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1d_count; + cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count; + cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count; + cpuinfo_cache_count[cpuinfo_cache_level_4] = l4_count; + cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]); + + cpuinfo_global_uarch = (struct cpuinfo_uarch_info) { + .uarch = x86_processor.uarch, + .cpuid = x86_processor.cpuid, + .processor_count = processors_count, + .core_count = cores_count, + }; + + cpuinfo_linux_cpu_max = x86_linux_processors_count; + cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map; + cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map; + + __sync_synchronize(); + + cpuinfo_is_initialized = true; + + processors = NULL; + cores = NULL; + clusters = NULL; + packages = NULL; + l1i = l1d = l2 = l3 = l4 = NULL; + linux_cpu_to_processor_map = NULL; + linux_cpu_to_core_map = NULL; + +cleanup: + free(x86_linux_processors); + free(processors); + free(cores); + free(clusters); + free(packages); + free(l1i); + free(l1d); + free(l2); + free(l3); + free(l4); + free(linux_cpu_to_processor_map); + free(linux_cpu_to_core_map); +} diff --git a/source/3rdparty/cpuinfo/src/x86/mach/init.c b/source/3rdparty/cpuinfo/src/x86/mach/init.c new file mode 100644 index 0000000..b44d3ad --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/mach/init.c @@ -0,0 +1,356 @@ +#include +#include +#include + +#include +#include +#include +#include +#include + + +static inline uint32_t max(uint32_t a, uint32_t b) { + return a > b ? 
a : b; +} + +static inline uint32_t bit_mask(uint32_t bits) { + return (UINT32_C(1) << bits) - UINT32_C(1); +} + +void cpuinfo_x86_mach_init(void) { + struct cpuinfo_processor* processors = NULL; + struct cpuinfo_core* cores = NULL; + struct cpuinfo_cluster* clusters = NULL; + struct cpuinfo_package* packages = NULL; + struct cpuinfo_cache* l1i = NULL; + struct cpuinfo_cache* l1d = NULL; + struct cpuinfo_cache* l2 = NULL; + struct cpuinfo_cache* l3 = NULL; + struct cpuinfo_cache* l4 = NULL; + + struct cpuinfo_mach_topology mach_topology = cpuinfo_mach_detect_topology(); + processors = calloc(mach_topology.threads, sizeof(struct cpuinfo_processor)); + if (processors == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors", + mach_topology.threads * sizeof(struct cpuinfo_processor), mach_topology.threads); + goto cleanup; + } + cores = calloc(mach_topology.cores, sizeof(struct cpuinfo_core)); + if (cores == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores", + mach_topology.cores * sizeof(struct cpuinfo_core), mach_topology.cores); + goto cleanup; + } + /* On x86 cluster of cores is a physical package */ + clusters = calloc(mach_topology.packages, sizeof(struct cpuinfo_cluster)); + if (clusters == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters", + mach_topology.packages * sizeof(struct cpuinfo_cluster), mach_topology.packages); + goto cleanup; + } + packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package)); + if (packages == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages", + mach_topology.packages * sizeof(struct cpuinfo_package), mach_topology.packages); + goto cleanup; + } + + struct cpuinfo_x86_processor x86_processor; + memset(&x86_processor, 0, sizeof(x86_processor)); + cpuinfo_x86_init_processor(&x86_processor); + char brand_string[48]; + cpuinfo_x86_normalize_brand_string(x86_processor.brand_string, brand_string); + + const uint32_t threads_per_core = mach_topology.threads / mach_topology.cores; + const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages; + const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages; + for (uint32_t i = 0; i < mach_topology.packages; i++) { + clusters[i] = (struct cpuinfo_cluster) { + .processor_start = i * threads_per_package, + .processor_count = threads_per_package, + .core_start = i * cores_per_package, + .core_count = cores_per_package, + .cluster_id = 0, + .package = packages + i, + .vendor = x86_processor.vendor, + .uarch = x86_processor.uarch, + .cpuid = x86_processor.cpuid, + }; + packages[i].processor_start = i * threads_per_package; + packages[i].processor_count = threads_per_package; + packages[i].core_start = i * cores_per_package; + packages[i].core_count = cores_per_package; + packages[i].cluster_start = i; + packages[i].cluster_count = 1; + cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[i].name); + } + for (uint32_t i = 0; i < mach_topology.cores; i++) { + cores[i] = (struct cpuinfo_core) { + .processor_start = i * threads_per_core, + .processor_count = threads_per_core, + .core_id = i % cores_per_package, + .cluster = clusters + i / cores_per_package, + .package = packages + i / cores_per_package, + .vendor = x86_processor.vendor, + .uarch = x86_processor.uarch, + .cpuid = x86_processor.cpuid, + }; + } + for (uint32_t i = 0; i < 
mach_topology.threads; i++) { + const uint32_t smt_id = i % threads_per_core; + const uint32_t core_id = i / threads_per_core; + const uint32_t package_id = i / threads_per_package; + + /* Reconstruct APIC IDs from topology components */ + const uint32_t thread_bits_mask = bit_mask(x86_processor.topology.thread_bits_length); + const uint32_t core_bits_mask = bit_mask(x86_processor.topology.core_bits_length); + const uint32_t package_bits_offset = max( + x86_processor.topology.thread_bits_offset + x86_processor.topology.thread_bits_length, + x86_processor.topology.core_bits_offset + x86_processor.topology.core_bits_length); + const uint32_t apic_id = + ((smt_id & thread_bits_mask) << x86_processor.topology.thread_bits_offset) | + ((core_id & core_bits_mask) << x86_processor.topology.core_bits_offset) | + (package_id << package_bits_offset); + cpuinfo_log_debug("reconstructed APIC ID 0x%08"PRIx32" for thread %"PRIu32, apic_id, i); + + processors[i].smt_id = smt_id; + processors[i].core = cores + i / threads_per_core; + processors[i].cluster = clusters + i / threads_per_package; + processors[i].package = packages + i / threads_per_package; + processors[i].apic_id = apic_id; + } + + uint32_t threads_per_l1 = 0, l1_count = 0; + if (x86_processor.cache.l1i.size != 0 || x86_processor.cache.l1d.size != 0) { + threads_per_l1 = mach_topology.threads_per_cache[1]; + if (threads_per_l1 == 0) { + /* Assume that threads on the same core share L1 */ + threads_per_l1 = mach_topology.threads / mach_topology.cores; + cpuinfo_log_warning("Mach kernel did not report number of threads sharing L1 cache; assume %"PRIu32, + threads_per_l1); + } + l1_count = mach_topology.threads / threads_per_l1; + cpuinfo_log_debug("detected %"PRIu32" L1 caches", l1_count); + } + + uint32_t threads_per_l2 = 0, l2_count = 0; + if (x86_processor.cache.l2.size != 0) { + threads_per_l2 = mach_topology.threads_per_cache[2]; + if (threads_per_l2 == 0) { + if (x86_processor.cache.l3.size != 0) { + /* This is not a last-level cache; assume that threads on the same core share L2 */ + threads_per_l2 = mach_topology.threads / mach_topology.cores; + } else { + /* This is a last-level cache; assume that threads on the same package share L2 */ + threads_per_l2 = mach_topology.threads / mach_topology.packages; + } + cpuinfo_log_warning("Mach kernel did not report number of threads sharing L2 cache; assume %"PRIu32, + threads_per_l2); + } + l2_count = mach_topology.threads / threads_per_l2; + cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count); + } + + uint32_t threads_per_l3 = 0, l3_count = 0; + if (x86_processor.cache.l3.size != 0) { + threads_per_l3 = mach_topology.threads_per_cache[3]; + if (threads_per_l3 == 0) { + /* + * Assume that threads on the same package share L3. + * However, is it not necessarily the last-level cache (there may be L4 cache as well) + */ + threads_per_l3 = mach_topology.threads / mach_topology.packages; + cpuinfo_log_warning("Mach kernel did not report number of threads sharing L3 cache; assume %"PRIu32, + threads_per_l3); + } + l3_count = mach_topology.threads / threads_per_l3; + cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count); + } + + uint32_t threads_per_l4 = 0, l4_count = 0; + if (x86_processor.cache.l4.size != 0) { + threads_per_l4 = mach_topology.threads_per_cache[4]; + if (threads_per_l4 == 0) { + /* + * Assume that all threads share this L4. 
+ * As of now, L4 cache exists only on notebook x86 CPUs, which are single-package, + * but multi-socket systems could have shared L4 (like on IBM POWER8). + */ + threads_per_l4 = mach_topology.threads; + cpuinfo_log_warning("Mach kernel did not report number of threads sharing L4 cache; assume %"PRIu32, + threads_per_l4); + } + l4_count = mach_topology.threads / threads_per_l4; + cpuinfo_log_debug("detected %"PRIu32" L4 caches", l4_count); + } + + if (x86_processor.cache.l1i.size != 0) { + l1i = calloc(l1_count, sizeof(struct cpuinfo_cache)); + if (l1i == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches", + l1_count * sizeof(struct cpuinfo_cache), l1_count); + return; + } + for (uint32_t c = 0; c < l1_count; c++) { + l1i[c] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l1i.size, + .associativity = x86_processor.cache.l1i.associativity, + .sets = x86_processor.cache.l1i.sets, + .partitions = x86_processor.cache.l1i.partitions, + .line_size = x86_processor.cache.l1i.line_size, + .flags = x86_processor.cache.l1i.flags, + .processor_start = c * threads_per_l1, + .processor_count = threads_per_l1, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l1i = &l1i[t / threads_per_l1]; + } + } + + if (x86_processor.cache.l1d.size != 0) { + l1d = calloc(l1_count, sizeof(struct cpuinfo_cache)); + if (l1d == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches", + l1_count * sizeof(struct cpuinfo_cache), l1_count); + return; + } + for (uint32_t c = 0; c < l1_count; c++) { + l1d[c] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l1d.size, + .associativity = x86_processor.cache.l1d.associativity, + .sets = x86_processor.cache.l1d.sets, + .partitions = x86_processor.cache.l1d.partitions, + .line_size = x86_processor.cache.l1d.line_size, + .flags = x86_processor.cache.l1d.flags, + .processor_start = c * threads_per_l1, + .processor_count = threads_per_l1, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l1d = &l1d[t / threads_per_l1]; + } + } + + if (l2_count != 0) { + l2 = calloc(l2_count, sizeof(struct cpuinfo_cache)); + if (l2 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches", + l2_count * sizeof(struct cpuinfo_cache), l2_count); + return; + } + for (uint32_t c = 0; c < l2_count; c++) { + l2[c] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l2.size, + .associativity = x86_processor.cache.l2.associativity, + .sets = x86_processor.cache.l2.sets, + .partitions = x86_processor.cache.l2.partitions, + .line_size = x86_processor.cache.l2.line_size, + .flags = x86_processor.cache.l2.flags, + .processor_start = c * threads_per_l2, + .processor_count = threads_per_l2, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l2 = &l2[t / threads_per_l2]; + } + } + + if (l3_count != 0) { + l3 = calloc(l3_count, sizeof(struct cpuinfo_cache)); + if (l3 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches", + l3_count * sizeof(struct cpuinfo_cache), l3_count); + return; + } + for (uint32_t c = 0; c < l3_count; c++) { + l3[c] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l3.size, + .associativity = x86_processor.cache.l3.associativity, + .sets = x86_processor.cache.l3.sets, + .partitions = x86_processor.cache.l3.partitions, + .line_size = x86_processor.cache.l3.line_size, + .flags = 
x86_processor.cache.l3.flags, + .processor_start = c * threads_per_l3, + .processor_count = threads_per_l3, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l3 = &l3[t / threads_per_l3]; + } + } + + if (l4_count != 0) { + l4 = calloc(l4_count, sizeof(struct cpuinfo_cache)); + if (l4 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L4 caches", + l4_count * sizeof(struct cpuinfo_cache), l4_count); + return; + } + for (uint32_t c = 0; c < l4_count; c++) { + l4[c] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l4.size, + .associativity = x86_processor.cache.l4.associativity, + .sets = x86_processor.cache.l4.sets, + .partitions = x86_processor.cache.l4.partitions, + .line_size = x86_processor.cache.l4.line_size, + .flags = x86_processor.cache.l4.flags, + .processor_start = c * threads_per_l4, + .processor_count = threads_per_l4, + }; + } + for (uint32_t t = 0; t < mach_topology.threads; t++) { + processors[t].cache.l4 = &l4[t / threads_per_l4]; + } + } + + /* Commit changes */ + cpuinfo_processors = processors; + cpuinfo_cores = cores; + cpuinfo_clusters = clusters; + cpuinfo_packages = packages; + cpuinfo_cache[cpuinfo_cache_level_1i] = l1i; + cpuinfo_cache[cpuinfo_cache_level_1d] = l1d; + cpuinfo_cache[cpuinfo_cache_level_2] = l2; + cpuinfo_cache[cpuinfo_cache_level_3] = l3; + cpuinfo_cache[cpuinfo_cache_level_4] = l4; + + cpuinfo_processors_count = mach_topology.threads; + cpuinfo_cores_count = mach_topology.cores; + cpuinfo_clusters_count = mach_topology.packages; + cpuinfo_packages_count = mach_topology.packages; + cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count; + cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count; + cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count; + cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count; + cpuinfo_cache_count[cpuinfo_cache_level_4] = l4_count; + cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]); + + cpuinfo_global_uarch = (struct cpuinfo_uarch_info) { + .uarch = x86_processor.uarch, + .cpuid = x86_processor.cpuid, + .processor_count = mach_topology.threads, + .core_count = mach_topology.cores, + }; + + __sync_synchronize(); + + cpuinfo_is_initialized = true; + + processors = NULL; + cores = NULL; + clusters = NULL; + packages = NULL; + l1i = l1d = l2 = l3 = l4 = NULL; + +cleanup: + free(processors); + free(cores); + free(clusters); + free(packages); + free(l1i); + free(l1d); + free(l2); + free(l3); + free(l4); +} diff --git a/source/3rdparty/cpuinfo/src/x86/mockcpuid.c b/source/3rdparty/cpuinfo/src/x86/mockcpuid.c new file mode 100644 index 0000000..2631f09 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/mockcpuid.c @@ -0,0 +1,70 @@ +#include +#include + +#if !CPUINFO_MOCK + #error This file should be built only in mock mode +#endif + +#include + + +static struct cpuinfo_mock_cpuid* cpuinfo_mock_cpuid_data = NULL; +static uint32_t cpuinfo_mock_cpuid_entries = 0; +static uint32_t cpuinfo_mock_cpuid_leaf4_iteration = 0; + +void CPUINFO_ABI cpuinfo_mock_set_cpuid(struct cpuinfo_mock_cpuid* dump, size_t entries) { + cpuinfo_mock_cpuid_data = dump; + cpuinfo_mock_cpuid_entries = entries; +}; + +void CPUINFO_ABI cpuinfo_mock_get_cpuid(uint32_t eax, uint32_t regs[4]) { + if (eax != 4) { + cpuinfo_mock_cpuid_leaf4_iteration = 0; + } + if (cpuinfo_mock_cpuid_data != NULL && cpuinfo_mock_cpuid_entries != 0) { + if (eax == 4) { + uint32_t skip_entries = cpuinfo_mock_cpuid_leaf4_iteration; + for (uint32_t i = 0; i < 
cpuinfo_mock_cpuid_entries; i++) { + if (eax == cpuinfo_mock_cpuid_data[i].input_eax) { + if (skip_entries-- == 0) { + regs[0] = cpuinfo_mock_cpuid_data[i].eax; + regs[1] = cpuinfo_mock_cpuid_data[i].ebx; + regs[2] = cpuinfo_mock_cpuid_data[i].ecx; + regs[3] = cpuinfo_mock_cpuid_data[i].edx; + cpuinfo_mock_cpuid_leaf4_iteration++; + return; + } + } + } + } else { + for (uint32_t i = 0; i < cpuinfo_mock_cpuid_entries; i++) { + if (eax == cpuinfo_mock_cpuid_data[i].input_eax) { + regs[0] = cpuinfo_mock_cpuid_data[i].eax; + regs[1] = cpuinfo_mock_cpuid_data[i].ebx; + regs[2] = cpuinfo_mock_cpuid_data[i].ecx; + regs[3] = cpuinfo_mock_cpuid_data[i].edx; + return; + } + } + } + } + regs[0] = regs[1] = regs[2] = regs[3] = 0; +} + +void CPUINFO_ABI cpuinfo_mock_get_cpuidex(uint32_t eax, uint32_t ecx, uint32_t regs[4]) { + cpuinfo_mock_cpuid_leaf4_iteration = 0; + if (cpuinfo_mock_cpuid_data != NULL && cpuinfo_mock_cpuid_entries != 0) { + for (uint32_t i = 0; i < cpuinfo_mock_cpuid_entries; i++) { + if (eax == cpuinfo_mock_cpuid_data[i].input_eax && + ecx == cpuinfo_mock_cpuid_data[i].input_ecx) + { + regs[0] = cpuinfo_mock_cpuid_data[i].eax; + regs[1] = cpuinfo_mock_cpuid_data[i].ebx; + regs[2] = cpuinfo_mock_cpuid_data[i].ecx; + regs[3] = cpuinfo_mock_cpuid_data[i].edx; + return; + } + } + } + regs[0] = regs[1] = regs[2] = regs[3] = 0; +} diff --git a/source/3rdparty/cpuinfo/src/x86/name.c b/source/3rdparty/cpuinfo/src/x86/name.c new file mode 100644 index 0000000..957a0d8 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/name.c @@ -0,0 +1,708 @@ +#include +#include +#include +#include +#include + +#include +#include +#include + + +/* The state of the parser to be preserved between parsing different tokens. */ +struct parser_state { + /* + * Pointer to the start of the previous token if it is "model". + * NULL if previous token is not "model". + */ + char* context_model; + /* + * Pointer to the start of the previous token if it is a single-uppercase-letter token. + * NULL if previous token is anything different. + */ + char* context_upper_letter; + /* + * Pointer to the start of the previous token if it is "Dual". + * NULL if previous token is not "Dual". + */ + char* context_dual; + /* + * Pointer to the start of the previous token if it is "Core", "Dual-Core", "QuadCore", etc. + * NULL if previous token is anything different. + */ + char* context_core; + /* + * Pointer to the start of the previous token if it is "Eng" or "Engineering", etc. + * NULL if previous token is anything different. + */ + char* context_engineering; + /* + * Pointer to the '@' symbol in the brand string (separates frequency specification). + * NULL if there is no '@' symbol. + */ + char* frequency_separator; + /* Indicates whether the brand string (after transformations) contains frequency. */ + bool frequency_token; + /* Indicates whether the processor is of Xeon family (contains "Xeon" substring). */ + bool xeon; + /* Indicates whether the processor model number was already parsed. */ + bool parsed_model_number; + /* Indicates whether the processor is an engineering sample (contains "Engineering Sample" or "Eng Sample" substrings). */ + bool engineering_sample; +}; + +/** @brief Resets information about the previous token. Keeps all other state information. 
*/ +static void reset_context(struct parser_state* state) { + state->context_model = NULL; + state->context_upper_letter = NULL; + state->context_dual = NULL; + state->context_core = NULL; +} + +/** + * @brief Overwrites the supplied string with space characters if it exactly matches the given string. + * @param string The string to be compared against other string, and erased in case of matching. + * @param length The length of the two string to be compared against each other. + * @param target The string to compare against. + * @retval true If the two strings match and the first supplied string was erased (overwritten with space characters). + * @retval false If the two strings are different and the first supplied string remained unchanged. + */ +static inline bool erase_matching(char* string, size_t length, const char* target) { + const bool match = memcmp(string, target, length) == 0; + if (match) { + memset(string, ' ', length); + } + return match; +} + +/** + * @brief Checks if the supplied ASCII character is an uppercase latin letter. + * @param character The character to analyse. + * @retval true If the supplied character is an uppercase latin letter ('A' to 'Z'). + * @retval false If the supplied character is anything different. + */ +static inline bool is_upper_letter(char character) { + return (uint32_t) (character - 'A') <= (uint32_t)('Z' - 'A'); +} + +/** + * @brief Checks if the supplied ASCII character is a digit. + * @param character The character to analyse. + * @retval true If the supplied character is a digit ('0' to '9'). + * @retval false If the supplied character is anything different. + */ +static inline bool is_digit(char character) { + return (uint32_t) (character - '0') < UINT32_C(10); +} + +static inline bool is_zero_number(const char* token_start, const char* token_end) { + for (const char* char_ptr = token_start; char_ptr != token_end; char_ptr++) { + if (*char_ptr != '0') { + return false; + } + } + return true; +} + +static inline bool is_space(const char* token_start, const char* token_end) { + for (const char* char_ptr = token_start; char_ptr != token_end; char_ptr++) { + if (*char_ptr != ' ') { + return false; + } + } + return true; +} + +static inline bool is_number(const char* token_start, const char* token_end) { + for (const char* char_ptr = token_start; char_ptr != token_end; char_ptr++) { + if (!is_digit(*char_ptr)) { + return false; + } + } + return true; +} + +static inline bool is_model_number(const char* token_start, const char* token_end) { + for (const char* char_ptr = token_start + 1; char_ptr < token_end; char_ptr++) { + if (is_digit(char_ptr[-1]) && is_digit(char_ptr[0])) { + return true; + } + } + return false; +} + +static inline bool is_frequency(const char* token_start, const char* token_end) { + const size_t token_length = (size_t) (token_end - token_start); + if (token_length > 3 && token_end[-2] == 'H' && token_end[-1] == 'z') { + switch (token_end[-3]) { + case 'K': + case 'M': + case 'G': + return true; + } + } + return false; +} + +/** + * @warning Input and output tokens can overlap + */ +static inline char* move_token(const char* token_start, const char* token_end, char* output_ptr) { + const size_t token_length = (size_t) (token_end - token_start); + memmove(output_ptr, token_start, token_length); + return output_ptr + token_length; +} + +static bool transform_token(char* token_start, char* token_end, struct parser_state* state) { + const struct parser_state previousState = *state; + reset_context(state); + + size_t token_length 
= (size_t) (token_end - token_start); + + if (state->frequency_separator != NULL) { + if (token_start > state->frequency_separator) { + if (state->parsed_model_number) { + memset(token_start, ' ', token_length); + } + } + } + + + /* Early AMD and Cyrix processors have "tm" suffix for trademark, e.g. + * "AMD-K6tm w/ multimedia extensions" + * "Cyrix MediaGXtm MMXtm Enhanced" + */ + if (token_length > 2) { + const char context_char = token_end[-3]; + if (is_digit(context_char) || is_upper_letter(context_char)) { + if (erase_matching(token_end - 2, 2, "tm")) { + token_end -= 2; + token_length -= 2; + } + } + } + if (token_length > 4) { + /* Some early AMD CPUs have "AMD-" at the beginning, e.g. + * "AMD-K5(tm) Processor" + * "AMD-K6tm w/ multimedia extensions" + * "AMD-K6(tm) 3D+ Processor" + * "AMD-K6(tm)-III Processor" + */ + if (erase_matching(token_start, 4, "AMD-")) { + token_start += 4; + token_length -= 4; + } + } + switch (token_length) { + case 1: + /* + * On some Intel processors there is a space between the first letter of + * the name and the number after it, e.g. + * "Intel(R) Core(TM) i7 CPU X 990 @ 3.47GHz" + * "Intel(R) Core(TM) CPU Q 820 @ 1.73GHz" + * We want to merge these parts together, in reverse order, i.e. "X 990" -> "990X", "820" -> "820Q" + */ + if (is_upper_letter(token_start[0])) { + state->context_upper_letter = token_start; + return true; + } + break; + case 2: + /* Erase everything after "w/" in "AMD-K6tm w/ multimedia extensions" */ + if (erase_matching(token_start, token_length, "w/")) { + return false; + } + /* + * Intel Xeon processors since Ivy Bridge use versions, e.g. + * "Intel Xeon E3-1230 v2" + * Some processor branch strings report them as "V", others report as "v". + * Normalize the former (upper-case) to the latter (lower-case) version + */ + if (token_start[0] == 'V' && is_digit(token_start[1])) { + token_start[0] = 'v'; + return true; + } + break; + case 3: + /* + * Erase "CPU" in brand string on Intel processors, e.g. + * "Intel(R) Core(TM) i5 CPU 650 @ 3.20GHz" + * "Intel(R) Xeon(R) CPU X3210 @ 2.13GHz" + * "Intel(R) Atom(TM) CPU Z2760 @ 1.80GHz" + */ + if (erase_matching(token_start, token_length, "CPU")) { + return true; + } + /* + * Erase everything after "SOC" on AMD System-on-Chips, e.g. + * "AMD GX-212JC SOC with Radeon(TM) R2E Graphics \0" + */ + if (erase_matching(token_start, token_length, "SOC")) { + return false; + } + /* + * Erase "AMD" in brand string on AMD processors, e.g. + * "AMD Athlon(tm) Processor" + * "AMD Engineering Sample" + * "Quad-Core AMD Opteron(tm) Processor 2344 HE" + */ + if (erase_matching(token_start, token_length, "AMD")) { + return true; + } + /* + * Erase "VIA" in brand string on VIA processors, e.g. + * "VIA C3 Ezra" + * "VIA C7-M Processor 1200MHz" + * "VIA Nano L3050@1800MHz" + */ + if (erase_matching(token_start, token_length, "VIA")) { + return true; + } + /* Erase "IDT" in brand string on early Centaur processors, e.g. "IDT WinChip 2-3D" */ + if (erase_matching(token_start, token_length, "IDT")) { + return true; + } + /* + * Erase everything starting with "MMX" in + * "Cyrix MediaGXtm MMXtm Enhanced" ("tm" suffix is removed by this point) + */ + if (erase_matching(token_start, token_length, "MMX")) { + return false; + } + /* + * Erase everything starting with "APU" on AMD processors, e.g. 
+ * "AMD A10-4600M APU with Radeon(tm) HD Graphics" + * "AMD A10-7850K APU with Radeon(TM) R7 Graphics" + * "AMD A6-6310 APU with AMD Radeon R4 Graphics" + */ + if (erase_matching(token_start, token_length, "APU")) { + return false; + } + /* + * Remember to discard string if it contains "Eng Sample", + * e.g. "Eng Sample, ZD302046W4K43_36/30/20_2/8_A" + */ + if (memcmp(token_start, "Eng", token_length) == 0) { + state->context_engineering = token_start; + } + break; + case 4: + /* Remember to erase "Dual Core" in "AMD Athlon(tm) 64 X2 Dual Core Processor 3800+" */ + if (memcmp(token_start, "Dual", token_length) == 0) { + state->context_dual = token_start; + } + /* Remember if the processor is on Xeon family */ + if (memcmp(token_start, "Xeon", token_length) == 0) { + state->xeon = true; + } + /* Erase "Dual Core" in "AMD Athlon(tm) 64 X2 Dual Core Processor 3800+" */ + if (previousState.context_dual != NULL) { + if (memcmp(token_start, "Core", token_length) == 0) { + memset(previousState.context_dual, ' ', (size_t) (token_end - previousState.context_dual)); + state->context_core = token_end; + return true; + } + } + break; + case 5: + /* + * Erase "Intel" in brand string on Intel processors, e.g. + * "Intel(R) Xeon(R) CPU X3210 @ 2.13GHz" + * "Intel(R) Atom(TM) CPU D2700 @ 2.13GHz" + * "Genuine Intel(R) processor 800MHz" + */ + if (erase_matching(token_start, token_length, "Intel")) { + return true; + } + /* + * Erase "Cyrix" in brand string on Cyrix processors, e.g. + * "Cyrix MediaGXtm MMXtm Enhanced" + */ + if (erase_matching(token_start, token_length, "Cyrix")) { + return true; + } + /* + * Erase everything following "Geode" (but not "Geode" token itself) on Geode processors, e.g. + * "Geode(TM) Integrated Processor by AMD PCS" + * "Geode(TM) Integrated Processor by National Semi" + */ + if (memcmp(token_start, "Geode", token_length) == 0) { + return false; + } + /* Remember to erase "model unknown" in "AMD Processor model unknown" */ + if (memcmp(token_start, "model", token_length) == 0) { + state->context_model = token_start; + return true; + } + break; + case 6: + /* + * Erase everything starting with "Radeon" or "RADEON" on AMD APUs, e.g. + * "A8-7670K Radeon R7, 10 Compute Cores 4C+6G" + * "FX-8800P Radeon R7, 12 Compute Cores 4C+8G" + * "A12-9800 RADEON R7, 12 COMPUTE CORES 4C+8G" + * "A9-9410 RADEON R5, 5 COMPUTE CORES 2C+3G" + */ + if (erase_matching(token_start, token_length, "Radeon") || erase_matching(token_start, token_length, "RADEON")) { + return false; + } + /* + * Erase "Mobile" when it is not part of the processor name, + * e.g. in "AMD Turion(tm) X2 Ultra Dual-Core Mobile ZM-82" + */ + if (previousState.context_core != NULL) { + if (erase_matching(token_start, token_length, "Mobile")) { + return true; + } + } + /* Erase "family" in "Intel(R) Pentium(R) III CPU family 1266MHz" */ + if (erase_matching(token_start, token_length, "family")) { + return true; + } + /* Discard the string if it contains "Engineering Sample" */ + if (previousState.context_engineering != NULL) { + if (memcmp(token_start, "Sample", token_length) == 0) { + state->engineering_sample = true; + return false; + } + } + break; + case 7: + /* + * Erase "Geniune" in brand string on Intel engineering samples, e.g. + * "Genuine Intel(R) processor 800MHz" + * "Genuine Intel(R) CPU @ 2.13GHz" + * "Genuine Intel(R) CPU 0000 @ 1.73GHz" + */ + if (erase_matching(token_start, token_length, "Genuine")) { + return true; + } + /* + * Erase "12-core" in brand string on AMD Threadripper, e.g. 
+ * "AMD Ryzen Threadripper 1920X 12-Core Processor" + */ + if (erase_matching(token_start, token_length, "12-Core")) { + return true; + } + /* + * Erase "16-core" in brand string on AMD Threadripper, e.g. + * "AMD Ryzen Threadripper 1950X 16-Core Processor" + */ + if (erase_matching(token_start, token_length, "16-Core")) { + return true; + } + /* Erase "model unknown" in "AMD Processor model unknown" */ + if (previousState.context_model != NULL) { + if (memcmp(token_start, "unknown", token_length) == 0) { + memset(previousState.context_model, ' ', token_end - previousState.context_model); + return true; + } + } + /* + * Discard the string if it contains "Eng Sample:" or "Eng Sample," e.g. + * "AMD Eng Sample, ZD302046W4K43_36/30/20_2/8_A" + * "AMD Eng Sample: 2D3151A2M88E4_35/31_N" + */ + if (previousState.context_engineering != NULL) { + if (memcmp(token_start, "Sample,", token_length) == 0 || memcmp(token_start, "Sample:", token_length) == 0) { + state->engineering_sample = true; + return false; + } + } + break; + case 8: + /* Erase "QuadCore" in "VIA QuadCore L4700 @ 1.2+ GHz" */ + if (erase_matching(token_start, token_length, "QuadCore")) { + state->context_core = token_end; + return true; + } + /* Erase "Six-Core" in "AMD FX(tm)-6100 Six-Core Processor" */ + if (erase_matching(token_start, token_length, "Six-Core")) { + state->context_core = token_end; + return true; + } + break; + case 9: + if (erase_matching(token_start, token_length, "Processor")) { + return true; + } + if (erase_matching(token_start, token_length, "processor")) { + return true; + } + /* Erase "Dual-Core" in "Pentium(R) Dual-Core CPU T4200 @ 2.00GHz" */ + if (erase_matching(token_start, token_length, "Dual-Core")) { + state->context_core = token_end; + return true; + } + /* Erase "Quad-Core" in AMD processors, e.g. + * "Quad-Core AMD Opteron(tm) Processor 2347 HE" + * "AMD FX(tm)-4170 Quad-Core Processor" + */ + if (erase_matching(token_start, token_length, "Quad-Core")) { + state->context_core = token_end; + return true; + } + /* Erase "Transmeta" in brand string on Transmeta processors, e.g. + * "Transmeta(tm) Crusoe(tm) Processor TM5800" + * "Transmeta Efficeon(tm) Processor TM8000" + */ + if (erase_matching(token_start, token_length, "Transmeta")) { + return true; + } + break; + case 10: + /* + * Erase "Eight-Core" in AMD processors, e.g. + * "AMD FX(tm)-8150 Eight-Core Processor" + */ + if (erase_matching(token_start, token_length, "Eight-Core")) { + state->context_core = token_end; + return true; + } + break; + case 11: + /* + * Erase "Triple-Core" in AMD processors, e.g. + * "AMD Phenom(tm) II N830 Triple-Core Processor" + * "AMD Phenom(tm) 8650 Triple-Core Processor" + */ + if (erase_matching(token_start, token_length, "Triple-Core")) { + state->context_core = token_end; + return true; + } + /* + * Remember to discard string if it contains "Engineering Sample", + * e.g. "AMD Engineering Sample" + */ + if (memcmp(token_start, "Engineering", token_length) == 0) { + state->context_engineering = token_start; + return true; + } + break; + } + if (is_zero_number(token_start, token_end)) { + memset(token_start, ' ', token_length); + return true; + } + /* On some Intel processors the last letter of the name is put before the number, + * and an additional space it added, e.g. + * "Intel(R) Core(TM) i7 CPU X 990 @ 3.47GHz" + * "Intel(R) Core(TM) CPU Q 820 @ 1.73GHz" + * "Intel(R) Core(TM) i5 CPU M 480 @ 2.67GHz" + * We fix this issue, i.e. 
"X 990" -> "990X", "Q 820" -> "820Q" + */ + if (previousState.context_upper_letter != 0) { + /* A single letter token followed by 2-to-5 digit letter is merged together */ + switch (token_length) { + case 2: + case 3: + case 4: + case 5: + if (is_number(token_start, token_end)) { + /* Load the previous single-letter token */ + const char letter = *previousState.context_upper_letter; + /* Erase the previous single-letter token */ + *previousState.context_upper_letter = ' '; + /* Move the current token one position to the left */ + move_token(token_start, token_end, token_start - 1); + token_start -= 1; + /* + * Add the letter on the end + * Note: accessing token_start[-1] is safe because this is not the first token + */ + token_end[-1] = letter; + } + } + } + if (state->frequency_separator != NULL) { + if (is_model_number(token_start, token_end)) { + state->parsed_model_number = true; + } + } + if (is_frequency(token_start, token_end)) { + state->frequency_token = true; + } + return true; +} + +uint32_t cpuinfo_x86_normalize_brand_string( + const char raw_name[48], + char normalized_name[48]) +{ + normalized_name[0] = '\0'; + char name[48]; + memcpy(name, raw_name, sizeof(name)); + + /* + * First find the end of the string + * Start search from the end because some brand strings contain zeroes in the middle + */ + char* name_end = &name[48]; + while (name_end[-1] == '\0') { + /* + * Adject name_end by 1 position and check that we didn't reach the start of the brand string. + * This is possible if all characters are zero. + */ + if (--name_end == name) { + /* All characters are zeros */ + return 0; + } + } + + struct parser_state parser_state = { 0 }; + + /* Now unify all whitespace characters: replace tabs and '\0' with spaces */ + { + bool inside_parentheses = false; + for (char* char_ptr = name; char_ptr != name_end; char_ptr++) { + switch (*char_ptr) { + case '(': + inside_parentheses = true; + *char_ptr = ' '; + break; + case ')': + inside_parentheses = false; + *char_ptr = ' '; + break; + case '@': + parser_state.frequency_separator = char_ptr; + case '\0': + case '\t': + *char_ptr = ' '; + break; + default: + if (inside_parentheses) { + *char_ptr = ' '; + } + } + } + } + + /* Iterate through all tokens and erase redundant parts */ + { + bool is_token = false; + char* token_start; + for (char* char_ptr = name; char_ptr != name_end; char_ptr++) { + if (*char_ptr == ' ') { + if (is_token) { + is_token = false; + if (!transform_token(token_start, char_ptr, &parser_state)) { + name_end = char_ptr; + break; + } + } + } else { + if (!is_token) { + is_token = true; + token_start = char_ptr; + } + } + } + if (is_token) { + transform_token(token_start, name_end, &parser_state); + } + } + + /* If this is an engineering sample, return empty string */ + if (parser_state.engineering_sample) { + return 0; + } + + /* Check if there is some string before the frequency separator. 
*/ + if (parser_state.frequency_separator != NULL) { + if (is_space(name, parser_state.frequency_separator)) { + /* If only frequency is available, return empty string */ + return 0; + } + } + + /* Compact tokens: collapse multiple spacing into one */ + { + char* output_ptr = normalized_name; + char* token_start; + bool is_token = false; + bool previous_token_ends_with_dash = true; + bool current_token_starts_with_dash = false; + uint32_t token_count = 1; + for (char* char_ptr = name; char_ptr != name_end; char_ptr++) { + const char character = *char_ptr; + if (character == ' ') { + if (is_token) { + is_token = false; + if (!current_token_starts_with_dash && !previous_token_ends_with_dash) { + token_count += 1; + *output_ptr++ = ' '; + } + output_ptr = move_token(token_start, char_ptr, output_ptr); + /* Note: char_ptr[-1] exists because there is a token before this space */ + previous_token_ends_with_dash = (char_ptr[-1] == '-'); + } + } else { + if (!is_token) { + is_token = true; + token_start = char_ptr; + current_token_starts_with_dash = (character == '-'); + } + } + } + if (is_token) { + if (!current_token_starts_with_dash && !previous_token_ends_with_dash) { + token_count += 1; + *output_ptr++ = ' '; + } + output_ptr = move_token(token_start, name_end, output_ptr); + } + if (parser_state.frequency_token && token_count <= 1) { + /* The only remaining part is frequency */ + normalized_name[0] = '\0'; + return 0; + } + if (output_ptr < &normalized_name[48]) { + *output_ptr = '\0'; + } else { + normalized_name[47] = '\0'; + } + return (uint32_t) (output_ptr - normalized_name); + } +} + +static const char* vendor_string_map[] = { + [cpuinfo_vendor_intel] = "Intel", + [cpuinfo_vendor_amd] = "AMD", + [cpuinfo_vendor_via] = "VIA", + [cpuinfo_vendor_hygon] = "Hygon", + [cpuinfo_vendor_rdc] = "RDC", + [cpuinfo_vendor_dmp] = "DM&P", + [cpuinfo_vendor_transmeta] = "Transmeta", + [cpuinfo_vendor_cyrix] = "Cyrix", + [cpuinfo_vendor_rise] = "Rise", + [cpuinfo_vendor_nsc] = "NSC", + [cpuinfo_vendor_sis] = "SiS", + [cpuinfo_vendor_nexgen] = "NexGen", + [cpuinfo_vendor_umc] = "UMC", +}; + +uint32_t cpuinfo_x86_format_package_name( + enum cpuinfo_vendor vendor, + const char normalized_brand_string[48], + char package_name[CPUINFO_PACKAGE_NAME_MAX]) +{ + if (normalized_brand_string[0] == '\0') { + package_name[0] = '\0'; + return 0; + } + + const char* vendor_string = NULL; + if ((uint32_t) vendor < (uint32_t) CPUINFO_COUNT_OF(vendor_string_map)) { + vendor_string = vendor_string_map[(uint32_t) vendor]; + } + if (vendor_string == NULL) { + strncpy(package_name, normalized_brand_string, CPUINFO_PACKAGE_NAME_MAX); + package_name[CPUINFO_PACKAGE_NAME_MAX - 1] = '\0'; + return 0; + } else { + snprintf(package_name, CPUINFO_PACKAGE_NAME_MAX, + "%s %s", vendor_string, normalized_brand_string); + return (uint32_t) strlen(vendor_string) + 1; + } +} diff --git a/source/3rdparty/cpuinfo/src/x86/topology.c b/source/3rdparty/cpuinfo/src/x86/topology.c new file mode 100644 index 0000000..0e83d46 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/topology.c @@ -0,0 +1,127 @@ +#include +#include + +#include +#include +#include +#include +#include + + +enum topology_type { + topology_type_invalid = 0, + topology_type_smt = 1, + topology_type_core = 2, +}; + +void cpuinfo_x86_detect_topology( + uint32_t max_base_index, + uint32_t max_extended_index, + struct cpuid_regs leaf1, + struct cpuinfo_x86_topology* topology) +{ + /* + * HTT: indicates multi-core/hyper-threading support on this core. 
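A brief usage sketch of the two brand-string helpers completed above in name.c (a hedged example based only on the declarations shown in this patch; the raw 48-byte string would normally be read from CPUID leaves 0x80000002..0x80000004, and the quoted outputs are approximate):

#include <stdint.h>

/* Declarations come from the cpuinfo headers added in this patch. */
static uint32_t example_package_name(const char raw[48], char package[CPUINFO_PACKAGE_NAME_MAX]) {
    char normalized[48];
    package[0] = '\0';
    if (cpuinfo_x86_normalize_brand_string(raw, normalized) == 0) {
        /* empty, frequency-only, or engineering-sample brand string */
        return 0;
    }
    /* e.g. "Intel(R) Core(TM) i7-7800X CPU @ 3.50GHz" normalizes to roughly "Core i7-7800X" */
    return cpuinfo_x86_format_package_name(cpuinfo_vendor_intel, normalized, package);
    /* package now reads approximately "Intel Core i7-7800X" */
}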
+ * - Intel, AMD: edx[bit 28] in basic info. + */ + const bool htt = !!(leaf1.edx & UINT32_C(0x10000000)); + + uint32_t apic_id = 0; + if (htt) { + apic_id = leaf1.ebx >> 24; + bool amd_cmp_legacy = false; + if (max_extended_index >= UINT32_C(0x80000001)) { + const struct cpuid_regs leaf0x80000001 = cpuid(UINT32_C(0x80000001)); + /* + * CmpLegacy: core multi-processing legacy mode. + * - AMD: ecx[bit 1] in extended info (reserved bit on Intel CPUs). + */ + amd_cmp_legacy = !!(leaf0x80000001.ecx & UINT32_C(0x00000002)); + } + if (amd_cmp_legacy) { + if (max_extended_index >= UINT32_C(0x80000008)) { + const struct cpuid_regs leaf0x80000008 = cpuid(UINT32_C(0x80000008)); + /* + * NC: number of physical cores - 1. The number of cores in the processor is NC+1. + * - AMD: ecx[bits 0-7] in leaf 0x80000008 (reserved zero bits on Intel CPUs). + */ + const uint32_t cores_per_processor = 1 + (leaf0x80000008.ecx & UINT32_C(0x000000FF)); + topology->core_bits_length = bit_length(cores_per_processor); + cpuinfo_log_debug("HTT: APIC ID = %08"PRIx32", cores per processor = %"PRIu32, apic_id, cores_per_processor); + } else { + /* + * LogicalProcessorCount: the number of cores per processor. + * - AMD: ebx[bits 16-23] in basic info (different interpretation on Intel CPUs). + */ + const uint32_t cores_per_processor = (leaf1.ebx >> 16) & UINT32_C(0x000000FF); + if (cores_per_processor != 0) { + topology->core_bits_length = bit_length(cores_per_processor); + } + cpuinfo_log_debug("HTT: APIC ID = %08"PRIx32", cores per processor = %"PRIu32, apic_id, cores_per_processor); + } + } else { + /* + * Maximum number of addressable IDs for logical processors in this physical package. + * - Intel: ebx[bits 16-23] in basic info (different interpretation on AMD CPUs). + */ + const uint32_t logical_processors = (leaf1.ebx >> 16) & UINT32_C(0x000000FF); + if (logical_processors != 0) { + const uint32_t log2_max_logical_processors = bit_length(logical_processors); + const uint32_t log2_max_threads_per_core = log2_max_logical_processors - topology->core_bits_length; + topology->core_bits_offset = log2_max_threads_per_core; + topology->thread_bits_length = log2_max_threads_per_core; + } + cpuinfo_log_debug("HTT: APIC ID = %08"PRIx32", logical processors = %"PRIu32, apic_id, logical_processors); + } + } + + /* + * x2APIC: indicated support for x2APIC feature. + * - Intel: ecx[bit 21] in basic info (reserved bit on AMD CPUs). 
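To make the thread/core/package bit fields filled in by this function concrete, a small worked example (the layout values are illustrative only; a real part reports its layout through the HTT path above or through leaf 0xB below):

#include <stdint.h>

static void example_apic_decompose(void) {
    /* Hypothetical layout: 1 SMT bit at offset 0, 3 core bits at offset 1, package bits above that. */
    const uint32_t thread_bits_offset = 0, thread_bits_length = 1;
    const uint32_t core_bits_offset   = 1, core_bits_length   = 3;

    const uint32_t apic_id = 0x2B;                                                                /* 0b101011 */
    const uint32_t smt_id  = (apic_id >> thread_bits_offset) & ((1u << thread_bits_length) - 1);  /* 1 */
    const uint32_t core_id = (apic_id >> core_bits_offset) & ((1u << core_bits_length) - 1);      /* 5 */
    const uint32_t pkg_id  = apic_id >> (core_bits_offset + core_bits_length);                    /* 2 */
    (void) smt_id; (void) core_id; (void) pkg_id;
}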
+ */ + const bool x2apic = !!(leaf1.ecx & UINT32_C(0x00200000)); + if (x2apic && (max_base_index >= UINT32_C(0xB))) { + uint32_t level = 0; + uint32_t type; + uint32_t total_shift = 0; + topology->thread_bits_offset = topology->thread_bits_length = 0; + topology->core_bits_offset = topology->core_bits_length = 0; + do { + const struct cpuid_regs leafB = cpuidex(UINT32_C(0xB), level); + type = (leafB.ecx >> 8) & UINT32_C(0x000000FF); + const uint32_t level_shift = leafB.eax & UINT32_C(0x0000001F); + const uint32_t x2apic_id = leafB.edx; + apic_id = x2apic_id; + switch (type) { + case topology_type_invalid: + break; + case topology_type_smt: + cpuinfo_log_debug("x2 level %"PRIu32": APIC ID = %08"PRIx32", " + "type SMT, shift %"PRIu32", total shift %"PRIu32, + level, apic_id, level_shift, total_shift); + topology->thread_bits_offset = total_shift; + topology->thread_bits_length = level_shift; + break; + case topology_type_core: + cpuinfo_log_debug("x2 level %"PRIu32": APIC ID = %08"PRIx32", " + "type core, shift %"PRIu32", total shift %"PRIu32, + level, apic_id, level_shift, total_shift); + topology->core_bits_offset = total_shift; + topology->core_bits_length = level_shift; + break; + default: + cpuinfo_log_warning("unexpected topology type %"PRIu32" (offset %"PRIu32", length %"PRIu32") " + "reported in leaf 0x0000000B is ignored", type, total_shift, level_shift); + break; + } + total_shift += level_shift; + level += 1; + } while (type != 0); + cpuinfo_log_debug("x2APIC ID 0x%08"PRIx32", " + "SMT offset %"PRIu32" length %"PRIu32", core offset %"PRIu32" length %"PRIu32, apic_id, + topology->thread_bits_offset, topology->thread_bits_length, + topology->core_bits_offset, topology->core_bits_length); + } + + topology->apic_id = apic_id; +} diff --git a/source/3rdparty/cpuinfo/src/x86/uarch.c b/source/3rdparty/cpuinfo/src/x86/uarch.c new file mode 100644 index 0000000..3705499 --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/uarch.c @@ -0,0 +1,241 @@ +#include + +#include +#include + + +enum cpuinfo_uarch cpuinfo_x86_decode_uarch( + enum cpuinfo_vendor vendor, + const struct cpuinfo_x86_model_info* model_info) +{ + switch (vendor) { + case cpuinfo_vendor_intel: + switch (model_info->family) { +#if CPUINFO_ARCH_X86 + case 0x05: + switch (model_info->model) { + case 0x01: // Pentium (60, 66) + case 0x02: // Pentium (75, 90, 100, 120, 133, 150, 166, 200) + case 0x03: // Pentium OverDrive for Intel486-based systems + case 0x04: // Pentium MMX + return cpuinfo_uarch_p5; + case 0x09: + return cpuinfo_uarch_quark; + } + break; +#endif /* CPUINFO_ARCH_X86 */ + case 0x06: + switch (model_info->model) { + /* Mainstream cores */ +#if CPUINFO_ARCH_X86 + case 0x01: // Pentium Pro + case 0x03: // Pentium II (Klamath) and Pentium II Overdrive + case 0x05: // Pentium II (Deschutes, Tonga), Pentium II Celeron (Covington), Pentium II Xeon (Drake) + case 0x06: // Pentium II (Dixon), Pentium II Celeron (Mendocino) + case 0x07: // Pentium III (Katmai), Pentium III Xeon (Tanner) + case 0x08: // Pentium III (Coppermine), Pentium II Celeron (Coppermine-128), Pentium III Xeon (Cascades) + case 0x0A: // Pentium III Xeon (Cascades-2MB) + case 0x0B: // Pentium III (Tualatin), Pentium III Celeron (Tualatin-256) + return cpuinfo_uarch_p6; + case 0x09: // Pentium M (Banias), Pentium M Celeron (Banias-0, Banias-512) + case 0x0D: // Pentium M (Dothan), Pentium M Celeron (Dothan-512, Dothan-1024) + case 0x15: // Intel 80579 (Tolapai) + return cpuinfo_uarch_dothan; + case 0x0E: // Core Solo/Duo (Yonah), Pentium Dual-Core T2xxx 
(Yonah), Celeron M (Yonah-512, Yonah-1024), Dual-Core Xeon (Sossaman) + return cpuinfo_uarch_yonah; +#endif /* CPUINFO_ARCH_X86 */ + case 0x0F: // Core 2 Duo (Conroe, Conroe-2M, Merom), Core 2 Quad (Tigerton), Xeon (Woodcrest, Clovertown, Kentsfield) + case 0x16: // Celeron (Conroe-L, Merom-L), Core 2 Duo (Merom) + return cpuinfo_uarch_conroe; + case 0x17: // Core 2 Duo (Penryn-3M), Core 2 Quad (Yorkfield), Core 2 Extreme (Yorkfield), Xeon (Harpertown), Pentium Dual-Core (Penryn) + case 0x1D: // Xeon (Dunnington) + return cpuinfo_uarch_penryn; + case 0x1A: // Core iX (Bloomfield), Xeon (Gainestown) + case 0x1E: // Core iX (Lynnfield, Clarksfield) + case 0x1F: // Core iX (Havendale) + case 0x2E: // Xeon (Beckton) + case 0x25: // Core iX (Clarkdale) + case 0x2C: // Core iX (Gulftown), Xeon (Gulftown) + case 0x2F: // Xeon (Eagleton) + return cpuinfo_uarch_nehalem; + case 0x2A: // Core iX (Sandy Bridge) + case 0x2D: // Core iX (Sandy Bridge-E), Xeon (Sandy Bridge EP/EX) + return cpuinfo_uarch_sandy_bridge; + case 0x3A: // Core iX (Ivy Bridge) + case 0x3E: // Ivy Bridge-E + return cpuinfo_uarch_ivy_bridge; + case 0x3C: + case 0x3F: // Haswell-E + case 0x45: // Haswell ULT + case 0x46: // Haswell with eDRAM + return cpuinfo_uarch_haswell; + case 0x3D: // Broadwell-U + case 0x47: // Broadwell-H + case 0x4F: // Broadwell-E + case 0x56: // Broadwell-DE + return cpuinfo_uarch_broadwell; + case 0x4E: // Sky Lake Client Y/U + case 0x55: // Sky/Cascade/Cooper Lake Server + case 0x5E: // Sky Lake Client DT/H/S + case 0x8E: // Kaby/Whiskey/Amber/Comet Lake Y/U + case 0x9E: // Kaby/Coffee Lake DT/H/S + case 0xA5: // Comet Lake H/S + case 0xA6: // Comet Lake U/Y + return cpuinfo_uarch_sky_lake; + case 0x66: // Cannon Lake (Core i3-8121U) + return cpuinfo_uarch_palm_cove; + case 0x6A: // Ice Lake-DE + case 0x6C: // Ice Lake-SP + case 0x7D: // Ice Lake-Y + case 0x7E: // Ice Lake-U + return cpuinfo_uarch_sunny_cove; + + /* Low-power cores */ + case 0x1C: // Diamondville, Silverthorne, Pineview + case 0x26: // Tunnel Creek + return cpuinfo_uarch_bonnell; + case 0x27: // Medfield + case 0x35: // Cloverview + case 0x36: // Cedarview, Centerton + return cpuinfo_uarch_saltwell; + case 0x37: // Bay Trail + case 0x4A: // Merrifield + case 0x4D: // Avoton, Rangeley + case 0x5A: // Moorefield + case 0x5D: // SoFIA + return cpuinfo_uarch_silvermont; + case 0x4C: // Braswell, Cherry Trail + case 0x75: // Spreadtrum SC9853I-IA + return cpuinfo_uarch_airmont; + case 0x5C: // Apollo Lake + case 0x5F: // Denverton + return cpuinfo_uarch_goldmont; + case 0x7A: // Gemini Lake + return cpuinfo_uarch_goldmont_plus; + + /* Knights-series cores */ + case 0x57: + return cpuinfo_uarch_knights_landing; + case 0x85: + return cpuinfo_uarch_knights_mill; + } + break; + case 0x0F: + switch (model_info->model) { + case 0x00: // Pentium 4 Xeon (Foster) + case 0x01: // Pentium 4 Celeron (Willamette-128), Pentium 4 Xeon (Foster, Foster MP) + case 0x02: // Pentium 4 (Northwood), Pentium 4 EE (Gallatin), Pentium 4 Celeron (Northwood-128, Northwood-256), Pentium 4 Xeon (Gallatin DP, Prestonia) + return cpuinfo_uarch_willamette; + break; + case 0x03: // Pentium 4 (Prescott), Pentium 4 Xeon (Nocona) + case 0x04: // Pentium 4 (Prescott-2M), Pentium 4 EE (Prescott-2M), Pentium D (Smithfield), Celeron D (Prescott-256), Pentium 4 Xeon (Cranford, Irwindale, Paxville) + case 0x06: // Pentium 4 (Cedar Mill), Pentium D EE (Presler), Celeron D (Cedar Mill), Pentium 4 Xeon (Dempsey, Tulsa) + return cpuinfo_uarch_prescott; + } + break; + } + break; + case 
cpuinfo_vendor_amd: + switch (model_info->family) { +#if CPUINFO_ARCH_X86 + case 0x5: + switch (model_info->model) { + case 0x00: + case 0x01: + case 0x02: + return cpuinfo_uarch_k5; + case 0x06: + case 0x07: + case 0x08: + case 0x0D: + return cpuinfo_uarch_k6; + case 0x0A: + return cpuinfo_uarch_geode; + } + break; + case 0x6: + return cpuinfo_uarch_k7; +#endif /* CPUINFO_ARCH_X86 */ + case 0xF: // Opteron, Athlon 64, Sempron + case 0x11: // Turion + return cpuinfo_uarch_k8; + case 0x10: // Opteron, Phenom, Athlon, Sempron + case 0x12: // Llano APU + return cpuinfo_uarch_k10; + case 0x14: + return cpuinfo_uarch_bobcat; + case 0x15: + switch (model_info->model) { + case 0x00: // Engineering samples + case 0x01: // Zambezi, Interlagos + return cpuinfo_uarch_bulldozer; + case 0x02: // Vishera + case 0x10: // Trinity + case 0x13: // Richland + return cpuinfo_uarch_piledriver; + case 0x38: // Godavari + case 0x30: // Kaveri + return cpuinfo_uarch_steamroller; + case 0x60: // Carrizo + case 0x65: // Bristol Ridge + case 0x70: // Stoney Ridge + return cpuinfo_uarch_excavator; + default: + switch (model_info->extended_model) { + case 0x0: + return cpuinfo_uarch_bulldozer; + case 0x1: // No L3 cache + case 0x2: // With L3 cache + return cpuinfo_uarch_piledriver; + case 0x3: // With L3 cache + case 0x4: // No L3 cache + return cpuinfo_uarch_steamroller; + } + break; + } + break; + case 0x16: + if (model_info->model >= 0x03) { + return cpuinfo_uarch_puma; + } else { + return cpuinfo_uarch_jaguar; + } + case 0x17: + switch (model_info->model) { + case 0x01: // 14 nm Naples, Whitehaven, Summit Ridge, Snowy Owl + case 0x08: // 12 nm Pinnacle Ridge + case 0x11: // 14 nm Raven Ridge, Great Horned Owl + case 0x18: // 12 nm Picasso + return cpuinfo_uarch_zen; + case 0x31: // Rome, Castle Peak + case 0x60: // Renoir + case 0x68: // Lucienne + case 0x71: // Matisse + case 0x90: // Van Gogh + case 0x98: // Mero + return cpuinfo_uarch_zen2; + } + break; + case 0x19: + switch (model_info->model) { + case 0x01: // Genesis + case 0x21: // Vermeer + case 0x30: // Badami, Trento + case 0x40: // Rembrandt + case 0x50: // Cezanne + return cpuinfo_uarch_zen3; + } + break; + } + break; + case cpuinfo_vendor_hygon: + switch (model_info->family) { + case 0x00: + return cpuinfo_uarch_dhyana; + } + break; + default: + break; + } + return cpuinfo_uarch_unknown; +} diff --git a/source/3rdparty/cpuinfo/src/x86/vendor.c b/source/3rdparty/cpuinfo/src/x86/vendor.c new file mode 100644 index 0000000..bad50fa --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/vendor.c @@ -0,0 +1,189 @@ +#include + +#include +#include + + +/* Intel vendor string: "GenuineIntel" */ +#define Genu UINT32_C(0x756E6547) +#define ineI UINT32_C(0x49656E69) +#define ntel UINT32_C(0x6C65746E) + +/* AMD vendor strings: "AuthenticAMD", "AMDisbetter!", "AMD ISBETTER" */ +#define Auth UINT32_C(0x68747541) +#define enti UINT32_C(0x69746E65) +#define cAMD UINT32_C(0x444D4163) +#define AMDi UINT32_C(0x69444D41) +#define sbet UINT32_C(0x74656273) +#define ter UINT32_C(0x21726574) +#define AMD UINT32_C(0x20444D41) +#define ISBE UINT32_C(0x45425349) +#define TTER UINT32_C(0x52455454) + +/* VIA (Centaur) vendor strings: "CentaurHauls", "VIA VIA VIA " */ +#define Cent UINT32_C(0x746E6543) +#define aurH UINT32_C(0x48727561) +#define auls UINT32_C(0x736C7561) +#define VIA UINT32_C(0x20414956) + +/* Hygon vendor string: "HygonGenuine" */ +#define Hygo UINT32_C(0x6F677948) +#define nGen UINT32_C(0x6E65476E) +#define uine UINT32_C(0x656E6975) + +/* Transmeta vendor 
strings: "GenuineTMx86", "TransmetaCPU" */ +#define ineT UINT32_C(0x54656E69) +#define Mx86 UINT32_C(0x3638784D) +#define Tran UINT32_C(0x6E617254) +#define smet UINT32_C(0x74656D73) +#define aCPU UINT32_C(0x55504361) + +/* Cyrix vendor string: "CyrixInstead" */ +#define Cyri UINT32_C(0x69727943) +#define xIns UINT32_C(0x736E4978) +#define tead UINT32_C(0x64616574) + +/* Rise vendor string: "RiseRiseRise" */ +#define Rise UINT32_C(0x65736952) + +/* NSC vendor string: "Geode by NSC" */ +#define Geod UINT32_C(0x646F6547) +#define e_by UINT32_C(0x79622065) +#define NSC UINT32_C(0x43534E20) + +/* SiS vendor string: "SiS SiS SiS " */ +#define SiS UINT32_C(0x20536953) + +/* NexGen vendor string: "NexGenDriven" */ +#define NexG UINT32_C(0x4778654E) +#define enDr UINT32_C(0x72446E65) +#define iven UINT32_C(0x6E657669) + +/* UMC vendor string: "UMC UMC UMC " */ +#define UMC UINT32_C(0x20434D55) + +/* RDC vendor string: "Genuine RDC" */ +#define ine UINT32_C(0x20656E69) +#define RDC UINT32_C(0x43445220) + +/* D&MP vendor string: "Vortex86 SoC" */ +#define Vort UINT32_C(0x74726F56) +#define ex86 UINT32_C(0x36387865) +#define SoC UINT32_C(0x436F5320) + + +enum cpuinfo_vendor cpuinfo_x86_decode_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx) { + switch (ebx) { + case Genu: + switch (edx) { + case ineI: + if (ecx == ntel) { + /* "GenuineIntel" */ + return cpuinfo_vendor_intel; + } + break; +#if CPUINFO_ARCH_X86 + case ineT: + if (ecx == Mx86) { + /* "GenuineTMx86" */ + return cpuinfo_vendor_transmeta; + } + break; + case ine: + if (ecx == RDC) { + /* "Genuine RDC" */ + return cpuinfo_vendor_rdc; + } + break; +#endif + } + break; + case Auth: + if (edx == enti && ecx == cAMD) { + /* "AuthenticAMD" */ + return cpuinfo_vendor_amd; + } + break; + case Cent: + if (edx == aurH && ecx == auls) { + /* "CentaurHauls" */ + return cpuinfo_vendor_via; + } + break; + case Hygo: + if (edx == nGen && ecx == uine) { + /* "HygonGenuine" */ + return cpuinfo_vendor_hygon; + } + break; +#if CPUINFO_ARCH_X86 + case AMDi: + if (edx == sbet && ecx == ter) { + /* "AMDisbetter!" 
*/ + return cpuinfo_vendor_amd; + } + break; + case AMD: + if (edx == ISBE && ecx == TTER) { + /* "AMD ISBETTER" */ + return cpuinfo_vendor_amd; + } + break; + case VIA: + if (edx == VIA && ecx == VIA) { + /* "VIA VIA VIA " */ + return cpuinfo_vendor_via; + } + break; + case Tran: + if (edx == smet && ecx == aCPU) { + /* "TransmetaCPU" */ + return cpuinfo_vendor_transmeta; + } + break; + case Cyri: + if (edx == xIns && ecx == tead) { + /* "CyrixInstead" */ + return cpuinfo_vendor_cyrix; + } + break; + case Rise: + if (edx == Rise && ecx == Rise) { + /* "RiseRiseRise" */ + return cpuinfo_vendor_rise; + } + break; + case Geod: + if (edx == e_by && ecx == NSC) { + /* "Geode by NSC" */ + return cpuinfo_vendor_nsc; + } + break; + case SiS: + if (edx == SiS && ecx == SiS) { + /* "SiS SiS SiS " */ + return cpuinfo_vendor_sis; + } + break; + case NexG: + if (edx == enDr && ecx == iven) { + /* "NexGenDriven" */ + return cpuinfo_vendor_nexgen; + } + break; + case UMC: + if (edx == UMC && ecx == UMC) { + /* "UMC UMC UMC " */ + return cpuinfo_vendor_umc; + } + break; + case Vort: + if (edx == ex86 && ecx == SoC) { + /* "Vortex86 SoC" */ + return cpuinfo_vendor_dmp; + } + break; +#endif + } + return cpuinfo_vendor_unknown; +} diff --git a/source/3rdparty/cpuinfo/src/x86/windows/api.h b/source/3rdparty/cpuinfo/src/x86/windows/api.h new file mode 100644 index 0000000..33d917e --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/windows/api.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include + +#include +#include + +struct cpuinfo_arm_linux_processor { + /** + * Minimum processor ID on the package which includes this logical processor. + * This value can serve as an ID for the cluster of logical processors: it is the + * same for all logical processors on the same package. + */ + uint32_t package_leader_id; + /** + * Minimum processor ID on the core which includes this logical processor. + * This value can serve as an ID for the cluster of logical processors: it is the + * same for all logical processors on the same package. + */ + /** + * Number of logical processors in the package. + */ + uint32_t package_processor_count; + /** + * Maximum frequency, in kHZ. + * The value is parsed from /sys/devices/system/cpu/cpu/cpufreq/cpuinfo_max_freq + * If failed to read or parse the file, the value is 0. + */ + uint32_t max_frequency; + /** + * Minimum frequency, in kHZ. + * The value is parsed from /sys/devices/system/cpu/cpu/cpufreq/cpuinfo_min_freq + * If failed to read or parse the file, the value is 0. 
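Stepping back to the packed four-character constants in vendor.c above: CPUID leaf 0 returns the 12-character vendor string little-endian across EBX, EDX and ECX (in that order), so each constant is simply four ASCII codes packed into one register-sized word. A small sketch (the register values passed in are hypothetical inputs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void example_vendor_string(uint32_t ebx, uint32_t edx, uint32_t ecx) {
    /* 'G' | 'e'<<8 | 'n'<<16 | 'u'<<24 == 0x756E6547, i.e. the "Genu" constant above */
    char vendor[13];
    memcpy(&vendor[0], &ebx, 4);   /* "Genu" on an Intel part */
    memcpy(&vendor[4], &edx, 4);   /* "ineI" */
    memcpy(&vendor[8], &ecx, 4);   /* "ntel" */
    vendor[12] = '\0';
    printf("%s\n", vendor);        /* "GenuineIntel" */
}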
+ */ + uint32_t min_frequency; + /** Linux processor ID */ + uint32_t system_processor_id; + uint32_t flags; +}; diff --git a/source/3rdparty/cpuinfo/src/x86/windows/init.c b/source/3rdparty/cpuinfo/src/x86/windows/init.c new file mode 100644 index 0000000..274075c --- /dev/null +++ b/source/3rdparty/cpuinfo/src/x86/windows/init.c @@ -0,0 +1,634 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#ifdef __GNUC__ + #define CPUINFO_ALLOCA __builtin_alloca +#else + #define CPUINFO_ALLOCA _alloca +#endif + + +static inline uint32_t bit_mask(uint32_t bits) { + return (UINT32_C(1) << bits) - UINT32_C(1); +} + +static inline uint32_t low_index_from_kaffinity(KAFFINITY kaffinity) { + #if defined(_M_X64) || defined(_M_AMD64) + unsigned long index; + _BitScanForward64(&index, (unsigned __int64) kaffinity); + return (uint32_t) index; + #elif defined(_M_IX86) + unsigned long index; + _BitScanForward(&index, (unsigned long) kaffinity); + return (uint32_t) index; + #else + #error Platform-specific implementation required + #endif +} + +static void cpuinfo_x86_count_caches( + uint32_t processors_count, + const struct cpuinfo_processor* processors, + const struct cpuinfo_x86_processor* x86_processor, + uint32_t* l1i_count_ptr, + uint32_t* l1d_count_ptr, + uint32_t* l2_count_ptr, + uint32_t* l3_count_ptr, + uint32_t* l4_count_ptr) +{ + uint32_t l1i_count = 0, l1d_count = 0, l2_count = 0, l3_count = 0, l4_count = 0; + uint32_t last_l1i_id = UINT32_MAX, last_l1d_id = UINT32_MAX; + uint32_t last_l2_id = UINT32_MAX, last_l3_id = UINT32_MAX, last_l4_id = UINT32_MAX; + for (uint32_t i = 0; i < processors_count; i++) { + const uint32_t apic_id = processors[i].apic_id; + cpuinfo_log_debug("APID ID %"PRIu32": logical processor %"PRIu32, apic_id, i); + + if (x86_processor->cache.l1i.size != 0) { + const uint32_t l1i_id = apic_id & ~bit_mask(x86_processor->cache.l1i.apic_bits); + if (l1i_id != last_l1i_id) { + last_l1i_id = l1i_id; + l1i_count++; + } + } + if (x86_processor->cache.l1d.size != 0) { + const uint32_t l1d_id = apic_id & ~bit_mask(x86_processor->cache.l1d.apic_bits); + if (l1d_id != last_l1d_id) { + last_l1d_id = l1d_id; + l1d_count++; + } + } + if (x86_processor->cache.l2.size != 0) { + const uint32_t l2_id = apic_id & ~bit_mask(x86_processor->cache.l2.apic_bits); + if (l2_id != last_l2_id) { + last_l2_id = l2_id; + l2_count++; + } + } + if (x86_processor->cache.l3.size != 0) { + const uint32_t l3_id = apic_id & ~bit_mask(x86_processor->cache.l3.apic_bits); + if (l3_id != last_l3_id) { + last_l3_id = l3_id; + l3_count++; + } + } + if (x86_processor->cache.l4.size != 0) { + const uint32_t l4_id = apic_id & ~bit_mask(x86_processor->cache.l4.apic_bits); + if (l4_id != last_l4_id) { + last_l4_id = l4_id; + l4_count++; + } + } + } + *l1i_count_ptr = l1i_count; + *l1d_count_ptr = l1d_count; + *l2_count_ptr = l2_count; + *l3_count_ptr = l3_count; + *l4_count_ptr = l4_count; +} + +static bool cpuinfo_x86_windows_is_wine(void) { + HMODULE ntdll = GetModuleHandleW(L"ntdll.dll"); + if (ntdll == NULL) { + return false; + } + + return GetProcAddress(ntdll, "wine_get_version") != NULL; +} + +BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context) { + struct cpuinfo_processor* processors = NULL; + struct cpuinfo_core* cores = NULL; + struct cpuinfo_cluster* clusters = NULL; + struct cpuinfo_package* packages = NULL; + struct cpuinfo_cache* l1i = NULL; + struct cpuinfo_cache* l1d = NULL; + struct cpuinfo_cache* l2 = NULL; + struct 
cpuinfo_cache* l3 = NULL; + struct cpuinfo_cache* l4 = NULL; + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX processor_infos = NULL; + + HANDLE heap = GetProcessHeap(); + const bool is_wine = cpuinfo_x86_windows_is_wine(); + + struct cpuinfo_x86_processor x86_processor; + ZeroMemory(&x86_processor, sizeof(x86_processor)); + cpuinfo_x86_init_processor(&x86_processor); + char brand_string[48]; + cpuinfo_x86_normalize_brand_string(x86_processor.brand_string, brand_string); + + const uint32_t thread_bits_mask = bit_mask(x86_processor.topology.thread_bits_length); + const uint32_t core_bits_mask = bit_mask(x86_processor.topology.core_bits_length); + const uint32_t package_bits_offset = max( + x86_processor.topology.thread_bits_offset + x86_processor.topology.thread_bits_length, + x86_processor.topology.core_bits_offset + x86_processor.topology.core_bits_length); + + /* WINE doesn't implement GetMaximumProcessorGroupCount and aborts when calling it */ + const uint32_t max_group_count = is_wine ? 1 : (uint32_t) GetMaximumProcessorGroupCount(); + cpuinfo_log_debug("detected %"PRIu32" processor groups", max_group_count); + + uint32_t processors_count = 0; + uint32_t* processors_per_group = (uint32_t*) CPUINFO_ALLOCA(max_group_count * sizeof(uint32_t)); + for (uint32_t i = 0; i < max_group_count; i++) { + processors_per_group[i] = GetMaximumProcessorCount((WORD) i); + cpuinfo_log_debug("detected %"PRIu32" processors in group %"PRIu32, + processors_per_group[i], i); + processors_count += processors_per_group[i]; + } + + uint32_t* processors_before_group = (uint32_t*) CPUINFO_ALLOCA(max_group_count * sizeof(uint32_t)); + for (uint32_t i = 0, count = 0; i < max_group_count; i++) { + processors_before_group[i] = count; + cpuinfo_log_debug("detected %"PRIu32" processors before group %"PRIu32, + processors_before_group[i], i); + count += processors_per_group[i]; + } + + processors = HeapAlloc(heap, HEAP_ZERO_MEMORY, processors_count * sizeof(struct cpuinfo_processor)); + if (processors == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors", + processors_count * sizeof(struct cpuinfo_processor), processors_count); + goto cleanup; + } + + DWORD cores_info_size = 0; + if (GetLogicalProcessorInformationEx(RelationProcessorCore, NULL, &cores_info_size) == FALSE) { + const DWORD last_error = GetLastError(); + if (last_error != ERROR_INSUFFICIENT_BUFFER) { + cpuinfo_log_error("failed to query size of processor cores information: error %"PRIu32, + (uint32_t) last_error); + goto cleanup; + } + } + + DWORD packages_info_size = 0; + if (GetLogicalProcessorInformationEx(RelationProcessorPackage, NULL, &packages_info_size) == FALSE) { + const DWORD last_error = GetLastError(); + if (last_error != ERROR_INSUFFICIENT_BUFFER) { + cpuinfo_log_error("failed to query size of processor packages information: error %"PRIu32, + (uint32_t) last_error); + goto cleanup; + } + } + + DWORD max_info_size = max(cores_info_size, packages_info_size); + + processor_infos = HeapAlloc(heap, 0, max_info_size); + if (processor_infos == NULL) { + cpuinfo_log_error("failed to allocate %"PRIu32" bytes for logical processor information", + (uint32_t) max_info_size); + goto cleanup; + } + + if (GetLogicalProcessorInformationEx(RelationProcessorPackage, processor_infos, &max_info_size) == FALSE) { + cpuinfo_log_error("failed to query processor packages information: error %"PRIu32, + (uint32_t) GetLastError()); + goto cleanup; + } + + uint32_t packages_count = 0; + 
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX packages_info_end = + (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) ((uintptr_t) processor_infos + packages_info_size); + for (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX package_info = processor_infos; + package_info < packages_info_end; + package_info = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) ((uintptr_t) package_info + package_info->Size)) + { + if (package_info->Relationship != RelationProcessorPackage) { + cpuinfo_log_warning("unexpected processor info type (%"PRIu32") for processor package information", + (uint32_t) package_info->Relationship); + continue; + } + + /* We assume that packages are reported in APIC order */ + const uint32_t package_id = packages_count++; + /* Reconstruct package part of APIC ID */ + const uint32_t package_apic_id = package_id << package_bits_offset; + /* Iterate processor groups and set the package part of APIC ID */ + for (uint32_t i = 0; i < package_info->Processor.GroupCount; i++) { + const uint32_t group_id = package_info->Processor.GroupMask[i].Group; + /* Global index of the first logical processor belonging to this group */ + const uint32_t group_processors_start = processors_before_group[group_id]; + /* Bitmask representing processors in this group belonging to this package */ + KAFFINITY group_processors_mask = package_info->Processor.GroupMask[i].Mask; + while (group_processors_mask != 0) { + const uint32_t group_processor_id = low_index_from_kaffinity(group_processors_mask); + const uint32_t processor_id = group_processors_start + group_processor_id; + processors[processor_id].package = (const struct cpuinfo_package*) NULL + package_id; + processors[processor_id].windows_group_id = (uint16_t) group_id; + processors[processor_id].windows_processor_id = (uint16_t) group_processor_id; + processors[processor_id].apic_id = package_apic_id; + + /* Reset the lowest bit in affinity mask */ + group_processors_mask &= (group_processors_mask - 1); + } + } + } + + max_info_size = max(cores_info_size, packages_info_size); + if (GetLogicalProcessorInformationEx(RelationProcessorCore, processor_infos, &max_info_size) == FALSE) { + cpuinfo_log_error("failed to query processor cores information: error %"PRIu32, + (uint32_t) GetLastError()); + goto cleanup; + } + + uint32_t cores_count = 0; + /* Index (among all cores) of the the first core on the current package */ + uint32_t package_core_start = 0; + uint32_t current_package_apic_id = 0; + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX cores_info_end = + (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) ((uintptr_t) processor_infos + cores_info_size); + for (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX core_info = processor_infos; + core_info < cores_info_end; + core_info = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) ((uintptr_t) core_info + core_info->Size)) + { + if (core_info->Relationship != RelationProcessorCore) { + cpuinfo_log_warning("unexpected processor info type (%"PRIu32") for processor core information", + (uint32_t) core_info->Relationship); + continue; + } + + /* We assume that cores and logical processors are reported in APIC order */ + const uint32_t core_id = cores_count++; + uint32_t smt_id = 0; + /* Reconstruct core part of APIC ID */ + const uint32_t core_apic_id = (core_id & core_bits_mask) << x86_processor.topology.core_bits_offset; + /* Iterate processor groups and set the core & SMT parts of APIC ID */ + for (uint32_t i = 0; i < core_info->Processor.GroupCount; i++) { + const uint32_t group_id = core_info->Processor.GroupMask[i].Group; + /* Global index of the 
first logical processor belonging to this group */ + const uint32_t group_processors_start = processors_before_group[group_id]; + /* Bitmask representing processors in this group belonging to this package */ + KAFFINITY group_processors_mask = core_info->Processor.GroupMask[i].Mask; + while (group_processors_mask != 0) { + const uint32_t group_processor_id = low_index_from_kaffinity(group_processors_mask); + const uint32_t processor_id = group_processors_start + group_processor_id; + + /* Check if this is the first core on a new package */ + if (processors[processor_id].apic_id != current_package_apic_id) { + package_core_start = core_id; + current_package_apic_id = processors[processor_id].apic_id; + } + /* Core ID w.r.t package */ + const uint32_t package_core_id = core_id - package_core_start; + + /* Update APIC ID with core and SMT parts */ + processors[processor_id].apic_id |= + ((smt_id & thread_bits_mask) << x86_processor.topology.thread_bits_offset) | + ((package_core_id & core_bits_mask) << x86_processor.topology.core_bits_offset); + cpuinfo_log_debug("reconstructed APIC ID 0x%08"PRIx32" for processor %"PRIu32" in group %"PRIu32, + processors[processor_id].apic_id, group_processor_id, group_id); + + /* Set SMT ID (assume logical processors within the core are reported in APIC order) */ + processors[processor_id].smt_id = smt_id++; + processors[processor_id].core = (const struct cpuinfo_core*) NULL + core_id; + + /* Reset the lowest bit in affinity mask */ + group_processors_mask &= (group_processors_mask - 1); + } + } + } + + cores = HeapAlloc(heap, HEAP_ZERO_MEMORY, cores_count * sizeof(struct cpuinfo_core)); + if (cores == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores", + cores_count * sizeof(struct cpuinfo_core), cores_count); + goto cleanup; + } + + clusters = HeapAlloc(heap, HEAP_ZERO_MEMORY, packages_count * sizeof(struct cpuinfo_cluster)); + if (clusters == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters", + packages_count * sizeof(struct cpuinfo_cluster), packages_count); + goto cleanup; + } + + packages = HeapAlloc(heap, HEAP_ZERO_MEMORY, packages_count * sizeof(struct cpuinfo_package)); + if (packages == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages", + packages_count * sizeof(struct cpuinfo_package), packages_count); + goto cleanup; + } + + for (uint32_t i = processors_count; i != 0; i--) { + const uint32_t processor_id = i - 1; + struct cpuinfo_processor* processor = processors + processor_id; + + /* Adjust core and package pointers for all logical processors */ + struct cpuinfo_core* core = + (struct cpuinfo_core*) ((uintptr_t) cores + (uintptr_t) processor->core); + processor->core = core; + struct cpuinfo_cluster* cluster = + (struct cpuinfo_cluster*) ((uintptr_t) clusters + (uintptr_t) processor->cluster); + processor->cluster = cluster; + struct cpuinfo_package* package = + (struct cpuinfo_package*) ((uintptr_t) packages + (uintptr_t) processor->package); + processor->package = package; + + /* This can be overwritten by lower-index processors on the same package */ + package->processor_start = processor_id; + package->processor_count += 1; + + /* This can be overwritten by lower-index processors on the same cluster */ + cluster->processor_start = processor_id; + cluster->processor_count += 1; + + /* This can be overwritten by lower-index processors on the same core*/ + core->processor_start = 
processor_id; + core->processor_count += 1; + } + + /* Set vendor/uarch/CPUID information for cores */ + for (uint32_t i = cores_count; i != 0; i--) { + const uint32_t global_core_id = i - 1; + struct cpuinfo_core* core = cores + global_core_id; + const struct cpuinfo_processor* processor = processors + core->processor_start; + struct cpuinfo_package* package = (struct cpuinfo_package*) processor->package; + struct cpuinfo_cluster* cluster = (struct cpuinfo_cluster*) processor->cluster; + + core->cluster = cluster; + core->package = package; + core->core_id = core_bits_mask & + (processor->apic_id >> x86_processor.topology.core_bits_offset); + core->vendor = x86_processor.vendor; + core->uarch = x86_processor.uarch; + core->cpuid = x86_processor.cpuid; + + /* This can be overwritten by lower-index cores on the same cluster/package */ + cluster->core_start = global_core_id; + cluster->core_count += 1; + package->core_start = global_core_id; + package->core_count += 1; + } + + for (uint32_t i = 0; i < packages_count; i++) { + struct cpuinfo_package* package = packages + i; + struct cpuinfo_cluster* cluster = clusters + i; + + cluster->package = package; + cluster->vendor = cores[cluster->core_start].vendor; + cluster->uarch = cores[cluster->core_start].uarch; + cluster->cpuid = cores[cluster->core_start].cpuid; + package->cluster_start = i; + package->cluster_count = 1; + cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, package->name); + } + + /* Count caches */ + uint32_t l1i_count, l1d_count, l2_count, l3_count, l4_count; + cpuinfo_x86_count_caches(processors_count, processors, &x86_processor, + &l1i_count, &l1d_count, &l2_count, &l3_count, &l4_count); + + /* Allocate cache descriptions */ + if (l1i_count != 0) { + l1i = HeapAlloc(heap, HEAP_ZERO_MEMORY, l1i_count * sizeof(struct cpuinfo_cache)); + if (l1i == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches", + l1i_count * sizeof(struct cpuinfo_cache), l1i_count); + goto cleanup; + } + } + if (l1d_count != 0) { + l1d = HeapAlloc(heap, HEAP_ZERO_MEMORY, l1d_count * sizeof(struct cpuinfo_cache)); + if (l1d == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches", + l1d_count * sizeof(struct cpuinfo_cache), l1d_count); + goto cleanup; + } + } + if (l2_count != 0) { + l2 = HeapAlloc(heap, HEAP_ZERO_MEMORY, l2_count * sizeof(struct cpuinfo_cache)); + if (l2 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches", + l2_count * sizeof(struct cpuinfo_cache), l2_count); + goto cleanup; + } + } + if (l3_count != 0) { + l3 = HeapAlloc(heap, HEAP_ZERO_MEMORY, l3_count * sizeof(struct cpuinfo_cache)); + if (l3 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches", + l3_count * sizeof(struct cpuinfo_cache), l3_count); + goto cleanup; + } + } + if (l4_count != 0) { + l4 = HeapAlloc(heap, HEAP_ZERO_MEMORY, l4_count * sizeof(struct cpuinfo_cache)); + if (l4 == NULL) { + cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L4 caches", + l4_count * sizeof(struct cpuinfo_cache), l4_count); + goto cleanup; + } + } + + /* Set cache information */ + uint32_t l1i_index = UINT32_MAX, l1d_index = UINT32_MAX, l2_index = UINT32_MAX, l3_index = UINT32_MAX, l4_index = UINT32_MAX; + uint32_t last_l1i_id = UINT32_MAX, last_l1d_id = UINT32_MAX; + uint32_t last_l2_id = UINT32_MAX, last_l3_id = UINT32_MAX, last_l4_id = UINT32_MAX; + for 
(uint32_t i = 0; i < processors_count; i++) { + const uint32_t apic_id = processors[i].apic_id; + + if (x86_processor.cache.l1i.size != 0) { + const uint32_t l1i_id = apic_id & ~bit_mask(x86_processor.cache.l1i.apic_bits); + processors[i].cache.l1i = &l1i[l1i_index]; + if (l1i_id != last_l1i_id) { + /* new cache */ + last_l1i_id = l1i_id; + l1i[++l1i_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l1i.size, + .associativity = x86_processor.cache.l1i.associativity, + .sets = x86_processor.cache.l1i.sets, + .partitions = x86_processor.cache.l1i.partitions, + .line_size = x86_processor.cache.l1i.line_size, + .flags = x86_processor.cache.l1i.flags, + .processor_start = i, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l1i[l1i_index].processor_count += 1; + } + processors[i].cache.l1i = &l1i[l1i_index]; + } else { + /* reset cache id */ + last_l1i_id = UINT32_MAX; + } + if (x86_processor.cache.l1d.size != 0) { + const uint32_t l1d_id = apic_id & ~bit_mask(x86_processor.cache.l1d.apic_bits); + processors[i].cache.l1d = &l1d[l1d_index]; + if (l1d_id != last_l1d_id) { + /* new cache */ + last_l1d_id = l1d_id; + l1d[++l1d_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l1d.size, + .associativity = x86_processor.cache.l1d.associativity, + .sets = x86_processor.cache.l1d.sets, + .partitions = x86_processor.cache.l1d.partitions, + .line_size = x86_processor.cache.l1d.line_size, + .flags = x86_processor.cache.l1d.flags, + .processor_start = i, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l1d[l1d_index].processor_count += 1; + } + processors[i].cache.l1d = &l1d[l1d_index]; + } else { + /* reset cache id */ + last_l1d_id = UINT32_MAX; + } + if (x86_processor.cache.l2.size != 0) { + const uint32_t l2_id = apic_id & ~bit_mask(x86_processor.cache.l2.apic_bits); + processors[i].cache.l2 = &l2[l2_index]; + if (l2_id != last_l2_id) { + /* new cache */ + last_l2_id = l2_id; + l2[++l2_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l2.size, + .associativity = x86_processor.cache.l2.associativity, + .sets = x86_processor.cache.l2.sets, + .partitions = x86_processor.cache.l2.partitions, + .line_size = x86_processor.cache.l2.line_size, + .flags = x86_processor.cache.l2.flags, + .processor_start = i, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l2[l2_index].processor_count += 1; + } + processors[i].cache.l2 = &l2[l2_index]; + } else { + /* reset cache id */ + last_l2_id = UINT32_MAX; + } + if (x86_processor.cache.l3.size != 0) { + const uint32_t l3_id = apic_id & ~bit_mask(x86_processor.cache.l3.apic_bits); + processors[i].cache.l3 = &l3[l3_index]; + if (l3_id != last_l3_id) { + /* new cache */ + last_l3_id = l3_id; + l3[++l3_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l3.size, + .associativity = x86_processor.cache.l3.associativity, + .sets = x86_processor.cache.l3.sets, + .partitions = x86_processor.cache.l3.partitions, + .line_size = x86_processor.cache.l3.line_size, + .flags = x86_processor.cache.l3.flags, + .processor_start = i, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l3[l3_index].processor_count += 1; + } + processors[i].cache.l3 = &l3[l3_index]; + } else { + /* reset cache id */ + last_l3_id = UINT32_MAX; + } + if (x86_processor.cache.l4.size != 0) { + const uint32_t l4_id = apic_id & ~bit_mask(x86_processor.cache.l4.apic_bits); + processors[i].cache.l4 = 
&l4[l4_index]; + if (l4_id != last_l4_id) { + /* new cache */ + last_l4_id = l4_id; + l4[++l4_index] = (struct cpuinfo_cache) { + .size = x86_processor.cache.l4.size, + .associativity = x86_processor.cache.l4.associativity, + .sets = x86_processor.cache.l4.sets, + .partitions = x86_processor.cache.l4.partitions, + .line_size = x86_processor.cache.l4.line_size, + .flags = x86_processor.cache.l4.flags, + .processor_start = i, + .processor_count = 1, + }; + } else { + /* another processor sharing the same cache */ + l4[l4_index].processor_count += 1; + } + processors[i].cache.l4 = &l4[l4_index]; + } else { + /* reset cache id */ + last_l4_id = UINT32_MAX; + } + } + + + /* Commit changes */ + cpuinfo_processors = processors; + cpuinfo_cores = cores; + cpuinfo_clusters = clusters; + cpuinfo_packages = packages; + cpuinfo_cache[cpuinfo_cache_level_1i] = l1i; + cpuinfo_cache[cpuinfo_cache_level_1d] = l1d; + cpuinfo_cache[cpuinfo_cache_level_2] = l2; + cpuinfo_cache[cpuinfo_cache_level_3] = l3; + cpuinfo_cache[cpuinfo_cache_level_4] = l4; + + cpuinfo_processors_count = processors_count; + cpuinfo_cores_count = cores_count; + cpuinfo_clusters_count = packages_count; + cpuinfo_packages_count = packages_count; + cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1i_count; + cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1d_count; + cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count; + cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count; + cpuinfo_cache_count[cpuinfo_cache_level_4] = l4_count; + cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]); + + cpuinfo_global_uarch = (struct cpuinfo_uarch_info) { + .uarch = x86_processor.uarch, + .cpuid = x86_processor.cpuid, + .processor_count = processors_count, + .core_count = cores_count, + }; + + MemoryBarrier(); + + cpuinfo_is_initialized = true; + + processors = NULL; + cores = NULL; + clusters = NULL; + packages = NULL; + l1i = l1d = l2 = l3 = l4 = NULL; + +cleanup: + if (processors != NULL) { + HeapFree(heap, 0, processors); + } + if (cores != NULL) { + HeapFree(heap, 0, cores); + } + if (clusters != NULL) { + HeapFree(heap, 0, clusters); + } + if (packages != NULL) { + HeapFree(heap, 0, packages); + } + if (l1i != NULL) { + HeapFree(heap, 0, l1i); + } + if (l1d != NULL) { + HeapFree(heap, 0, l1d); + } + if (l2 != NULL) { + HeapFree(heap, 0, l2); + } + if (l3 != NULL) { + HeapFree(heap, 0, l3); + } + if (l4 != NULL) { + HeapFree(heap, 0, l4); + } + return TRUE; +} diff --git a/source/3rdparty/gtest/LICENSE b/source/3rdparty/gtest/LICENSE new file mode 100644 index 0000000..1941a11 --- /dev/null +++ b/source/3rdparty/gtest/LICENSE @@ -0,0 +1,28 @@ +Copyright 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/3rdparty/gtest/include/gtest/gtest-death-test.h b/source/3rdparty/gtest/include/gtest/gtest-death-test.h new file mode 100644 index 0000000..dc878ff --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-death-test.h @@ -0,0 +1,343 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file defines the public API for death tests. It is +// #included by gtest.h so a user doesn't need to include this +// directly. +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ + +#include "gtest/internal/gtest-death-test-internal.h" + +namespace testing { + +// This flag controls the style of death tests. Valid values are "threadsafe", +// meaning that the death test child process will re-execute the test binary +// from the start, running only a single death test, or "fast", +// meaning that the child process will execute the test logic immediately +// after forking. 
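As a usage note for this flag (a minimal sketch; the FLAGS_ variable name follows the GTEST_DECLARE_string_ convention used below, and the same choice can be made per run with --gtest_death_test_style=threadsafe on the command line):

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Prefer the slower but more robust style for every death test in this binary.
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  return RUN_ALL_TESTS();
}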
+GTEST_DECLARE_string_(death_test_style); + +#if GTEST_HAS_DEATH_TEST + +namespace internal { + +// Returns a Boolean value indicating whether the caller is currently +// executing in the context of the death test child process. Tools such as +// Valgrind heap checkers may need this to modify their behavior in death +// tests. IMPORTANT: This is an internal utility. Using it may break the +// implementation of death tests. User code MUST NOT use it. +GTEST_API_ bool InDeathTestChild(); + +} // namespace internal + +// The following macros are useful for writing death tests. + +// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is +// executed: +// +// 1. It generates a warning if there is more than one active +// thread. This is because it's safe to fork() or clone() only +// when there is a single thread. +// +// 2. The parent process clone()s a sub-process and runs the death +// test in it; the sub-process exits with code 0 at the end of the +// death test, if it hasn't exited already. +// +// 3. The parent process waits for the sub-process to terminate. +// +// 4. The parent process checks the exit code and error message of +// the sub-process. +// +// Examples: +// +// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number"); +// for (int i = 0; i < 5; i++) { +// EXPECT_DEATH(server.ProcessRequest(i), +// "Invalid request .* in ProcessRequest()") +// << "Failed to die on request " << i; +// } +// +// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting"); +// +// bool KilledBySIGHUP(int exit_code) { +// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP; +// } +// +// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!"); +// +// On the regular expressions used in death tests: +// +// GOOGLETEST_CM0005 DO NOT DELETE +// On POSIX-compliant systems (*nix), we use the library, +// which uses the POSIX extended regex syntax. +// +// On other platforms (e.g. Windows or Mac), we only support a simple regex +// syntax implemented as part of Google Test. This limited +// implementation should be enough most of the time when writing +// death tests; though it lacks many features you can find in PCRE +// or POSIX extended regex syntax. For example, we don't support +// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and +// repetition count ("x{5,7}"), among others. +// +// Below is the syntax that we do support. We chose it to be a +// subset of both PCRE and POSIX extended regex, so it's easy to +// learn wherever you come from. In the following: 'A' denotes a +// literal character, period (.), or a single \\ escape sequence; +// 'x' and 'y' denote regular expressions; 'm' and 'n' are for +// natural numbers. +// +// c matches any literal character c +// \\d matches any decimal digit +// \\D matches any character that's not a decimal digit +// \\f matches \f +// \\n matches \n +// \\r matches \r +// \\s matches any ASCII whitespace, including \n +// \\S matches any character that's not a whitespace +// \\t matches \t +// \\v matches \v +// \\w matches any letter, _, or decimal digit +// \\W matches any character that \\w doesn't match +// \\c matches any literal character c, which must be a punctuation +// . matches any single character except \n +// A? 
matches 0 or 1 occurrences of A +// A* matches 0 or many occurrences of A +// A+ matches 1 or many occurrences of A +// ^ matches the beginning of a string (not that of each line) +// $ matches the end of a string (not that of each line) +// xy matches x followed by y +// +// If you accidentally use PCRE or POSIX extended regex features +// not implemented by us, you will get a run-time failure. In that +// case, please try to rewrite your regular expression within the +// above syntax. +// +// This implementation is *not* meant to be as highly tuned or robust +// as a compiled regex library, but should perform well enough for a +// death test, which already incurs significant overhead by launching +// a child process. +// +// Known caveats: +// +// A "threadsafe" style death test obtains the path to the test +// program from argv[0] and re-executes it in the sub-process. For +// simplicity, the current implementation doesn't search the PATH +// when launching the sub-process. This means that the user must +// invoke the test program via a path that contains at least one +// path separator (e.g. path/to/foo_test and +// /absolute/path/to/bar_test are fine, but foo_test is not). This +// is rarely a problem as people usually don't put the test binary +// directory in PATH. +// + +// Asserts that a given statement causes the program to exit, with an +// integer exit status that satisfies predicate, and emitting error output +// that matches regex. +# define ASSERT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_) + +// Like ASSERT_EXIT, but continues on to successive tests in the +// test suite, if any: +# define EXPECT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_) + +// Asserts that a given statement causes the program to exit, either by +// explicitly exiting with a nonzero exit code or being killed by a +// signal, and emitting error output that matches regex. +# define ASSERT_DEATH(statement, regex) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Like ASSERT_DEATH, but continues on to successive tests in the +// test suite, if any: +# define EXPECT_DEATH(statement, regex) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: + +// Tests that an exit code describes a normal exit with a given exit code. +class GTEST_API_ ExitedWithCode { + public: + explicit ExitedWithCode(int exit_code); + bool operator()(int exit_status) const; + private: + // No implementation - assignment is unsupported. + void operator=(const ExitedWithCode& other); + + const int exit_code_; +}; + +# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +// Tests that an exit code describes an exit due to termination by a +// given signal. +// GOOGLETEST_CM0006 DO NOT DELETE +class GTEST_API_ KilledBySignal { + public: + explicit KilledBySignal(int signum); + bool operator()(int exit_status) const; + private: + const int signum_; +}; +# endif // !GTEST_OS_WINDOWS + +// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. +// The death testing framework causes this to have interesting semantics, +// since the sideeffects of the call are only visible in opt mode, and not +// in debug mode. 
+// +// In practice, this can be used to test functions that utilize the +// LOG(DFATAL) macro using the following style: +// +// int DieInDebugOr12(int* sideeffect) { +// if (sideeffect) { +// *sideeffect = 12; +// } +// LOG(DFATAL) << "death"; +// return 12; +// } +// +// TEST(TestSuite, TestDieOr12WorksInDgbAndOpt) { +// int sideeffect = 0; +// // Only asserts in dbg. +// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death"); +// +// #ifdef NDEBUG +// // opt-mode has sideeffect visible. +// EXPECT_EQ(12, sideeffect); +// #else +// // dbg-mode no visible sideeffect. +// EXPECT_EQ(0, sideeffect); +// #endif +// } +// +// This will assert that DieInDebugReturn12InOpt() crashes in debug +// mode, usually due to a DCHECK or LOG(DFATAL), but returns the +// appropriate fallback value (12 in this case) in opt mode. If you +// need to test that a function has appropriate side-effects in opt +// mode, include assertions against the side-effects. A general +// pattern for this is: +// +// EXPECT_DEBUG_DEATH({ +// // Side-effects here will have an effect after this statement in +// // opt mode, but none in debug mode. +// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); +// }, "death"); +// +# ifdef NDEBUG + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# else + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + EXPECT_DEATH(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + ASSERT_DEATH(statement, regex) + +# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // GTEST_HAS_DEATH_TEST + +// This macro is used for implementing macros such as +// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where +// death tests are not supported. Those macros must compile on such systems +// if and only if EXPECT_DEATH and ASSERT_DEATH compile with the same parameters +// on systems that support death tests. This allows one to write such a macro on +// a system that does not support death tests and be sure that it will compile +// on a death-test supporting system. It is exposed publicly so that systems +// that have death-tests with stricter requirements than GTEST_HAS_DEATH_TEST +// can write their own equivalent of EXPECT_DEATH_IF_SUPPORTED and +// ASSERT_DEATH_IF_SUPPORTED. +// +// Parameters: +// statement - A statement that a macro such as EXPECT_DEATH would test +// for program termination. This macro has to make sure this +// statement is compiled but not executed, to ensure that +// EXPECT_DEATH_IF_SUPPORTED compiles with a certain +// parameter if and only if EXPECT_DEATH compiles with it. +// regex - A regex that a macro such as EXPECT_DEATH would use to test +// the output of statement. This parameter has to be +// compiled but not evaluated by this macro, to ensure that +// this macro only accepts expressions that a macro such as +// EXPECT_DEATH would accept. +// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED +// and a return statement for ASSERT_DEATH_IF_SUPPORTED. +// This ensures that ASSERT_DEATH_IF_SUPPORTED will not +// compile inside functions where ASSERT_DEATH doesn't +// compile. +// +// The branch that has an always false condition is used to ensure that +// statement and regex are compiled (and thus syntactically correct) but +// never executed. 
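For reference, the kind of test the _IF_SUPPORTED macros make portable looks roughly like this (ParseConfig and its error message are hypothetical stand-ins):

#include <cstdio>
#include <cstdlib>
#include "gtest/gtest.h"

// Hypothetical function under test: aborts with a message on null input.
static void ParseConfig(const char* path) {
  if (path == nullptr) {
    std::fprintf(stderr, "config path must not be null\n");
    std::abort();
  }
}

TEST(ConfigDeathTest, RejectsNullPath) {
  // A real death test where the platform supports it, only a warning otherwise.
  EXPECT_DEATH_IF_SUPPORTED(ParseConfig(nullptr), "must not be null");
}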
The unreachable code macro protects the terminator +// statement from generating an 'unreachable code' warning in case +// statement unconditionally returns or throws. The Message constructor at +// the end allows the syntax of streaming additional messages into the +// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. +# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_LOG_(WARNING) \ + << "Death tests are not supported on this platform.\n" \ + << "Statement '" #statement "' cannot be verified."; \ + } else if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::RE::PartialMatch(".*", (regex)); \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + terminator; \ + } else \ + ::testing::Message() + +// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and +// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if +// death tests are supported; otherwise they just issue a warning. This is +// useful when you are combining death test assertions with normal test +// assertions in one test. +#if GTEST_HAS_DEATH_TEST +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) +#else +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, ) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return) +#endif + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-matchers.h b/source/3rdparty/gtest/include/gtest/gtest-matchers.h new file mode 100644 index 0000000..d9b28e0 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-matchers.h @@ -0,0 +1,750 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
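The *_IF_SUPPORTED variants defined above degrade to the warning emitted by GTEST_UNSUPPORTED_DEATH_TEST on platforms without death-test support, so the same test source builds everywhere. A hedged sketch, assuming a hypothetical ParseHeader() that aborts on a null buffer:

#include "gtest/gtest.h"

TEST(HeaderParserDeathTest, RejectsNullBuffer) {
  // Behaves like EXPECT_DEATH where death tests are available; elsewhere it
  // only logs "Death tests are not supported on this platform." and passes.
  EXPECT_DEATH_IF_SUPPORTED(ParseHeader(nullptr), "buffer must not be null");
}
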
+ +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This file implements just enough of the matcher interface to allow +// EXPECT_DEATH and friends to accept a matcher argument. + +// IWYU pragma: private, include "testing/base/public/gunit.h" +// IWYU pragma: friend third_party/googletest/googlemock/.* +// IWYU pragma: friend third_party/googletest/googletest/.* + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ + +#include +#include +#include +#include + +#include "gtest/gtest-printers.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" + +// MSVC warning C5046 is new as of VS2017 version 15.8. +#if defined(_MSC_VER) && _MSC_VER >= 1915 +#define GTEST_MAYBE_5046_ 5046 +#else +#define GTEST_MAYBE_5046_ +#endif + +GTEST_DISABLE_MSC_WARNINGS_PUSH_( + 4251 GTEST_MAYBE_5046_ /* class A needs to have dll-interface to be used by + clients of class B */ + /* Symbol involving type with internal linkage not defined */) + +namespace testing { + +// To implement a matcher Foo for type T, define: +// 1. a class FooMatcherImpl that implements the +// MatcherInterface interface, and +// 2. a factory function that creates a Matcher object from a +// FooMatcherImpl*. +// +// The two-level delegation design makes it possible to allow a user +// to write "v" instead of "Eq(v)" where a Matcher is expected, which +// is impossible if we pass matchers by pointers. It also eases +// ownership management as Matcher objects can now be copied like +// plain values. + +// MatchResultListener is an abstract class. Its << operator can be +// used by a matcher to explain why a value matches or doesn't match. +// +class MatchResultListener { + public: + // Creates a listener object with the given underlying ostream. The + // listener does not own the ostream, and does not dereference it + // in the constructor or destructor. + explicit MatchResultListener(::std::ostream* os) : stream_(os) {} + virtual ~MatchResultListener() = 0; // Makes this class abstract. + + // Streams x to the underlying ostream; does nothing if the ostream + // is NULL. + template + MatchResultListener& operator<<(const T& x) { + if (stream_ != nullptr) *stream_ << x; + return *this; + } + + // Returns the underlying ostream. + ::std::ostream* stream() { return stream_; } + + // Returns true if and only if the listener is interested in an explanation + // of the match result. A matcher's MatchAndExplain() method can use + // this information to avoid generating the explanation when no one + // intends to hear it. + bool IsInterested() const { return stream_ != nullptr; } + + private: + ::std::ostream* const stream_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener); +}; + +inline MatchResultListener::~MatchResultListener() { +} + +// An instance of a subclass of this knows how to describe itself as a +// matcher. +class MatcherDescriberInterface { + public: + virtual ~MatcherDescriberInterface() {} + + // Describes this matcher to an ostream. The function should print + // a verb phrase that describes the property a value matching this + // matcher should have. The subject of the verb phrase is the value + // being matched. For example, the DescribeTo() method of the Gt(7) + // matcher prints "is greater than 7". + virtual void DescribeTo(::std::ostream* os) const = 0; + + // Describes the negation of this matcher to an ostream. 
For + // example, if the description of this matcher is "is greater than + // 7", the negated description could be "is not greater than 7". + // You are not required to override this when implementing + // MatcherInterface, but it is highly advised so that your matcher + // can produce good error messages. + virtual void DescribeNegationTo(::std::ostream* os) const { + *os << "not ("; + DescribeTo(os); + *os << ")"; + } +}; + +// The implementation of a matcher. +template +class MatcherInterface : public MatcherDescriberInterface { + public: + // Returns true if and only if the matcher matches x; also explains the + // match result to 'listener' if necessary (see the next paragraph), in + // the form of a non-restrictive relative clause ("which ...", + // "whose ...", etc) that describes x. For example, the + // MatchAndExplain() method of the Pointee(...) matcher should + // generate an explanation like "which points to ...". + // + // Implementations of MatchAndExplain() should add an explanation of + // the match result *if and only if* they can provide additional + // information that's not already present (or not obvious) in the + // print-out of x and the matcher's description. Whether the match + // succeeds is not a factor in deciding whether an explanation is + // needed, as sometimes the caller needs to print a failure message + // when the match succeeds (e.g. when the matcher is used inside + // Not()). + // + // For example, a "has at least 10 elements" matcher should explain + // what the actual element count is, regardless of the match result, + // as it is useful information to the reader; on the other hand, an + // "is empty" matcher probably only needs to explain what the actual + // size is when the match fails, as it's redundant to say that the + // size is 0 when the value is already known to be empty. + // + // You should override this method when defining a new matcher. + // + // It's the responsibility of the caller (Google Test) to guarantee + // that 'listener' is not NULL. This helps to simplify a matcher's + // implementation when it doesn't care about the performance, as it + // can talk to 'listener' without checking its validity first. + // However, in order to implement dummy listeners efficiently, + // listener->stream() may be NULL. + virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0; + + // Inherits these methods from MatcherDescriberInterface: + // virtual void DescribeTo(::std::ostream* os) const = 0; + // virtual void DescribeNegationTo(::std::ostream* os) const; +}; + +namespace internal { + +// Converts a MatcherInterface to a MatcherInterface. 
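Following the two-level scheme described above (a MatcherInterface implementation plus a small factory), a hand-rolled matcher could look like the sketch below; IsEvenMatcher/IsEven are illustrative names, and the returned Matcher<int> takes ownership of the heap-allocated implementation:

#include <ostream>
#include "gtest/gtest.h"

class IsEvenMatcher : public ::testing::MatcherInterface<int> {
 public:
  bool MatchAndExplain(int n,
                       ::testing::MatchResultListener* listener) const override {
    // Explain the value regardless of the outcome; the listener safely
    // ignores the stream when nobody is interested.
    *listener << "which has remainder " << (n % 2) << " when divided by 2";
    return (n % 2) == 0;
  }
  void DescribeTo(::std::ostream* os) const override { *os << "is even"; }
  void DescribeNegationTo(::std::ostream* os) const override { *os << "is odd"; }
};

::testing::Matcher<int> IsEven() {
  return ::testing::Matcher<int>(new IsEvenMatcher);
}
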
+template +class MatcherInterfaceAdapter : public MatcherInterface { + public: + explicit MatcherInterfaceAdapter(const MatcherInterface* impl) + : impl_(impl) {} + ~MatcherInterfaceAdapter() override { delete impl_; } + + void DescribeTo(::std::ostream* os) const override { impl_->DescribeTo(os); } + + void DescribeNegationTo(::std::ostream* os) const override { + impl_->DescribeNegationTo(os); + } + + bool MatchAndExplain(const T& x, + MatchResultListener* listener) const override { + return impl_->MatchAndExplain(x, listener); + } + + private: + const MatcherInterface* const impl_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(MatcherInterfaceAdapter); +}; + +struct AnyEq { + template + bool operator()(const A& a, const B& b) const { return a == b; } +}; +struct AnyNe { + template + bool operator()(const A& a, const B& b) const { return a != b; } +}; +struct AnyLt { + template + bool operator()(const A& a, const B& b) const { return a < b; } +}; +struct AnyGt { + template + bool operator()(const A& a, const B& b) const { return a > b; } +}; +struct AnyLe { + template + bool operator()(const A& a, const B& b) const { return a <= b; } +}; +struct AnyGe { + template + bool operator()(const A& a, const B& b) const { return a >= b; } +}; + +// A match result listener that ignores the explanation. +class DummyMatchResultListener : public MatchResultListener { + public: + DummyMatchResultListener() : MatchResultListener(nullptr) {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener); +}; + +// A match result listener that forwards the explanation to a given +// ostream. The difference between this and MatchResultListener is +// that the former is concrete. +class StreamMatchResultListener : public MatchResultListener { + public: + explicit StreamMatchResultListener(::std::ostream* os) + : MatchResultListener(os) {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener); +}; + +// An internal class for implementing Matcher, which will derive +// from it. We put functionalities common to all Matcher +// specializations here to avoid code duplication. +template +class MatcherBase { + public: + // Returns true if and only if the matcher matches x; also explains the + // match result to 'listener'. + bool MatchAndExplain(const T& x, MatchResultListener* listener) const { + return impl_->MatchAndExplain(x, listener); + } + + // Returns true if and only if this matcher matches x. + bool Matches(const T& x) const { + DummyMatchResultListener dummy; + return MatchAndExplain(x, &dummy); + } + + // Describes this matcher to an ostream. + void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); } + + // Describes the negation of this matcher to an ostream. + void DescribeNegationTo(::std::ostream* os) const { + impl_->DescribeNegationTo(os); + } + + // Explains why x matches, or doesn't match, the matcher. + void ExplainMatchResultTo(const T& x, ::std::ostream* os) const { + StreamMatchResultListener listener(os); + MatchAndExplain(x, &listener); + } + + // Returns the describer for this matcher object; retains ownership + // of the describer, which is only guaranteed to be alive when + // this matcher object is alive. + const MatcherDescriberInterface* GetDescriber() const { + return impl_.get(); + } + + protected: + MatcherBase() {} + + // Constructs a matcher from its implementation. 
+ explicit MatcherBase(const MatcherInterface* impl) : impl_(impl) {} + + template + explicit MatcherBase( + const MatcherInterface* impl, + typename std::enable_if::value>::type* = + nullptr) + : impl_(new internal::MatcherInterfaceAdapter(impl)) {} + + MatcherBase(const MatcherBase&) = default; + MatcherBase& operator=(const MatcherBase&) = default; + MatcherBase(MatcherBase&&) = default; + MatcherBase& operator=(MatcherBase&&) = default; + + virtual ~MatcherBase() {} + + private: + std::shared_ptr> impl_; +}; + +} // namespace internal + +// A Matcher is a copyable and IMMUTABLE (except by assignment) +// object that can check whether a value of type T matches. The +// implementation of Matcher is just a std::shared_ptr to const +// MatcherInterface. Don't inherit from Matcher! +template +class Matcher : public internal::MatcherBase { + public: + // Constructs a null matcher. Needed for storing Matcher objects in STL + // containers. A default-constructed matcher is not yet initialized. You + // cannot use it until a valid value has been assigned to it. + explicit Matcher() {} // NOLINT + + // Constructs a matcher from its implementation. + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + template + explicit Matcher( + const MatcherInterface* impl, + typename std::enable_if::value>::type* = + nullptr) + : internal::MatcherBase(impl) {} + + // Implicit constructor here allows people to write + // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes + Matcher(T value); // NOLINT +}; + +// The following two specializations allow the user to write str +// instead of Eq(str) and "foo" instead of Eq("foo") when a std::string +// matcher is expected. +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a std::string object. + Matcher(const std::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT +}; + +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a string object. + Matcher(const std::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT +}; + +#if GTEST_HAS_ABSL +// The following two specializations allow the user to write str +// instead of Eq(str) and "foo" instead of Eq("foo") when a absl::string_view +// matcher is expected. +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a std::string object. + Matcher(const std::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT + + // Allows the user to pass absl::string_views directly. 
+ Matcher(absl::string_view s); // NOLINT +}; + +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a std::string object. + Matcher(const std::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT + + // Allows the user to pass absl::string_views directly. + Matcher(absl::string_view s); // NOLINT +}; +#endif // GTEST_HAS_ABSL + +// Prints a matcher in a human-readable format. +template +std::ostream& operator<<(std::ostream& os, const Matcher& matcher) { + matcher.DescribeTo(&os); + return os; +} + +// The PolymorphicMatcher class template makes it easy to implement a +// polymorphic matcher (i.e. a matcher that can match values of more +// than one type, e.g. Eq(n) and NotNull()). +// +// To define a polymorphic matcher, a user should provide an Impl +// class that has a DescribeTo() method and a DescribeNegationTo() +// method, and define a member function (or member function template) +// +// bool MatchAndExplain(const Value& value, +// MatchResultListener* listener) const; +// +// See the definition of NotNull() for a complete example. +template +class PolymorphicMatcher { + public: + explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {} + + // Returns a mutable reference to the underlying matcher + // implementation object. + Impl& mutable_impl() { return impl_; } + + // Returns an immutable reference to the underlying matcher + // implementation object. + const Impl& impl() const { return impl_; } + + template + operator Matcher() const { + return Matcher(new MonomorphicImpl(impl_)); + } + + private: + template + class MonomorphicImpl : public MatcherInterface { + public: + explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {} + + void DescribeTo(::std::ostream* os) const override { impl_.DescribeTo(os); } + + void DescribeNegationTo(::std::ostream* os) const override { + impl_.DescribeNegationTo(os); + } + + bool MatchAndExplain(T x, MatchResultListener* listener) const override { + return impl_.MatchAndExplain(x, listener); + } + + private: + const Impl impl_; + }; + + Impl impl_; +}; + +// Creates a matcher from its implementation. +// DEPRECATED: Especially in the generic code, prefer: +// Matcher(new MyMatcherImpl(...)); +// +// MakeMatcher may create a Matcher that accepts its argument by value, which +// leads to unnecessary copies & lack of support for non-copyable types. +template +inline Matcher MakeMatcher(const MatcherInterface* impl) { + return Matcher(impl); +} + +// Creates a polymorphic matcher from its implementation. This is +// easier to use than the PolymorphicMatcher constructor as it +// doesn't require you to explicitly write the template argument, e.g. +// +// MakePolymorphicMatcher(foo); +// vs +// PolymorphicMatcher(foo); +template +inline PolymorphicMatcher MakePolymorphicMatcher(const Impl& impl) { + return PolymorphicMatcher(impl); +} + +namespace internal { +// Implements a matcher that compares a given value with a +// pre-supplied value using one of the ==, <=, <, etc, operators. The +// two values being compared don't have to have the same type. 
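To complement MakePolymorphicMatcher() above, a minimal polymorphic-matcher sketch (IsPositiveMatcher/IsPositive are illustrative names): the impl class only needs MatchAndExplain(), DescribeTo() and DescribeNegationTo(), and the result converts to Matcher<T> for any suitable T:

#include <ostream>
#include "gtest/gtest.h"

class IsPositiveMatcher {
 public:
  template <typename T>
  bool MatchAndExplain(const T& value,
                       ::testing::MatchResultListener* /*listener*/) const {
    return value > 0;
  }
  void DescribeTo(::std::ostream* os) const { *os << "is positive"; }
  void DescribeNegationTo(::std::ostream* os) const { *os << "is not positive"; }
};

inline ::testing::PolymorphicMatcher<IsPositiveMatcher> IsPositive() {
  return ::testing::MakePolymorphicMatcher(IsPositiveMatcher());
}
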
+// +// The matcher defined here is polymorphic (for example, Eq(5) can be +// used to match an int, a short, a double, etc). Therefore we use +// a template type conversion operator in the implementation. +// +// The following template definition assumes that the Rhs parameter is +// a "bare" type (i.e. neither 'const T' nor 'T&'). +template +class ComparisonBase { + public: + explicit ComparisonBase(const Rhs& rhs) : rhs_(rhs) {} + template + operator Matcher() const { + return Matcher(new Impl(rhs_)); + } + + private: + template + static const T& Unwrap(const T& v) { return v; } + template + static const T& Unwrap(std::reference_wrapper v) { return v; } + + template + class Impl : public MatcherInterface { + public: + explicit Impl(const Rhs& rhs) : rhs_(rhs) {} + bool MatchAndExplain(Lhs lhs, + MatchResultListener* /* listener */) const override { + return Op()(lhs, Unwrap(rhs_)); + } + void DescribeTo(::std::ostream* os) const override { + *os << D::Desc() << " "; + UniversalPrint(Unwrap(rhs_), os); + } + void DescribeNegationTo(::std::ostream* os) const override { + *os << D::NegatedDesc() << " "; + UniversalPrint(Unwrap(rhs_), os); + } + + private: + Rhs rhs_; + }; + Rhs rhs_; +}; + +template +class EqMatcher : public ComparisonBase, Rhs, AnyEq> { + public: + explicit EqMatcher(const Rhs& rhs) + : ComparisonBase, Rhs, AnyEq>(rhs) { } + static const char* Desc() { return "is equal to"; } + static const char* NegatedDesc() { return "isn't equal to"; } +}; +template +class NeMatcher : public ComparisonBase, Rhs, AnyNe> { + public: + explicit NeMatcher(const Rhs& rhs) + : ComparisonBase, Rhs, AnyNe>(rhs) { } + static const char* Desc() { return "isn't equal to"; } + static const char* NegatedDesc() { return "is equal to"; } +}; +template +class LtMatcher : public ComparisonBase, Rhs, AnyLt> { + public: + explicit LtMatcher(const Rhs& rhs) + : ComparisonBase, Rhs, AnyLt>(rhs) { } + static const char* Desc() { return "is <"; } + static const char* NegatedDesc() { return "isn't <"; } +}; +template +class GtMatcher : public ComparisonBase, Rhs, AnyGt> { + public: + explicit GtMatcher(const Rhs& rhs) + : ComparisonBase, Rhs, AnyGt>(rhs) { } + static const char* Desc() { return "is >"; } + static const char* NegatedDesc() { return "isn't >"; } +}; +template +class LeMatcher : public ComparisonBase, Rhs, AnyLe> { + public: + explicit LeMatcher(const Rhs& rhs) + : ComparisonBase, Rhs, AnyLe>(rhs) { } + static const char* Desc() { return "is <="; } + static const char* NegatedDesc() { return "isn't <="; } +}; +template +class GeMatcher : public ComparisonBase, Rhs, AnyGe> { + public: + explicit GeMatcher(const Rhs& rhs) + : ComparisonBase, Rhs, AnyGe>(rhs) { } + static const char* Desc() { return "is >="; } + static const char* NegatedDesc() { return "isn't >="; } +}; + +// Implements polymorphic matchers MatchesRegex(regex) and +// ContainsRegex(regex), which can be used as a Matcher as long as +// T can be converted to a string. 
+class MatchesRegexMatcher { + public: + MatchesRegexMatcher(const RE* regex, bool full_match) + : regex_(regex), full_match_(full_match) {} + +#if GTEST_HAS_ABSL + bool MatchAndExplain(const absl::string_view& s, + MatchResultListener* listener) const { + return MatchAndExplain(std::string(s), listener); + } +#endif // GTEST_HAS_ABSL + + // Accepts pointer types, particularly: + // const char* + // char* + // const wchar_t* + // wchar_t* + template + bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { + return s != nullptr && MatchAndExplain(std::string(s), listener); + } + + // Matches anything that can convert to std::string. + // + // This is a template, not just a plain function with const std::string&, + // because absl::string_view has some interfering non-explicit constructors. + template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* /* listener */) const { + const std::string& s2(s); + return full_match_ ? RE::FullMatch(s2, *regex_) + : RE::PartialMatch(s2, *regex_); + } + + void DescribeTo(::std::ostream* os) const { + *os << (full_match_ ? "matches" : "contains") << " regular expression "; + UniversalPrinter::Print(regex_->pattern(), os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "doesn't " << (full_match_ ? "match" : "contain") + << " regular expression "; + UniversalPrinter::Print(regex_->pattern(), os); + } + + private: + const std::shared_ptr regex_; + const bool full_match_; +}; +} // namespace internal + +// Matches a string that fully matches regular expression 'regex'. +// The matcher takes ownership of 'regex'. +inline PolymorphicMatcher MatchesRegex( + const internal::RE* regex) { + return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true)); +} +inline PolymorphicMatcher MatchesRegex( + const std::string& regex) { + return MatchesRegex(new internal::RE(regex)); +} + +// Matches a string that contains regular expression 'regex'. +// The matcher takes ownership of 'regex'. +inline PolymorphicMatcher ContainsRegex( + const internal::RE* regex) { + return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false)); +} +inline PolymorphicMatcher ContainsRegex( + const std::string& regex) { + return ContainsRegex(new internal::RE(regex)); +} + +// Creates a polymorphic matcher that matches anything equal to x. +// Note: if the parameter of Eq() were declared as const T&, Eq("foo") +// wouldn't compile. +template +inline internal::EqMatcher Eq(T x) { return internal::EqMatcher(x); } + +// Constructs a Matcher from a 'value' of type T. The constructed +// matcher matches any value that's equal to 'value'. +template +Matcher::Matcher(T value) { *this = Eq(value); } + +// Creates a monomorphic matcher that matches anything with type Lhs +// and equal to rhs. A user may need to use this instead of Eq(...) +// in order to resolve an overloading ambiguity. +// +// TypedEq(x) is just a convenient short-hand for Matcher(Eq(x)) +// or Matcher(x), but more readable than the latter. +// +// We could define similar monomorphic matchers for other comparison +// operations (e.g. TypedLt, TypedGe, and etc), but decided not to do +// it yet as those are used much less than Eq() in practice. A user +// can always write Matcher(Lt(5)) to be explicit about the type, +// for example. +template +inline Matcher TypedEq(const Rhs& rhs) { return Eq(rhs); } + +// Creates a polymorphic matcher that matches anything >= x. 
+template +inline internal::GeMatcher Ge(Rhs x) { + return internal::GeMatcher(x); +} + +// Creates a polymorphic matcher that matches anything > x. +template +inline internal::GtMatcher Gt(Rhs x) { + return internal::GtMatcher(x); +} + +// Creates a polymorphic matcher that matches anything <= x. +template +inline internal::LeMatcher Le(Rhs x) { + return internal::LeMatcher(x); +} + +// Creates a polymorphic matcher that matches anything < x. +template +inline internal::LtMatcher Lt(Rhs x) { + return internal::LtMatcher(x); +} + +// Creates a polymorphic matcher that matches anything != x. +template +inline internal::NeMatcher Ne(Rhs x) { + return internal::NeMatcher(x); +} +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 5046 + +#endif // GTEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-message.h b/source/3rdparty/gtest/include/gtest/gtest-message.h new file mode 100644 index 0000000..2189923 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-message.h @@ -0,0 +1,219 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file defines the Message class. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! 
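As gtest-matchers.h above notes, these factories exist mainly so that EXPECT_DEATH and friends can take a matcher for the expected stderr output instead of a plain regex string. A hedged sketch, assuming a hypothetical Parse() that writes a fatal message and aborts when given a null pointer:

#include "gtest/gtest.h"

TEST(ParserDeathTest, ReportsNullInput) {
  // ContainsRegex() requires only a partial match of the captured stderr;
  // MatchesRegex() requires the whole output to match.
  EXPECT_DEATH(Parse(nullptr), ::testing::ContainsRegex("null"));
  ASSERT_DEATH(Parse(nullptr), ::testing::MatchesRegex("fatal: .*"));
}
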
+ +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ + +#include +#include +#include + +#include "gtest/internal/gtest-port.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +// Ensures that there is at least one operator<< in the global namespace. +// See Message& operator<<(...) below for why. +void operator<<(const testing::internal::Secret&, int); + +namespace testing { + +// The Message class works like an ostream repeater. +// +// Typical usage: +// +// 1. You stream a bunch of values to a Message object. +// It will remember the text in a stringstream. +// 2. Then you stream the Message object to an ostream. +// This causes the text in the Message to be streamed +// to the ostream. +// +// For example; +// +// testing::Message foo; +// foo << 1 << " != " << 2; +// std::cout << foo; +// +// will print "1 != 2". +// +// Message is not intended to be inherited from. In particular, its +// destructor is not virtual. +// +// Note that stringstream behaves differently in gcc and in MSVC. You +// can stream a NULL char pointer to it in the former, but not in the +// latter (it causes an access violation if you do). The Message +// class hides this difference by treating a NULL char pointer as +// "(null)". +class GTEST_API_ Message { + private: + // The type of basic IO manipulators (endl, ends, and flush) for + // narrow streams. + typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); + + public: + // Constructs an empty Message. + Message(); + + // Copy constructor. + Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT + *ss_ << msg.GetString(); + } + + // Constructs a Message from a C-string. + explicit Message(const char* str) : ss_(new ::std::stringstream) { + *ss_ << str; + } + + // Streams a non-pointer value to this object. + template + inline Message& operator <<(const T& val) { + // Some libraries overload << for STL containers. These + // overloads are defined in the global namespace instead of ::std. + // + // C++'s symbol lookup rule (i.e. Koenig lookup) says that these + // overloads are visible in either the std namespace or the global + // namespace, but not other namespaces, including the testing + // namespace which Google Test's Message class is in. + // + // To allow STL containers (and other types that has a << operator + // defined in the global namespace) to be used in Google Test + // assertions, testing::Message must access the custom << operator + // from the global namespace. With this using declaration, + // overloads of << defined in the global namespace and those + // visible via Koenig lookup are both exposed in this function. + using ::operator <<; + *ss_ << val; + return *this; + } + + // Streams a pointer value to this object. + // + // This function is an overload of the previous one. When you + // stream a pointer to a Message, this definition will be used as it + // is more specialized. (The C++ Standard, section + // [temp.func.order].) If you stream a non-pointer, then the + // previous definition will be used. + // + // The reason for this overload is that streaming a NULL pointer to + // ostream is undefined behavior. Depending on the compiler, you + // may get "0", "(nil)", "(null)", or an access violation. To + // ensure consistent result across compilers, we always treat NULL + // as "(null)". 
+ template + inline Message& operator <<(T* const& pointer) { // NOLINT + if (pointer == nullptr) { + *ss_ << "(null)"; + } else { + *ss_ << pointer; + } + return *this; + } + + // Since the basic IO manipulators are overloaded for both narrow + // and wide streams, we have to provide this specialized definition + // of operator <<, even though its body is the same as the + // templatized version above. Without this definition, streaming + // endl or other basic IO manipulators to Message will confuse the + // compiler. + Message& operator <<(BasicNarrowIoManip val) { + *ss_ << val; + return *this; + } + + // Instead of 1/0, we want to see true/false for bool values. + Message& operator <<(bool b) { + return *this << (b ? "true" : "false"); + } + + // These two overloads allow streaming a wide C string to a Message + // using the UTF-8 encoding. + Message& operator <<(const wchar_t* wide_c_str); + Message& operator <<(wchar_t* wide_c_str); + +#if GTEST_HAS_STD_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::std::wstring& wstr); +#endif // GTEST_HAS_STD_WSTRING + + // Gets the text streamed to this object so far as an std::string. + // Each '\0' character in the buffer is replaced with "\\0". + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + std::string GetString() const; + + private: + // We'll hold the text streamed to this object here. + const std::unique_ptr< ::std::stringstream> ss_; + + // We declare (but don't implement) this to prevent the compiler + // from implementing the assignment operator. + void operator=(const Message&); +}; + +// Streams a Message to an ostream. +inline std::ostream& operator <<(std::ostream& os, const Message& sb) { + return os << sb.GetString(); +} + +namespace internal { + +// Converts a streamable value to an std::string. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +template +std::string StreamableToString(const T& streamable) { + return (Message() << streamable).GetString(); +} + +} // namespace internal +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-param-test.h b/source/3rdparty/gtest/include/gtest/gtest-param-test.h new file mode 100644 index 0000000..5b039df --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-param-test.h @@ -0,0 +1,511 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Macros and functions for implementing parameterized tests +// in Google C++ Testing and Mocking Framework (Google Test) +// +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// GOOGLETEST_CM0001 DO NOT DELETE +#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ + + +// Value-parameterized tests allow you to test your code with different +// parameters without writing multiple copies of the same test. +// +// Here is how you use value-parameterized tests: + +#if 0 + +// To write value-parameterized tests, first you should define a fixture +// class. It is usually derived from testing::TestWithParam (see below for +// another inheritance scheme that's sometimes useful in more complicated +// class hierarchies), where the type of your parameter values. +// TestWithParam is itself derived from testing::Test. T can be any +// copyable type. If it's a raw pointer, you are responsible for managing the +// lifespan of the pointed values. + +class FooTest : public ::testing::TestWithParam { + // You can implement all the usual class fixture members here. +}; + +// Then, use the TEST_P macro to define as many parameterized tests +// for this fixture as you want. The _P suffix is for "parameterized" +// or "pattern", whichever you prefer to think. + +TEST_P(FooTest, DoesBlah) { + // Inside a test, access the test parameter with the GetParam() method + // of the TestWithParam class: + EXPECT_TRUE(foo.Blah(GetParam())); + ... +} + +TEST_P(FooTest, HasBlahBlah) { + ... +} + +// Finally, you can use INSTANTIATE_TEST_SUITE_P to instantiate the test +// case with any set of parameters you want. Google Test defines a number +// of functions for generating test parameters. They return what we call +// (surprise!) parameter generators. Here is a summary of them, which +// are all in the testing namespace: +// +// +// Range(begin, end [, step]) - Yields values {begin, begin+step, +// begin+step+step, ...}. The values do not +// include end. step defaults to 1. +// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}. +// ValuesIn(container) - Yields values from a C-style array, an STL +// ValuesIn(begin,end) container, or an iterator range [begin, end). +// Bool() - Yields sequence {false, true}. +// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product +// for the math savvy) of the values generated +// by the N generators. +// +// For more details, see comments at the definitions of these functions below +// in this file. +// +// The following statement will instantiate tests from the FooTest test suite +// each with parameter values "meeny", "miny", and "moe". 
+ +INSTANTIATE_TEST_SUITE_P(InstantiationName, + FooTest, + Values("meeny", "miny", "moe")); + +// To distinguish different instances of the pattern, (yes, you +// can instantiate it more than once) the first argument to the +// INSTANTIATE_TEST_SUITE_P macro is a prefix that will be added to the +// actual test suite name. Remember to pick unique prefixes for different +// instantiations. The tests from the instantiation above will have +// these names: +// +// * InstantiationName/FooTest.DoesBlah/0 for "meeny" +// * InstantiationName/FooTest.DoesBlah/1 for "miny" +// * InstantiationName/FooTest.DoesBlah/2 for "moe" +// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny" +// * InstantiationName/FooTest.HasBlahBlah/1 for "miny" +// * InstantiationName/FooTest.HasBlahBlah/2 for "moe" +// +// You can use these names in --gtest_filter. +// +// This statement will instantiate all tests from FooTest again, each +// with parameter values "cat" and "dog": + +const char* pets[] = {"cat", "dog"}; +INSTANTIATE_TEST_SUITE_P(AnotherInstantiationName, FooTest, ValuesIn(pets)); + +// The tests from the instantiation above will have these names: +// +// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat" +// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog" +// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat" +// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog" +// +// Please note that INSTANTIATE_TEST_SUITE_P will instantiate all tests +// in the given test suite, whether their definitions come before or +// AFTER the INSTANTIATE_TEST_SUITE_P statement. +// +// Please also note that generator expressions (including parameters to the +// generators) are evaluated in InitGoogleTest(), after main() has started. +// This allows the user on one hand, to adjust generator parameters in order +// to dynamically determine a set of tests to run and on the other hand, +// give the user a chance to inspect the generated tests with Google Test +// reflection API before RUN_ALL_TESTS() is executed. +// +// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc +// for more examples. +// +// In the future, we plan to publish the API for defining new parameter +// generators. But for now this interface remains part of the internal +// implementation and is subject to change. +// +// +// A parameterized test fixture must be derived from testing::Test and from +// testing::WithParamInterface, where T is the type of the parameter +// values. Inheriting from TestWithParam satisfies that requirement because +// TestWithParam inherits from both Test and WithParamInterface. In more +// complicated hierarchies, however, it is occasionally useful to inherit +// separately from Test and WithParamInterface. For example: + +class BaseTest : public ::testing::Test { + // You can inherit all the usual members for a non-parameterized test + // fixture here. +}; + +class DerivedTest : public BaseTest, public ::testing::WithParamInterface { + // The usual test fixture members go here too. +}; + +TEST_F(BaseTest, HasFoo) { + // This is an ordinary non-parameterized test. +} + +TEST_P(DerivedTest, DoesBlah) { + // GetParam works just the same here as if you inherit from TestWithParam. + EXPECT_TRUE(foo.Blah(GetParam())); +} + +#endif // 0 + +#include +#include + +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-param-util.h" +#include "gtest/internal/gtest-port.h" + +namespace testing { + +// Functions producing parameter generators. 
+// +// Google Test uses these generators to produce parameters for value- +// parameterized tests. When a parameterized test suite is instantiated +// with a particular generator, Google Test creates and runs tests +// for each element in the sequence produced by the generator. +// +// In the following sample, tests from test suite FooTest are instantiated +// each three times with parameter values 3, 5, and 8: +// +// class FooTest : public TestWithParam { ... }; +// +// TEST_P(FooTest, TestThis) { +// } +// TEST_P(FooTest, TestThat) { +// } +// INSTANTIATE_TEST_SUITE_P(TestSequence, FooTest, Values(3, 5, 8)); +// + +// Range() returns generators providing sequences of values in a range. +// +// Synopsis: +// Range(start, end) +// - returns a generator producing a sequence of values {start, start+1, +// start+2, ..., }. +// Range(start, end, step) +// - returns a generator producing a sequence of values {start, start+step, +// start+step+step, ..., }. +// Notes: +// * The generated sequences never include end. For example, Range(1, 5) +// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2) +// returns a generator producing {1, 3, 5, 7}. +// * start and end must have the same type. That type may be any integral or +// floating-point type or a user defined type satisfying these conditions: +// * It must be assignable (have operator=() defined). +// * It must have operator+() (operator+(int-compatible type) for +// two-operand version). +// * It must have operator<() defined. +// Elements in the resulting sequences will also have that type. +// * Condition start < end must be satisfied in order for resulting sequences +// to contain any elements. +// +template +internal::ParamGenerator Range(T start, T end, IncrementT step) { + return internal::ParamGenerator( + new internal::RangeGenerator(start, end, step)); +} + +template +internal::ParamGenerator Range(T start, T end) { + return Range(start, end, 1); +} + +// ValuesIn() function allows generation of tests with parameters coming from +// a container. +// +// Synopsis: +// ValuesIn(const T (&array)[N]) +// - returns a generator producing sequences with elements from +// a C-style array. +// ValuesIn(const Container& container) +// - returns a generator producing sequences with elements from +// an STL-style container. +// ValuesIn(Iterator begin, Iterator end) +// - returns a generator producing sequences with elements from +// a range [begin, end) defined by a pair of STL-style iterators. These +// iterators can also be plain C pointers. +// +// Please note that ValuesIn copies the values from the containers +// passed in and keeps them to generate tests in RUN_ALL_TESTS(). 
+// +// Examples: +// +// This instantiates tests from test suite StringTest +// each with C-string values of "foo", "bar", and "baz": +// +// const char* strings[] = {"foo", "bar", "baz"}; +// INSTANTIATE_TEST_SUITE_P(StringSequence, StringTest, ValuesIn(strings)); +// +// This instantiates tests from test suite StlStringTest +// each with STL strings with values "a" and "b": +// +// ::std::vector< ::std::string> GetParameterStrings() { +// ::std::vector< ::std::string> v; +// v.push_back("a"); +// v.push_back("b"); +// return v; +// } +// +// INSTANTIATE_TEST_SUITE_P(CharSequence, +// StlStringTest, +// ValuesIn(GetParameterStrings())); +// +// +// This will also instantiate tests from CharTest +// each with parameter values 'a' and 'b': +// +// ::std::list GetParameterChars() { +// ::std::list list; +// list.push_back('a'); +// list.push_back('b'); +// return list; +// } +// ::std::list l = GetParameterChars(); +// INSTANTIATE_TEST_SUITE_P(CharSequence2, +// CharTest, +// ValuesIn(l.begin(), l.end())); +// +template +internal::ParamGenerator< + typename std::iterator_traits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end) { + typedef typename std::iterator_traits::value_type ParamType; + return internal::ParamGenerator( + new internal::ValuesInIteratorRangeGenerator(begin, end)); +} + +template +internal::ParamGenerator ValuesIn(const T (&array)[N]) { + return ValuesIn(array, array + N); +} + +template +internal::ParamGenerator ValuesIn( + const Container& container) { + return ValuesIn(container.begin(), container.end()); +} + +// Values() allows generating tests from explicitly specified list of +// parameters. +// +// Synopsis: +// Values(T v1, T v2, ..., T vN) +// - returns a generator producing sequences with elements v1, v2, ..., vN. +// +// For example, this instantiates tests from test suite BarTest each +// with values "one", "two", and "three": +// +// INSTANTIATE_TEST_SUITE_P(NumSequence, +// BarTest, +// Values("one", "two", "three")); +// +// This instantiates tests from test suite BazTest each with values 1, 2, 3.5. +// The exact type of values will depend on the type of parameter in BazTest. +// +// INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5)); +// +// +template +internal::ValueArray Values(T... v) { + return internal::ValueArray(std::move(v)...); +} + +// Bool() allows generating tests with parameters in a set of (false, true). +// +// Synopsis: +// Bool() +// - returns a generator producing sequences with elements {false, true}. +// +// It is useful when testing code that depends on Boolean flags. Combinations +// of multiple flags can be tested when several Bool()'s are combined using +// Combine() function. +// +// In the following example all tests in the test suite FlagDependentTest +// will be instantiated twice with parameters false and true. +// +// class FlagDependentTest : public testing::TestWithParam { +// virtual void SetUp() { +// external_flag = GetParam(); +// } +// } +// INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool()); +// +inline internal::ParamGenerator Bool() { + return Values(false, true); +} + +// Combine() allows the user to combine two or more sequences to produce +// values of a Cartesian product of those sequences' elements. +// +// Synopsis: +// Combine(gen1, gen2, ..., genN) +// - returns a generator producing sequences with elements coming from +// the Cartesian product of elements from the sequences generated by +// gen1, gen2, ..., genN. 
The sequence elements will have a type of +// std::tuple where T1, T2, ..., TN are the types +// of elements from sequences produces by gen1, gen2, ..., genN. +// +// Combine can have up to 10 arguments. +// +// Example: +// +// This will instantiate tests in test suite AnimalTest each one with +// the parameter values tuple("cat", BLACK), tuple("cat", WHITE), +// tuple("dog", BLACK), and tuple("dog", WHITE): +// +// enum Color { BLACK, GRAY, WHITE }; +// class AnimalTest +// : public testing::TestWithParam > {...}; +// +// TEST_P(AnimalTest, AnimalLooksNice) {...} +// +// INSTANTIATE_TEST_SUITE_P(AnimalVariations, AnimalTest, +// Combine(Values("cat", "dog"), +// Values(BLACK, WHITE))); +// +// This will instantiate tests in FlagDependentTest with all variations of two +// Boolean flags: +// +// class FlagDependentTest +// : public testing::TestWithParam > { +// virtual void SetUp() { +// // Assigns external_flag_1 and external_flag_2 values from the tuple. +// std::tie(external_flag_1, external_flag_2) = GetParam(); +// } +// }; +// +// TEST_P(FlagDependentTest, TestFeature1) { +// // Test your code using external_flag_1 and external_flag_2 here. +// } +// INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest, +// Combine(Bool(), Bool())); +// +template +internal::CartesianProductHolder Combine(const Generator&... g) { + return internal::CartesianProductHolder(g...); +} + +#define TEST_P(test_suite_name, test_name) \ + class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + : public test_suite_name { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {} \ + void TestBody() override; \ + \ + private: \ + static int AddToRegistry() { \ + ::testing::UnitTest::GetInstance() \ + ->parameterized_test_registry() \ + .GetTestSuitePatternHolder( \ + GTEST_STRINGIFY_(test_suite_name), \ + ::testing::internal::CodeLocation(__FILE__, __LINE__)) \ + ->AddTestPattern( \ + GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \ + new ::testing::internal::TestMetaFactory()); \ + return 0; \ + } \ + static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \ + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name)); \ + }; \ + int GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name)::gtest_registering_dummy_ = \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \ + void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody() + +// The last argument to INSTANTIATE_TEST_SUITE_P allows the user to specify +// generator and an optional function or functor that generates custom test name +// suffixes based on the test parameters. Such a function or functor should +// accept one argument of type testing::TestParamInfo, and +// return std::string. +// +// testing::PrintToStringParamName is a builtin test suffix generator that +// returns the value of testing::PrintToString(GetParam()). +// +// Note: test names must be non-empty, unique, and may only contain ASCII +// alphanumeric characters or underscore. Because PrintToString adds quotes +// to std::string and C strings, it won't work for these types. + +#define GTEST_EXPAND_(arg) arg +#define GTEST_GET_FIRST_(first, ...) first +#define GTEST_GET_SECOND_(first, second, ...) second + +#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) 
\ + static ::testing::internal::ParamGenerator \ + gtest_##prefix##test_suite_name##_EvalGenerator_() { \ + return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \ + } \ + static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \ + const ::testing::TestParamInfo& info) { \ + if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \ + __VA_ARGS__, \ + ::testing::internal::DefaultParamName, \ + DUMMY_PARAM_))); \ + auto t = std::make_tuple(__VA_ARGS__); \ + static_assert(std::tuple_size::value <= 2, \ + "Too Many Args!"); \ + } \ + return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \ + __VA_ARGS__, \ + ::testing::internal::DefaultParamName, \ + DUMMY_PARAM_))))(info); \ + } \ + static int gtest_##prefix##test_suite_name##_dummy_ \ + GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::UnitTest::GetInstance() \ + ->parameterized_test_registry() \ + .GetTestSuitePatternHolder( \ + GTEST_STRINGIFY_(test_suite_name), \ + ::testing::internal::CodeLocation(__FILE__, __LINE__)) \ + ->AddTestSuiteInstantiation( \ + GTEST_STRINGIFY_(prefix), \ + >est_##prefix##test_suite_name##_EvalGenerator_, \ + >est_##prefix##test_suite_name##_EvalGenerateName_, \ + __FILE__, __LINE__) + + +// Allow Marking a Parameterized test class as not needing to be instantiated. +#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(T) \ + namespace gtest_do_not_use_outside_namespace_scope {} \ + static const ::testing::internal::MarkAsIgnored gtest_allow_ignore_##T( \ + GTEST_STRINGIFY_(T)) + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +#define INSTANTIATE_TEST_CASE_P \ + static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), \ + ""); \ + INSTANTIATE_TEST_SUITE_P +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-printers.h b/source/3rdparty/gtest/include/gtest/gtest-printers.h new file mode 100644 index 0000000..c443625 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-printers.h @@ -0,0 +1,926 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
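A short sketch of the optional name-generator argument to INSTANTIATE_TEST_SUITE_P described above; with PrintToStringParamName() the printed parameter value becomes the test-name suffix (FooTest here is a minimal stand-in fixture):

#include "gtest/gtest.h"

class FooTest : public ::testing::TestWithParam<int> {};

TEST_P(FooTest, DoesBlah) { EXPECT_GE(GetParam(), 8); }

INSTANTIATE_TEST_SUITE_P(Widths, FooTest,
                         ::testing::Values(8, 16, 32),
                         ::testing::PrintToStringParamName());

// Produces Widths/FooTest.DoesBlah/8, .../16 and .../32 instead of the
// default /0, /1, /2 suffixes.
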
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Google Test - The Google C++ Testing and Mocking Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// A user can teach this function how to print a class type T by +// defining either operator<<() or PrintTo() in the namespace that +// defines T. More specifically, the FIRST defined function in the +// following list will be used (assuming T is defined in namespace +// foo): +// +// 1. foo::PrintTo(const T&, ostream*) +// 2. operator<<(ostream&, const T&) defined in either foo or the +// global namespace. +// +// However if T is an STL-style container then it is printed element-wise +// unless foo::PrintTo(const T&, ostream*) is defined. Note that +// operator<<() is ignored for container types. +// +// If none of the above is defined, it will print the debug string of +// the value if it is a protocol buffer, or print the raw bytes in the +// value otherwise. +// +// To aid debugging: when T is a reference type, the address of the +// value is also printed; when T is a (const) char pointer, both the +// pointer value and the NUL-terminated string it points to are +// printed. +// +// We also provide some convenient wrappers: +// +// // Prints a value to a string. For a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// std::string ::testing::PrintToString(const T& value); +// +// // Prints a value tersely: for a reference type, the referenced +// // value (but not the address) is printed; for a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// void ::testing::internal::UniversalTersePrint(const T& value, ostream*); +// +// // Prints value using the type inferred by the compiler. The difference +// // from UniversalTersePrint() is that this function prints both the +// // pointer and the NUL-terminated string for a (const or not) char pointer. +// void ::testing::internal::UniversalPrint(const T& value, ostream*); +// +// // Prints the fields of a tuple tersely to a string vector, one +// // element for each field. Tuple support must be enabled in +// // gtest-port.h. +// std::vector UniversalTersePrintTupleFieldsToStrings( +// const Tuple& value); +// +// Known limitation: +// +// The print primitives print the elements of an STL-style container +// using the compiler-inferred type of *iter where iter is a +// const_iterator of the container. When const_iterator is an input +// iterator but not a forward iterator, this inferred type may not +// match value_type, and the print output may be incorrect. In +// practice, this is rarely a problem as for most containers +// const_iterator is a forward iterator. We'll fix this if there's an +// actual need for it. Note that this fix cannot rely on value_type +// being defined as many user-defined container types don't have +// value_type. 
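A brief sketch of the two customization points listed above, using an illustrative foo::Point type; only one of the two hooks is needed, and PrintTo() is picked first when both are defined:

#include <ostream>
#include "gtest/gtest.h"

namespace foo {

struct Point { int x; int y; };

// 1. Preferred hook: PrintTo() in the same namespace as the type.
void PrintTo(const Point& p, ::std::ostream* os) {
  *os << "(" << p.x << ", " << p.y << ")";
}

// 2. Alternative: a regular operator<< found via argument-dependent lookup.
inline ::std::ostream& operator<<(::std::ostream& os, const Point& p) {
  return os << "(" << p.x << ", " << p.y << ")";
}

}  // namespace foo

// ::testing::PrintToString(foo::Point{3, 4}) then returns "(3, 4)".
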
+ +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#include +#include // NOLINT +#include +#include +#include +#include +#include +#include +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" + +#if GTEST_HAS_ABSL +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "absl/types/variant.h" +#endif // GTEST_HAS_ABSL + +namespace testing { + +// Definitions in the 'internal' and 'internal2' name spaces are +// subject to change without notice. DO NOT USE THEM IN USER CODE! +namespace internal2 { + +// Prints the given number of bytes in the given object to the given +// ostream. +GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes, + size_t count, + ::std::ostream* os); + +// For selecting which printer to use when a given type has neither << +// nor PrintTo(). +enum TypeKind { + kProtobuf, // a protobuf type + kConvertibleToInteger, // a type implicitly convertible to BiggestInt + // (e.g. a named or unnamed enum type) +#if GTEST_HAS_ABSL + kConvertibleToStringView, // a type implicitly convertible to + // absl::string_view +#endif + kOtherType // anything else +}; + +// TypeWithoutFormatter::PrintValue(value, os) is called +// by the universal printer to print a value of type T when neither +// operator<< nor PrintTo() is defined for T, where kTypeKind is the +// "kind" of T as defined by enum TypeKind. +template +class TypeWithoutFormatter { + public: + // This default version is called when kTypeKind is kOtherType. + static void PrintValue(const T& value, ::std::ostream* os) { + PrintBytesInObjectTo( + static_cast( + reinterpret_cast(std::addressof(value))), + sizeof(value), os); + } +}; + +// We print a protobuf using its ShortDebugString() when the string +// doesn't exceed this many characters; otherwise we print it using +// DebugString() for better readability. +const size_t kProtobufOneLinerMaxLength = 50; + +template +class TypeWithoutFormatter { + public: + static void PrintValue(const T& value, ::std::ostream* os) { + std::string pretty_str = value.ShortDebugString(); + if (pretty_str.length() > kProtobufOneLinerMaxLength) { + pretty_str = "\n" + value.DebugString(); + } + *os << ("<" + pretty_str + ">"); + } +}; + +template +class TypeWithoutFormatter { + public: + // Since T has no << operator or PrintTo() but can be implicitly + // converted to BiggestInt, we print it as a BiggestInt. + // + // Most likely T is an enum type (either named or unnamed), in which + // case printing it as an integer is the desired behavior. In case + // T is not an enum, printing it as an integer is the best we can do + // given that it has no user-defined printer. + static void PrintValue(const T& value, ::std::ostream* os) { + const internal::BiggestInt kBigInt = value; + *os << kBigInt; + } +}; + +#if GTEST_HAS_ABSL +template +class TypeWithoutFormatter { + public: + // Since T has neither operator<< nor PrintTo() but can be implicitly + // converted to absl::string_view, we print it as a absl::string_view. + // + // Note: the implementation is further below, as it depends on + // internal::PrintTo symbol which is defined later in the file. + static void PrintValue(const T& value, ::std::ostream* os); +}; +#endif + +// Prints the given value to the given ostream. 
If the value is a +// protocol message, its debug string is printed; if it's an enum or +// of a type implicitly convertible to BiggestInt, it's printed as an +// integer; otherwise the bytes in the value are printed. This is +// what UniversalPrinter::Print() does when it knows nothing about +// type T and T has neither << operator nor PrintTo(). +// +// A user can override this behavior for a class type Foo by defining +// a << operator in the namespace where Foo is defined. +// +// We put this operator in namespace 'internal2' instead of 'internal' +// to simplify the implementation, as much code in 'internal' needs to +// use << in STL, which would conflict with our own << were it defined +// in 'internal'. +// +// Note that this operator<< takes a generic std::basic_ostream type instead of the more restricted std::ostream. If +// we define it to take an std::ostream instead, we'll get an +// "ambiguous overloads" compiler error when trying to print a type +// Foo that supports streaming to std::basic_ostream, as the compiler cannot tell whether +// operator<<(std::ostream&, const T&) or +// operator<<(std::basic_stream, const Foo&) is more +// specific. +template +::std::basic_ostream& operator<<( + ::std::basic_ostream& os, const T& x) { + TypeWithoutFormatter::value + ? kProtobuf + : std::is_convertible< + const T&, internal::BiggestInt>::value + ? kConvertibleToInteger + : +#if GTEST_HAS_ABSL + std::is_convertible< + const T&, absl::string_view>::value + ? kConvertibleToStringView + : +#endif + kOtherType)>::PrintValue(x, &os); + return os; +} + +} // namespace internal2 +} // namespace testing + +// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up +// magic needed for implementing UniversalPrinter won't work. +namespace testing_internal { + +// Used to print a value that is not an STL-style container when the +// user doesn't define PrintTo() for it. +template +void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) { + // With the following statement, during unqualified name lookup, + // testing::internal2::operator<< appears as if it was declared in + // the nearest enclosing namespace that contains both + // ::testing_internal and ::testing::internal2, i.e. the global + // namespace. For more details, refer to the C++ Standard section + // 7.3.4-1 [namespace.udir]. This allows us to fall back onto + // testing::internal2::operator<< in case T doesn't come with a << + // operator. + + using ::testing::internal2::operator<<; + + // Assuming T is defined in namespace foo, in the next statement, + // the compiler will consider all of: + // + // 1. foo::operator<< (thanks to Koenig look-up), + // 2. ::operator<< (as the current namespace is enclosed in ::), + // 3. testing::internal2::operator<< (thanks to the using statement above). + // + // The operator<< whose type matches T best will be picked. + // + // We deliberately allow #2 to be a candidate, as sometimes it's + // impossible to define #1 (e.g. when foo is ::std, defining + // anything in it is undefined behavior unless you are a compiler + // vendor.). + *os << value; +} + +} // namespace testing_internal + +namespace testing { +namespace internal { + +// FormatForComparison::Format(value) formats a +// value of type ToPrint that is an operand of a comparison assertion +// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in +// the comparison, and is used to help determine the best way to +// format the value. 
In particular, when the value is a C string +// (char pointer) and the other operand is an STL string object, we +// want to format the C string as a string, since we know it is +// compared by value with the string object. If the value is a char +// pointer but the other operand is not an STL string object, we don't +// know whether the pointer is supposed to point to a NUL-terminated +// string, and thus want to print it as a pointer to be safe. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +// The default case. +template +class FormatForComparison { + public: + static ::std::string Format(const ToPrint& value) { + return ::testing::PrintToString(value); + } +}; + +// Array. +template +class FormatForComparison { + public: + static ::std::string Format(const ToPrint* value) { + return FormatForComparison::Format(value); + } +}; + +// By default, print C string as pointers to be safe, as we don't know +// whether they actually point to a NUL-terminated string. + +#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \ + template \ + class FormatForComparison { \ + public: \ + static ::std::string Format(CharType* value) { \ + return ::testing::PrintToString(static_cast(value)); \ + } \ + } + +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char); +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char); +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t); +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t); + +#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_ + +// If a C string is compared with an STL string object, we know it's meant +// to point to a NUL-terminated string, and thus can print it as a string. + +#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \ + template <> \ + class FormatForComparison { \ + public: \ + static ::std::string Format(CharType* value) { \ + return ::testing::PrintToString(value); \ + } \ + } + +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string); +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string); + +#if GTEST_HAS_STD_WSTRING +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring); +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring); +#endif + +#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_ + +// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc) +// operand to be used in a failure message. The type (but not value) +// of the other operand may affect the format. This allows us to +// print a char* as a raw pointer when it is compared against another +// char* or void*, and print it as a C string when it is compared +// against an std::string object, for example. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +template +std::string FormatForComparisonFailureMessage( + const T1& value, const T2& /* other_operand */) { + return FormatForComparison::Format(value); +} + +// UniversalPrinter::Print(value, ostream_ptr) prints the given +// value to the given ostream. The caller must ensure that +// 'ostream_ptr' is not NULL, or the behavior is undefined. +// +// We define UniversalPrinter as a class template (as opposed to a +// function template), as we need to partially specialize it for +// reference types, which cannot be done with function templates. 
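+
+// Editor's sketch (not part of the upstream header): how the formatting rules
+// above surface in failure messages. The test name and literals are made up,
+// and the exact message text can vary between gtest versions, so the comments
+// only indicate the general shape.
+#if 0
+#include <string>
+#include "gtest/gtest.h"
+
+TEST(ComparisonFormatDemo, CStringOperands) {
+  const char* name = "hello";
+  const std::string expected = "hello";
+  // Compared against an STL string, so a mismatch would report the char*
+  // operand as the quoted text "hello", not as an address.
+  EXPECT_EQ(expected, name);
+  // Compared against another char*, EXPECT_EQ would compare and report raw
+  // pointer values; EXPECT_STREQ is the content-comparing alternative.
+  EXPECT_STREQ("hello", name);
+}
+#endif  // 0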
+template +class UniversalPrinter; + +template +void UniversalPrint(const T& value, ::std::ostream* os); + +enum DefaultPrinterType { + kPrintContainer, + kPrintPointer, + kPrintFunctionPointer, + kPrintOther, +}; +template struct WrapPrinterType {}; + +// Used to print an STL-style container when the user doesn't define +// a PrintTo() for it. +template +void DefaultPrintTo(WrapPrinterType /* dummy */, + const C& container, ::std::ostream* os) { + const size_t kMaxCount = 32; // The maximum number of elements to print. + *os << '{'; + size_t count = 0; + for (typename C::const_iterator it = container.begin(); + it != container.end(); ++it, ++count) { + if (count > 0) { + *os << ','; + if (count == kMaxCount) { // Enough has been printed. + *os << " ..."; + break; + } + } + *os << ' '; + // We cannot call PrintTo(*it, os) here as PrintTo() doesn't + // handle *it being a native array. + internal::UniversalPrint(*it, os); + } + + if (count > 0) { + *os << ' '; + } + *os << '}'; +} + +// Used to print a pointer that is neither a char pointer nor a member +// pointer, when the user doesn't define PrintTo() for it. (A member +// variable pointer or member function pointer doesn't really point to +// a location in the address space. Their representation is +// implementation-defined. Therefore they will be printed as raw +// bytes.) +template +void DefaultPrintTo(WrapPrinterType /* dummy */, + T* p, ::std::ostream* os) { + if (p == nullptr) { + *os << "NULL"; + } else { + // T is not a function type. We just call << to print p, + // relying on ADL to pick up user-defined << for their pointer + // types, if any. + *os << p; + } +} +template +void DefaultPrintTo(WrapPrinterType /* dummy */, + T* p, ::std::ostream* os) { + if (p == nullptr) { + *os << "NULL"; + } else { + // T is a function type, so '*os << p' doesn't do what we want + // (it just prints p as bool). We want to print p as a const + // void*. + *os << reinterpret_cast(p); + } +} + +// Used to print a non-container, non-pointer value when the user +// doesn't define PrintTo() for it. +template +void DefaultPrintTo(WrapPrinterType /* dummy */, + const T& value, ::std::ostream* os) { + ::testing_internal::DefaultPrintNonContainerTo(value, os); +} + +// Prints the given value using the << operator if it has one; +// otherwise prints the bytes in it. This is what +// UniversalPrinter::Print() does when PrintTo() is not specialized +// or overloaded for type T. +// +// A user can override this behavior for a class type Foo by defining +// an overload of PrintTo() in the namespace where Foo is defined. We +// give the user this option as sometimes defining a << operator for +// Foo is not desirable (e.g. the coding style may prevent doing it, +// or there is already a << operator but it doesn't do what the user +// wants). +template +void PrintTo(const T& value, ::std::ostream* os) { + // DefaultPrintTo() is overloaded. The type of its first argument + // determines which version will be picked. + // + // Note that we check for container types here, prior to we check + // for protocol message types in our operator<<. The rationale is: + // + // For protocol messages, we want to give people a chance to + // override Google Mock's format by defining a PrintTo() or + // operator<<. For STL containers, other formats can be + // incompatible with Google Mock's format for the container + // elements; therefore we check for container types here to ensure + // that our format is used. 
+ // + // Note that MSVC and clang-cl do allow an implicit conversion from + // pointer-to-function to pointer-to-object, but clang-cl warns on it. + // So don't use ImplicitlyConvertible if it can be helped since it will + // cause this warning, and use a separate overload of DefaultPrintTo for + // function pointers so that the `*os << p` in the object pointer overload + // doesn't cause that warning either. + DefaultPrintTo( + WrapPrinterType < + (sizeof(IsContainerTest(0)) == sizeof(IsContainer)) && + !IsRecursiveContainer::value + ? kPrintContainer + : !std::is_pointer::value + ? kPrintOther + : std::is_function::type>::value + ? kPrintFunctionPointer + : kPrintPointer > (), + value, os); +} + +// The following list of PrintTo() overloads tells +// UniversalPrinter::Print() how to print standard types (built-in +// types, strings, plain arrays, and pointers). + +// Overloads for various char types. +GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os); +GTEST_API_ void PrintTo(signed char c, ::std::ostream* os); +inline void PrintTo(char c, ::std::ostream* os) { + // When printing a plain char, we always treat it as unsigned. This + // way, the output won't be affected by whether the compiler thinks + // char is signed or not. + PrintTo(static_cast(c), os); +} + +// Overloads for other simple built-in types. +inline void PrintTo(bool x, ::std::ostream* os) { + *os << (x ? "true" : "false"); +} + +// Overload for wchar_t type. +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its decimal code (except for L'\0'). +// The L'\0' char is printed as "L'\\0'". The decimal code is printed +// as signed integer when wchar_t is implemented by the compiler +// as a signed type and is printed as an unsigned integer when wchar_t +// is implemented as an unsigned type. +GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os); + +// Overloads for C strings. +GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); +inline void PrintTo(char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// signed/unsigned char is often used for representing binary data, so +// we print pointers to it as void* to be safe. +inline void PrintTo(const signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(const unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// MSVC can be configured to define wchar_t as a typedef of unsigned +// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native +// type. When wchar_t is a typedef, defining an overload for const +// wchar_t* would cause unsigned short* be printed as a wide string, +// possibly causing invalid memory accesses. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Overloads for wide C strings +GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os); +inline void PrintTo(wchar_t* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +#endif + +// Overload for C arrays. Multi-dimensional arrays are printed +// properly. + +// Prints the given number of elements in an array, without printing +// the curly braces. 
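+
+// Editor's sketch (not part of the upstream header): the element-wise
+// container format produced by the dispatch above. The vector contents are
+// arbitrary; only ::testing::PrintToString() from this header is used.
+#if 0
+#include <vector>
+#include "gtest/gtest.h"
+
+TEST(ContainerPrintDemo, VectorIsPrintedElementWise) {
+  const std::vector<int> v = {1, 2, 3};
+  // STL-style containers take the container branch, so the result is
+  // "{ 1, 2, 3 }"; for a longer container only the first 32 elements are
+  // printed, followed by "...".
+  EXPECT_EQ("{ 1, 2, 3 }", ::testing::PrintToString(v));
+}
+#endif  // 0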
+template +void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { + UniversalPrint(a[0], os); + for (size_t i = 1; i != count; i++) { + *os << ", "; + UniversalPrint(a[i], os); + } +} + +// Overloads for ::std::string. +GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); +inline void PrintTo(const ::std::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} + +// Overloads for ::std::wstring. +#if GTEST_HAS_STD_WSTRING +GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_ABSL +// Overload for absl::string_view. +inline void PrintTo(absl::string_view sp, ::std::ostream* os) { + PrintTo(::std::string(sp), os); +} +#endif // GTEST_HAS_ABSL + +inline void PrintTo(std::nullptr_t, ::std::ostream* os) { *os << "(nullptr)"; } + +template +void PrintTo(std::reference_wrapper ref, ::std::ostream* os) { + UniversalPrinter::Print(ref.get(), os); +} + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T&, std::integral_constant, + ::std::ostream*) {} + +template +void PrintTupleTo(const T& t, std::integral_constant, + ::std::ostream* os) { + PrintTupleTo(t, std::integral_constant(), os); + GTEST_INTENTIONAL_CONST_COND_PUSH_() + if (I > 1) { + GTEST_INTENTIONAL_CONST_COND_POP_() + *os << ", "; + } + UniversalPrinter::type>::Print( + std::get(t), os); +} + +template +void PrintTo(const ::std::tuple& t, ::std::ostream* os) { + *os << "("; + PrintTupleTo(t, std::integral_constant(), os); + *os << ")"; +} + +// Overload for std::pair. +template +void PrintTo(const ::std::pair& value, ::std::ostream* os) { + *os << '('; + // We cannot use UniversalPrint(value.first, os) here, as T1 may be + // a reference type. The same for printing value.second. + UniversalPrinter::Print(value.first, os); + *os << ", "; + UniversalPrinter::Print(value.second, os); + *os << ')'; +} + +// Implements printing a non-reference type T by letting the compiler +// pick the right overload of PrintTo() for T. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180) + + // Note: we deliberately don't call this PrintTo(), as that name + // conflicts with ::testing::internal::PrintTo in the body of the + // function. + static void Print(const T& value, ::std::ostream* os) { + // By default, ::testing::internal::PrintTo() is used for printing + // the value. + // + // Thanks to Koenig look-up, if T is a class and has its own + // PrintTo() function defined in its namespace, that function will + // be visible here. Since it is more specific than the generic ones + // in ::testing::internal, it will be picked by the compiler in the + // following statement - exactly what we want. 
+ PrintTo(value, os); + } + + GTEST_DISABLE_MSC_WARNINGS_POP_() +}; + +#if GTEST_HAS_ABSL + +// Printer for absl::optional + +template +class UniversalPrinter<::absl::optional> { + public: + static void Print(const ::absl::optional& value, ::std::ostream* os) { + *os << '('; + if (!value) { + *os << "nullopt"; + } else { + UniversalPrint(*value, os); + } + *os << ')'; + } +}; + +// Printer for absl::variant + +template +class UniversalPrinter<::absl::variant> { + public: + static void Print(const ::absl::variant& value, ::std::ostream* os) { + *os << '('; + absl::visit(Visitor{os}, value); + *os << ')'; + } + + private: + struct Visitor { + template + void operator()(const U& u) const { + *os << "'" << GetTypeName() << "' with value "; + UniversalPrint(u, os); + } + ::std::ostream* os; + }; +}; + +#endif // GTEST_HAS_ABSL + +// UniversalPrintArray(begin, len, os) prints an array of 'len' +// elements, starting at address 'begin'. +template +void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { + if (len == 0) { + *os << "{}"; + } else { + *os << "{ "; + const size_t kThreshold = 18; + const size_t kChunkSize = 8; + // If the array has more than kThreshold elements, we'll have to + // omit some details by printing only the first and the last + // kChunkSize elements. + if (len <= kThreshold) { + PrintRawArrayTo(begin, len, os); + } else { + PrintRawArrayTo(begin, kChunkSize, os); + *os << ", ..., "; + PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os); + } + *os << " }"; + } +} +// This overload prints a (const) char array compactly. +GTEST_API_ void UniversalPrintArray( + const char* begin, size_t len, ::std::ostream* os); + +// This overload prints a (const) wchar_t array compactly. +GTEST_API_ void UniversalPrintArray( + const wchar_t* begin, size_t len, ::std::ostream* os); + +// Implements printing an array type T[N]. +template +class UniversalPrinter { + public: + // Prints the given array, omitting some elements when there are too + // many. + static void Print(const T (&a)[N], ::std::ostream* os) { + UniversalPrintArray(a, N, os); + } +}; + +// Implements printing a reference type T&. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180) + + static void Print(const T& value, ::std::ostream* os) { + // Prints the address of the value. We use reinterpret_cast here + // as static_cast doesn't compile when T is a function type. + *os << "@" << reinterpret_cast(&value) << " "; + + // Then prints the value itself. + UniversalPrint(value, os); + } + + GTEST_DISABLE_MSC_WARNINGS_POP_() +}; + +// Prints a value tersely: for a reference type, the referenced value +// (but not the address) is printed; for a (const) char pointer, the +// NUL-terminated string (but not the pointer) is printed. 
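+
+// Editor's sketch (not part of the upstream header): the array elision rule
+// implemented above, observed through ::testing::PrintToString(). The array
+// contents are arbitrary and the expected strings are indicative only.
+#if 0
+#include "gtest/gtest.h"
+
+TEST(ArrayPrintDemo, LongArraysAreElided) {
+  const int small[] = {1, 2, 3};
+  // 3 <= 18, so every element is printed.
+  EXPECT_EQ("{ 1, 2, 3 }", ::testing::PrintToString(small));
+
+  int big[20];
+  for (int i = 0; i < 20; ++i) big[i] = i;
+  // 20 > 18, so only the first and last 8 elements appear around "...".
+  EXPECT_EQ("{ 0, 1, 2, 3, 4, 5, 6, 7, ..., 12, 13, 14, 15, 16, 17, 18, 19 }",
+            ::testing::PrintToString(big));
+}
+#endif  // 0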
+ +template +class UniversalTersePrinter { + public: + static void Print(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); + } +}; +template +class UniversalTersePrinter { + public: + static void Print(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); + } +}; +template +class UniversalTersePrinter { + public: + static void Print(const T (&value)[N], ::std::ostream* os) { + UniversalPrinter::Print(value, os); + } +}; +template <> +class UniversalTersePrinter { + public: + static void Print(const char* str, ::std::ostream* os) { + if (str == nullptr) { + *os << "NULL"; + } else { + UniversalPrint(std::string(str), os); + } + } +}; +template <> +class UniversalTersePrinter { + public: + static void Print(char* str, ::std::ostream* os) { + UniversalTersePrinter::Print(str, os); + } +}; + +#if GTEST_HAS_STD_WSTRING +template <> +class UniversalTersePrinter { + public: + static void Print(const wchar_t* str, ::std::ostream* os) { + if (str == nullptr) { + *os << "NULL"; + } else { + UniversalPrint(::std::wstring(str), os); + } + } +}; +#endif + +template <> +class UniversalTersePrinter { + public: + static void Print(wchar_t* str, ::std::ostream* os) { + UniversalTersePrinter::Print(str, os); + } +}; + +template +void UniversalTersePrint(const T& value, ::std::ostream* os) { + UniversalTersePrinter::Print(value, os); +} + +// Prints a value using the type inferred by the compiler. The +// difference between this and UniversalTersePrint() is that for a +// (const) char pointer, this prints both the pointer and the +// NUL-terminated string. +template +void UniversalPrint(const T& value, ::std::ostream* os) { + // A workarond for the bug in VC++ 7.1 that prevents us from instantiating + // UniversalPrinter with T directly. + typedef T T1; + UniversalPrinter::Print(value, os); +} + +typedef ::std::vector< ::std::string> Strings; + + // Tersely prints the first N fields of a tuple to a string vector, + // one element for each field. +template +void TersePrintPrefixToStrings(const Tuple&, std::integral_constant, + Strings*) {} +template +void TersePrintPrefixToStrings(const Tuple& t, + std::integral_constant, + Strings* strings) { + TersePrintPrefixToStrings(t, std::integral_constant(), + strings); + ::std::stringstream ss; + UniversalTersePrint(std::get(t), &ss); + strings->push_back(ss.str()); +} + +// Prints the fields of a tuple tersely to a string vector, one +// element for each field. See the comment before +// UniversalTersePrint() for how we define "tersely". +template +Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { + Strings result; + TersePrintPrefixToStrings( + value, std::integral_constant::value>(), + &result); + return result; +} + +} // namespace internal + +#if GTEST_HAS_ABSL +namespace internal2 { +template +void TypeWithoutFormatter::PrintValue( + const T& value, ::std::ostream* os) { + internal::PrintTo(absl::string_view(value), os); +} +} // namespace internal2 +#endif + +template +::std::string PrintToString(const T& value) { + ::std::stringstream ss; + internal::UniversalTersePrinter::Print(value, &ss); + return ss.str(); +} + +} // namespace testing + +// Include any custom printer added by the local installation. +// We must include this header at the end to make sure it can use the +// declarations from this file. 
+#include "gtest/internal/custom/gtest-printers.h" + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-spi.h b/source/3rdparty/gtest/include/gtest/gtest-spi.h new file mode 100644 index 0000000..aa38870 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-spi.h @@ -0,0 +1,238 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// Utilities for testing Google Test itself and code that uses Google Test +// (e.g. frameworks built on top of Google Test). + +// GOOGLETEST_CM0004 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_ +#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + +#include "gtest/gtest.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +namespace testing { + +// This helper class can be used to mock out Google Test failure reporting +// so that we can test Google Test or code that builds on Google Test. +// +// An object of this class appends a TestPartResult object to the +// TestPartResultArray object given in the constructor whenever a Google Test +// failure is reported. It can either intercept only failures that are +// generated in the same thread that created this object or it can intercept +// all generated failures. The scope of this mock object can be controlled with +// the second argument to the two arguments constructor. +class GTEST_API_ ScopedFakeTestPartResultReporter + : public TestPartResultReporterInterface { + public: + // The two possible mocking modes of this object. + enum InterceptMode { + INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. + INTERCEPT_ALL_THREADS // Intercepts all failures. + }; + + // The c'tor sets this object as the test part result reporter used + // by Google Test. The 'result' parameter specifies where to report the + // results. This reporter will only catch failures generated in the current + // thread. 
DEPRECATED + explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); + + // Same as above, but you can choose the interception scope of this object. + ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, + TestPartResultArray* result); + + // The d'tor restores the previous test part result reporter. + ~ScopedFakeTestPartResultReporter() override; + + // Appends the TestPartResult object to the TestPartResultArray + // received in the constructor. + // + // This method is from the TestPartResultReporterInterface + // interface. + void ReportTestPartResult(const TestPartResult& result) override; + + private: + void Init(); + + const InterceptMode intercept_mode_; + TestPartResultReporterInterface* old_reporter_; + TestPartResultArray* const result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); +}; + +namespace internal { + +// A helper class for implementing EXPECT_FATAL_FAILURE() and +// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +class GTEST_API_ SingleFailureChecker { + public: + // The constructor remembers the arguments. + SingleFailureChecker(const TestPartResultArray* results, + TestPartResult::Type type, const std::string& substr); + ~SingleFailureChecker(); + private: + const TestPartResultArray* const results_; + const TestPartResult::Type type_; + const std::string substr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker); +}; + +} // namespace internal + +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +// A set of macros for testing Google Test assertions or code that's expected +// to generate Google Test fatal failures. It verifies that the given +// statement will cause exactly one fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_FATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - 'statement' cannot reference local non-static variables or +// non-static members of the current object. +// - 'statement' cannot return a value. +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. The AcceptsMacroThatExpandsToUnprotectedComma test in +// gtest_unittest.cc will fail to compile if we do that. 
+#define EXPECT_FATAL_FAILURE(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ALL_THREADS, >est_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +// A macro for testing Google Test assertions or code that's expected to +// generate Google Test non-fatal failures. It asserts that the given +// statement will cause exactly one non-fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// 'statement' is allowed to reference local variables and members of +// the current object. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. If we do that, the code won't compile when the user gives +// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that +// expands to code containing an unprotected comma. The +// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc +// catches that. +// +// For the same reason, we have to write +// if (::testing::internal::AlwaysTrue()) { statement; } +// instead of +// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) +// to avoid an MSVC warning on unreachable code. 
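+
+// Editor's sketch (not part of the upstream header): typical use of the
+// fatal-failure macro defined above; the non-fatal variant defined just below
+// is used the same way. The test name and message text are made up.
+#if 0
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+
+TEST(SpiDemo, CatchesExpectedFailures) {
+  // Passes only if the wrapped statement produces exactly one fatal failure
+  // whose message contains the given substring.
+  EXPECT_FATAL_FAILURE(FAIL() << "unexpected disk state", "disk state");
+  // Analogous check for a single non-fatal failure.
+  EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "soft warning", "soft warning");
+}
+#endif  // 0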
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \ + do {\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ + if (::testing::internal::AlwaysTrue()) { statement; }\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do {\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ + >est_failures);\ + if (::testing::internal::AlwaysTrue()) { statement; }\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-test-part.h b/source/3rdparty/gtest/include/gtest/gtest-test-part.h new file mode 100644 index 0000000..05a7985 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-test-part.h @@ -0,0 +1,184 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ + +#include +#include +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-string.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +namespace testing { + +// A copyable object representing the result of a test part (i.e. an +// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()). +// +// Don't inherit from TestPartResult as its destructor is not virtual. 
+class GTEST_API_ TestPartResult { + public: + // The possible outcomes of a test part (i.e. an assertion or an + // explicit SUCCEED(), FAIL(), or ADD_FAILURE()). + enum Type { + kSuccess, // Succeeded. + kNonFatalFailure, // Failed but the test can continue. + kFatalFailure, // Failed and the test should be terminated. + kSkip // Skipped. + }; + + // C'tor. TestPartResult does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestPartResult object. + TestPartResult(Type a_type, const char* a_file_name, int a_line_number, + const char* a_message) + : type_(a_type), + file_name_(a_file_name == nullptr ? "" : a_file_name), + line_number_(a_line_number), + summary_(ExtractSummary(a_message)), + message_(a_message) {} + + // Gets the outcome of the test part. + Type type() const { return type_; } + + // Gets the name of the source file where the test part took place, or + // NULL if it's unknown. + const char* file_name() const { + return file_name_.empty() ? nullptr : file_name_.c_str(); + } + + // Gets the line in the source file where the test part took place, + // or -1 if it's unknown. + int line_number() const { return line_number_; } + + // Gets the summary of the failure message. + const char* summary() const { return summary_.c_str(); } + + // Gets the message associated with the test part. + const char* message() const { return message_.c_str(); } + + // Returns true if and only if the test part was skipped. + bool skipped() const { return type_ == kSkip; } + + // Returns true if and only if the test part passed. + bool passed() const { return type_ == kSuccess; } + + // Returns true if and only if the test part non-fatally failed. + bool nonfatally_failed() const { return type_ == kNonFatalFailure; } + + // Returns true if and only if the test part fatally failed. + bool fatally_failed() const { return type_ == kFatalFailure; } + + // Returns true if and only if the test part failed. + bool failed() const { return fatally_failed() || nonfatally_failed(); } + + private: + Type type_; + + // Gets the summary of the failure message by omitting the stack + // trace in it. + static std::string ExtractSummary(const char* message); + + // The name of the source file where the test part took place, or + // "" if the source file is unknown. + std::string file_name_; + // The line in the source file where the test part took place, or -1 + // if the line number is unknown. + int line_number_; + std::string summary_; // The test failure summary. + std::string message_; // The test failure message. +}; + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result); + +// An array of TestPartResult objects. +// +// Don't inherit from TestPartResultArray as its destructor is not +// virtual. +class GTEST_API_ TestPartResultArray { + public: + TestPartResultArray() {} + + // Appends the given TestPartResult to the array. + void Append(const TestPartResult& result); + + // Returns the TestPartResult at the given index (0-based). + const TestPartResult& GetTestPartResult(int index) const; + + // Returns the number of TestPartResult objects in the array. + int size() const; + + private: + std::vector array_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); +}; + +// This interface knows how to report a test part result. 
+class GTEST_API_ TestPartResultReporterInterface { + public: + virtual ~TestPartResultReporterInterface() {} + + virtual void ReportTestPartResult(const TestPartResult& result) = 0; +}; + +namespace internal { + +// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a +// statement generates new fatal failures. To do so it registers itself as the +// current test part result reporter. Besides checking if fatal failures were +// reported, it only delegates the reporting to the former result reporter. +// The original result reporter is restored in the destructor. +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +class GTEST_API_ HasNewFatalFailureHelper + : public TestPartResultReporterInterface { + public: + HasNewFatalFailureHelper(); + ~HasNewFatalFailureHelper() override; + void ReportTestPartResult(const TestPartResult& result) override; + bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: + bool has_new_fatal_failure_; + TestPartResultReporterInterface* original_reporter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); +}; + +} // namespace internal + +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest-typed-test.h b/source/3rdparty/gtest/include/gtest/gtest-typed-test.h new file mode 100644 index 0000000..3ffa50b --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest-typed-test.h @@ -0,0 +1,337 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// This header implements typed tests and type-parameterized tests. + +// Typed (aka type-driven) tests repeat the same test for types in a +// list. You must know which types you want to test with when writing +// typed tests. Here's how you do it: + +#if 0 + +// First, define a fixture class template. 
It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + public: + ... + typedef std::list List; + static T shared_; + T value_; +}; + +// Next, associate a list of types with the test suite, which will be +// repeated for each type in the list. The typedef is necessary for +// the macro to parse correctly. +typedef testing::Types MyTypes; +TYPED_TEST_SUITE(FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// TYPED_TEST_SUITE(FooTest, int); + +// Then, use TYPED_TEST() instead of TEST_F() to define as many typed +// tests for this test suite as you want. +TYPED_TEST(FooTest, DoesBlah) { + // Inside a test, refer to the special name TypeParam to get the type + // parameter. Since we are inside a derived class template, C++ requires + // us to visit the members of FooTest via 'this'. + TypeParam n = this->value_; + + // To visit static members of the fixture, add the TestFixture:: + // prefix. + n += TestFixture::shared_; + + // To refer to typedefs in the fixture, add the "typename + // TestFixture::" prefix. + typename TestFixture::List values; + values.push_back(n); + ... +} + +TYPED_TEST(FooTest, HasPropertyA) { ... } + +// TYPED_TEST_SUITE takes an optional third argument which allows to specify a +// class that generates custom test name suffixes based on the type. This should +// be a class which has a static template function GetName(int index) returning +// a string for each type. The provided integer index equals the index of the +// type in the provided type list. In many cases the index can be ignored. +// +// For example: +// class MyTypeNames { +// public: +// template +// static std::string GetName(int) { +// if (std::is_same()) return "char"; +// if (std::is_same()) return "int"; +// if (std::is_same()) return "unsignedInt"; +// } +// }; +// TYPED_TEST_SUITE(FooTest, MyTypes, MyTypeNames); + +#endif // 0 + +// Type-parameterized tests are abstract test patterns parameterized +// by a type. Compared with typed tests, type-parameterized tests +// allow you to define the test pattern without knowing what the type +// parameters are. The defined pattern can be instantiated with +// different types any number of times, in any number of translation +// units. +// +// If you are designing an interface or concept, you can define a +// suite of type-parameterized tests to verify properties that any +// valid implementation of the interface/concept should have. Then, +// each implementation can easily instantiate the test suite to verify +// that it conforms to the requirements, without having to write +// similar tests repeatedly. Here's an example: + +#if 0 + +// First, define a fixture class template. It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + ... +}; + +// Next, declare that you will define a type-parameterized test suite +// (the _P suffix is for "parameterized" or "pattern", whichever you +// prefer): +TYPED_TEST_SUITE_P(FooTest); + +// Then, use TYPED_TEST_P() to define as many type-parameterized tests +// for this type-parameterized test suite as you want. +TYPED_TEST_P(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + TypeParam n = 0; + ... +} + +TYPED_TEST_P(FooTest, HasPropertyA) { ... } + +// Now the tricky part: you need to register all test patterns before +// you can instantiate them. 
The first argument of the macro is the +// test suite name; the rest are the names of the tests in this test +// case. +REGISTER_TYPED_TEST_SUITE_P(FooTest, + DoesBlah, HasPropertyA); + +// Finally, you are free to instantiate the pattern with the types you +// want. If you put the above code in a header file, you can #include +// it in multiple C++ source files and instantiate it multiple times. +// +// To distinguish different instances of the pattern, the first +// argument to the INSTANTIATE_* macro is a prefix that will be added +// to the actual test suite name. Remember to pick unique prefixes for +// different instances. +typedef testing::Types MyTypes; +INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, int); +// +// Similar to the optional argument of TYPED_TEST_SUITE above, +// INSTANTIATE_TEST_SUITE_P takes an optional fourth argument which allows to +// generate custom names. +// INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes, MyTypeNames); + +#endif // 0 + +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" +#include "gtest/internal/gtest-type-util.h" + +// Implements typed tests. + +#if GTEST_HAS_TYPED_TEST + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the typedef for the type parameters of the +// given test suite. +#define GTEST_TYPE_PARAMS_(TestSuiteName) gtest_type_params_##TestSuiteName##_ + +// Expands to the name of the typedef for the NameGenerator, responsible for +// creating the suffixes of the name. +#define GTEST_NAME_GENERATOR_(TestSuiteName) \ + gtest_type_params_##TestSuiteName##_NameGenerator + +#define TYPED_TEST_SUITE(CaseName, Types, ...) \ + typedef ::testing::internal::GenerateTypeList::type \ + GTEST_TYPE_PARAMS_(CaseName); \ + typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type \ + GTEST_NAME_GENERATOR_(CaseName) + +#define TYPED_TEST(CaseName, TestName) \ + static_assert(sizeof(GTEST_STRINGIFY_(TestName)) > 1, \ + "test-name must not be empty"); \ + template \ + class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \ + : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + void TestBody() override; \ + }; \ + static bool gtest_##CaseName##_##TestName##_registered_ \ + GTEST_ATTRIBUTE_UNUSED_ = ::testing::internal::TypeParameterizedTest< \ + CaseName, \ + ::testing::internal::TemplateSel, \ + GTEST_TYPE_PARAMS_( \ + CaseName)>::Register("", \ + ::testing::internal::CodeLocation( \ + __FILE__, __LINE__), \ + GTEST_STRINGIFY_(CaseName), \ + GTEST_STRINGIFY_(TestName), 0, \ + ::testing::internal::GenerateNames< \ + GTEST_NAME_GENERATOR_(CaseName), \ + GTEST_TYPE_PARAMS_(CaseName)>()); \ + template \ + void GTEST_TEST_CLASS_NAME_(CaseName, \ + TestName)::TestBody() + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +#define TYPED_TEST_CASE \ + static_assert(::testing::internal::TypedTestCaseIsDeprecated(), ""); \ + TYPED_TEST_SUITE +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +#endif // GTEST_HAS_TYPED_TEST + +// Implements type-parameterized tests. + +#if GTEST_HAS_TYPED_TEST_P + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the namespace name that the type-parameterized tests for +// the given type-parameterized test suite are defined in. 
The exact +// name of the namespace is subject to change without notice. +#define GTEST_SUITE_NAMESPACE_(TestSuiteName) gtest_suite_##TestSuiteName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the variable used to remember the names of +// the defined tests in the given test suite. +#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) \ + gtest_typed_test_suite_p_state_##TestSuiteName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY. +// +// Expands to the name of the variable used to remember the names of +// the registered tests in the given test suite. +#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) \ + gtest_registered_test_names_##TestSuiteName##_ + +// The variables defined in the type-parameterized test macros are +// static as typically these macros are used in a .h file that can be +// #included in multiple translation units linked together. +#define TYPED_TEST_SUITE_P(SuiteName) \ + static ::testing::internal::TypedTestSuitePState \ + GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName) + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +#define TYPED_TEST_CASE_P \ + static_assert(::testing::internal::TypedTestCase_P_IsDeprecated(), ""); \ + TYPED_TEST_SUITE_P +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +#define TYPED_TEST_P(SuiteName, TestName) \ + namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \ + template \ + class TestName : public SuiteName { \ + private: \ + typedef SuiteName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + void TestBody() override; \ + }; \ + static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \ + GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName( \ + __FILE__, __LINE__, GTEST_STRINGIFY_(SuiteName), \ + GTEST_STRINGIFY_(TestName)); \ + } \ + template \ + void GTEST_SUITE_NAMESPACE_( \ + SuiteName)::TestName::TestBody() + +// Note: this won't work correctly if the trailing arguments are macros. +#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \ + namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \ + typedef ::testing::internal::Templates<__VA_ARGS__> gtest_AllTests_; \ + } \ + static const char* const GTEST_REGISTERED_TEST_NAMES_( \ + SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \ + GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames( \ + GTEST_STRINGIFY_(SuiteName), __FILE__, __LINE__, #__VA_ARGS__) + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +#define REGISTER_TYPED_TEST_CASE_P \ + static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), \ + ""); \ + REGISTER_TYPED_TEST_SUITE_P +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) 
\ + static_assert(sizeof(GTEST_STRINGIFY_(Prefix)) > 1, \ + "test-suit-prefix must not be empty"); \ + static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTestSuite< \ + SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \ + ::testing::internal::GenerateTypeList::type>:: \ + Register(GTEST_STRINGIFY_(Prefix), \ + ::testing::internal::CodeLocation(__FILE__, __LINE__), \ + >EST_TYPED_TEST_SUITE_P_STATE_(SuiteName), \ + GTEST_STRINGIFY_(SuiteName), \ + GTEST_REGISTERED_TEST_NAMES_(SuiteName), \ + ::testing::internal::GenerateNames< \ + ::testing::internal::NameGeneratorSelector< \ + __VA_ARGS__>::type, \ + ::testing::internal::GenerateTypeList::type>()) + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +#define INSTANTIATE_TYPED_TEST_CASE_P \ + static_assert( \ + ::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \ + INSTANTIATE_TYPED_TEST_SUITE_P +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +#endif // GTEST_HAS_TYPED_TEST_P + +#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest.h b/source/3rdparty/gtest/include/gtest/gtest.h new file mode 100644 index 0000000..464b316 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest.h @@ -0,0 +1,2477 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file defines the public API for Google Test. It should be +// included by any test program that uses Google Test. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! 
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <vector>
+
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-string.h"
+#include "gtest/gtest-death-test.h"
+#include "gtest/gtest-matchers.h"
+#include "gtest/gtest-message.h"
+#include "gtest/gtest-param-test.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest_prod.h"
+#include "gtest/gtest-test-part.h"
+#include "gtest/gtest-typed-test.h"
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+namespace testing {
+
+// Silence C4100 (unreferenced formal parameter) and 4805
+// unsafe mix of type 'const int' and type 'const bool'
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4805)
+# pragma warning(disable:4100)
+#endif
+
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag brings the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter to select by name using a glob pattern
+// the tests to run. If the filter is not given all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag controls whether Google Test installs a signal handler that dumps
+// debugging information when fatal signals are raised.
+GTEST_DECLARE_bool_(install_failure_signal_handler);
+
+// This flag causes Google Test to list tests. None of the tests listed
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag controls whether Google Test prints UTF8 characters as text.
+GTEST_DECLARE_bool_(print_utf8);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1 the tests are repeating forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise. For use with an external test framework.
+GTEST_DECLARE_bool_(throw_on_failure); + +// When this flag is set with a "host:port" string, on supported +// platforms test results are streamed to the specified port on +// the specified host machine. +GTEST_DECLARE_string_(stream_result_to); + +#if GTEST_USE_OWN_FLAGFILE_FLAG_ +GTEST_DECLARE_string_(flagfile); +#endif // GTEST_USE_OWN_FLAGFILE_FLAG_ + +// The upper limit for valid stack trace depths. +const int kMaxStackTraceDepth = 100; + +namespace internal { + +class AssertHelper; +class DefaultGlobalTestPartResultReporter; +class ExecDeathTest; +class NoExecDeathTest; +class FinalSuccessChecker; +class GTestFlagSaver; +class StreamingListenerTest; +class TestResultAccessor; +class TestEventListenersAccessor; +class TestEventRepeater; +class UnitTestRecordPropertyTestHelper; +class WindowsDeathTest; +class FuchsiaDeathTest; +class UnitTestImpl* GetUnitTestImpl(); +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const std::string& message); +std::set* GetIgnoredParameterizedTestSuites(); + +} // namespace internal + +// The friend relationship of some of these classes is cyclic. +// If we don't forward declare them the compiler might confuse the classes +// in friendship clauses with same named classes on the scope. +class Test; +class TestSuite; + +// Old API is still available but deprecated +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +using TestCase = TestSuite; +#endif +class TestInfo; +class UnitTest; + +// A class for indicating whether an assertion was successful. When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. 
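A self-contained sketch of the predicate usage described in the AssertionResult comments above; the function and test names here are illustrative and are not part of the vendored header.

#include "gtest/gtest.h"

namespace {

// Predicate with messages for both outcomes, as the comments recommend.
testing::AssertionResult IsEven(int n) {
  if ((n % 2) == 0) return testing::AssertionSuccess() << n << " is even";
  return testing::AssertionFailure() << n << " is odd";
}

TEST(AssertionResultExample, UsesBooleanPredicate) {
  EXPECT_TRUE(IsEven(4));   // passes
  EXPECT_FALSE(IsEven(5));  // passes; the negative assertion reuses the message
  // EXPECT_TRUE(IsEven(3)) would report: Actual: false (3 is odd)
}

}  // namespace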
+// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + +#if defined(_MSC_VER) && _MSC_VER < 1910 + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */) +#endif + + // Used in the EXPECT_TRUE/FALSE(bool_expression). + // + // T must be contextually convertible to bool. + // + // The second parameter prevents this overload from being considered if + // the argument is implicitly convertible to AssertionResult. In that case + // we want AssertionResult's copy constructor to be used. + template + explicit AssertionResult( + const T& success, + typename std::enable_if< + !std::is_convertible::value>::type* + /*enabler*/ + = nullptr) + : success_(success) {} + +#if defined(_MSC_VER) && _MSC_VER < 1910 + GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif + + // Assignment operator. + AssertionResult& operator=(AssertionResult other) { + swap(other); + return *this; + } + + // Returns true if and only if the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != nullptr ? message_->c_str() : ""; + } + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. + AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == nullptr) message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Swap the contents of this AssertionResult with other. + void swap(AssertionResult& other); + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + std::unique_ptr< ::std::string> message_; +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. 
+GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. +GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +} // namespace testing + +// Includes the auto-generated header that implements a family of generic +// predicate assertion macros. This include comes late because it relies on +// APIs declared above. +#include "gtest/gtest_pred_impl.h" + +namespace testing { + +// The abstract class that all tests inherit from. +// +// In Google Test, a unit test program contains one or many TestSuites, and +// each TestSuite contains one or many Tests. +// +// When you define a test using the TEST macro, you don't need to +// explicitly derive from Test - the TEST macro automatically does +// this for you. +// +// The only time you derive from Test is when defining a test fixture +// to be used in a TEST_F. For example: +// +// class FooTest : public testing::Test { +// protected: +// void SetUp() override { ... } +// void TearDown() override { ... } +// ... +// }; +// +// TEST_F(FooTest, Bar) { ... } +// TEST_F(FooTest, Baz) { ... } +// +// Test is not copyable. +class GTEST_API_ Test { + public: + friend class TestInfo; + + // The d'tor is virtual as we intend to inherit from Test. + virtual ~Test(); + + // Sets up the stuff shared by all tests in this test case. + // + // Google Test will call Foo::SetUpTestSuite() before running the first + // test in test case Foo. Hence a sub-class can define its own + // SetUpTestSuite() method to shadow the one defined in the super + // class. + static void SetUpTestSuite() {} + + // Tears down the stuff shared by all tests in this test suite. + // + // Google Test will call Foo::TearDownTestSuite() after running the last + // test in test case Foo. Hence a sub-class can define its own + // TearDownTestSuite() method to shadow the one defined in the super + // class. + static void TearDownTestSuite() {} + + // Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + static void TearDownTestCase() {} + static void SetUpTestCase() {} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Returns true if and only if the current test has a fatal failure. + static bool HasFatalFailure(); + + // Returns true if and only if the current test has a non-fatal failure. + static bool HasNonfatalFailure(); + + // Returns true if and only if the current test was skipped. + static bool IsSkipped(); + + // Returns true if and only if the current test has a (either fatal or + // non-fatal) failure. + static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); } + + // Logs a property for the current test, test suite, or for the entire + // invocation of the test program when used outside of the context of a + // test suite. Only the last value for a given key is remembered. These + // are public static so they can be called from utility functions that are + // not members of the test fixture. Calls to RecordProperty made during + // lifespan of the test (from the moment its constructor starts to the + // moment its destructor finishes) will be output in XML as attributes of + // the element. Properties recorded from fixture's + // SetUpTestSuite or TearDownTestSuite are logged as attributes of the + // corresponding element. 
Calls to RecordProperty made in the + // global context (before or after invocation of RUN_ALL_TESTS and from + // SetUp/TearDown method of Environment objects registered with Google + // Test) will be output as attributes of the element. + static void RecordProperty(const std::string& key, const std::string& value); + static void RecordProperty(const std::string& key, int value); + + protected: + // Creates a Test object. + Test(); + + // Sets up the test fixture. + virtual void SetUp(); + + // Tears down the test fixture. + virtual void TearDown(); + + private: + // Returns true if and only if the current test has the same fixture class + // as the first test in the current test suite. + static bool HasSameFixtureClass(); + + // Runs the test after the test fixture has been set up. + // + // A sub-class must implement this to define the test logic. + // + // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM. + // Instead, use the TEST or TEST_F macro. + virtual void TestBody() = 0; + + // Sets up, executes, and tears down the test. + void Run(); + + // Deletes self. We deliberately pick an unusual name for this + // internal method to avoid clashing with names used in user TESTs. + void DeleteSelf_() { delete this; } + + const std::unique_ptr gtest_flag_saver_; + + // Often a user misspells SetUp() as Setup() and spends a long time + // wondering why it is never called by Google Test. The declaration of + // the following method is solely for catching such an error at + // compile time: + // + // - The return type is deliberately chosen to be not void, so it + // will be a conflict if void Setup() is declared in the user's + // test fixture. + // + // - This method is private, so it will be another compiler error + // if the method is called from the user's test fixture. + // + // DO NOT OVERRIDE THIS FUNCTION. + // + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return nullptr; } + + // We disallow copying Tests. + GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); +}; + +typedef internal::TimeInMillis TimeInMillis; + +// A copyable object representing a user specified test property which can be +// output as a key/value string pair. +// +// Don't inherit from TestProperty as its destructor is not virtual. +class TestProperty { + public: + // C'tor. TestProperty does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestProperty object. + TestProperty(const std::string& a_key, const std::string& a_value) : + key_(a_key), value_(a_value) { + } + + // Gets the user supplied key. + const char* key() const { + return key_.c_str(); + } + + // Gets the user supplied value. + const char* value() const { + return value_.c_str(); + } + + // Sets a new value, overriding the one supplied in the constructor. + void SetValue(const std::string& new_value) { + value_ = new_value; + } + + private: + // The key supplied by the user. + std::string key_; + // The value supplied by the user. + std::string value_; +}; + +// The result of a single Test. This includes a list of +// TestPartResults, a list of TestProperties, a count of how many +// death tests there are in the Test, and how much time it took to run +// the Test. +// +// TestResult is not copyable. +class GTEST_API_ TestResult { + public: + // Creates an empty TestResult. + TestResult(); + + // D'tor. 
Do not inherit from TestResult. + ~TestResult(); + + // Gets the number of all test parts. This is the sum of the number + // of successful test parts and the number of failed test parts. + int total_part_count() const; + + // Returns the number of the test properties. + int test_property_count() const; + + // Returns true if and only if the test passed (i.e. no test part failed). + bool Passed() const { return !Skipped() && !Failed(); } + + // Returns true if and only if the test was skipped. + bool Skipped() const; + + // Returns true if and only if the test failed. + bool Failed() const; + + // Returns true if and only if the test fatally failed. + bool HasFatalFailure() const; + + // Returns true if and only if the test has a non-fatal failure. + bool HasNonfatalFailure() const; + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Gets the time of the test case start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const { return start_timestamp_; } + + // Returns the i-th test part result among all the results. i can range from 0 + // to total_part_count() - 1. If i is not in that range, aborts the program. + const TestPartResult& GetTestPartResult(int i) const; + + // Returns the i-th test property. i can range from 0 to + // test_property_count() - 1. If i is not in that range, aborts the + // program. + const TestProperty& GetTestProperty(int i) const; + + private: + friend class TestInfo; + friend class TestSuite; + friend class UnitTest; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::ExecDeathTest; + friend class internal::TestResultAccessor; + friend class internal::UnitTestImpl; + friend class internal::WindowsDeathTest; + friend class internal::FuchsiaDeathTest; + + // Gets the vector of TestPartResults. + const std::vector& test_part_results() const { + return test_part_results_; + } + + // Gets the vector of TestProperties. + const std::vector& test_properties() const { + return test_properties_; + } + + // Sets the start time. + void set_start_timestamp(TimeInMillis start) { start_timestamp_ = start; } + + // Sets the elapsed time. + void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; } + + // Adds a test property to the list. The property is validated and may add + // a non-fatal failure if invalid (e.g., if it conflicts with reserved + // key names). If a property is already recorded for the same key, the + // value will be updated, rather than storing multiple values for the same + // key. xml_element specifies the element for which the property is being + // recorded and is used for validation. + void RecordProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a failure if the key is a reserved attribute of Google Test + // testsuite tags. Returns true if the property is valid. + // FIXME: Validate attribute names are legal and human readable. + static bool ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a test part result to the list. + void AddTestPartResult(const TestPartResult& test_part_result); + + // Returns the death test count. + int death_test_count() const { return death_test_count_; } + + // Increments the death test count, returning the new count. + int increment_death_test_count() { return ++death_test_count_; } + + // Clears the test part results. + void ClearTestPartResults(); + + // Clears the object. 
+ void Clear(); + + // Protects mutable state of the property vector and of owned + // properties, whose values may be updated. + internal::Mutex test_properites_mutex_; + + // The vector of TestPartResults + std::vector test_part_results_; + // The vector of TestProperties + std::vector test_properties_; + // Running count of death tests. + int death_test_count_; + // The start time, in milliseconds since UNIX Epoch. + TimeInMillis start_timestamp_; + // The elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestResult. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); +}; // class TestResult + +// A TestInfo object stores the following information about a test: +// +// Test suite name +// Test name +// Whether the test should be run +// A function pointer that creates the test object when invoked +// Test result +// +// The constructor of TestInfo registers itself with the UnitTest +// singleton such that the RUN_ALL_TESTS() macro knows which tests to +// run. +class GTEST_API_ TestInfo { + public: + // Destructs a TestInfo object. This function is not virtual, so + // don't inherit from TestInfo. + ~TestInfo(); + + // Returns the test suite name. + const char* test_suite_name() const { return test_suite_name_.c_str(); } + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + const char* test_case_name() const { return test_suite_name(); } +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Returns the test name. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a typed + // or a type-parameterized test. + const char* type_param() const { + if (type_param_.get() != nullptr) return type_param_->c_str(); + return nullptr; + } + + // Returns the text representation of the value parameter, or NULL if this + // is not a value-parameterized test. + const char* value_param() const { + if (value_param_.get() != nullptr) return value_param_->c_str(); + return nullptr; + } + + // Returns the file name where this test is defined. + const char* file() const { return location_.file.c_str(); } + + // Returns the line where this test is defined. + int line() const { return location_.line; } + + // Return true if this test should not be run because it's in another shard. + bool is_in_another_shard() const { return is_in_another_shard_; } + + // Returns true if this test should run, that is if the test is not + // disabled (or it is disabled but the also_run_disabled_tests flag has + // been specified) and its full name matches the user-specified filter. + // + // Google Test allows the user to filter the tests by their full names. + // The full name of a test Bar in test suite Foo is defined as + // "Foo.Bar". Only the tests that match the filter will run. + // + // A filter is a colon-separated list of glob (not regex) patterns, + // optionally followed by a '-' and a colon-separated list of + // negative patterns (tests to exclude). A test is run if it + // matches one of the positive patterns and does not match any of + // the negative patterns. + // + // For example, *A*:Foo.* is a filter that matches any string that + // contains the character 'A' or starts with "Foo.". + bool should_run() const { return should_run_; } + + // Returns true if and only if this test will appear in the XML report. + bool is_reportable() const { + // The XML report includes tests matching the filter, excluding those + // run in other shards. 
+ return matches_filter_ && !is_in_another_shard_; + } + + // Returns the result of the test. + const TestResult* result() const { return &result_; } + + private: +#if GTEST_HAS_DEATH_TEST + friend class internal::DefaultDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + friend class Test; + friend class TestSuite; + friend class internal::UnitTestImpl; + friend class internal::StreamingListenerTest; + friend TestInfo* internal::MakeAndRegisterTestInfo( + const char* test_suite_name, const char* name, const char* type_param, + const char* value_param, internal::CodeLocation code_location, + internal::TypeId fixture_class_id, internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc, + internal::TestFactoryBase* factory); + + // Constructs a TestInfo object. The newly constructed instance assumes + // ownership of the factory object. + TestInfo(const std::string& test_suite_name, const std::string& name, + const char* a_type_param, // NULL if not a type-parameterized test + const char* a_value_param, // NULL if not a value-parameterized test + internal::CodeLocation a_code_location, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory); + + // Increments the number of death tests encountered in this test so + // far. + int increment_death_test_count() { + return result_.increment_death_test_count(); + } + + // Creates the test object, runs it, records its result, and then + // deletes it. + void Run(); + + static void ClearTestResult(TestInfo* test_info) { + test_info->result_.Clear(); + } + + // These fields are immutable properties of the test. + const std::string test_suite_name_; // test suite name + const std::string name_; // Test name + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const std::unique_ptr type_param_; + // Text representation of the value parameter, or NULL if this is not a + // value-parameterized test. + const std::unique_ptr value_param_; + internal::CodeLocation location_; + const internal::TypeId fixture_class_id_; // ID of the test fixture class + bool should_run_; // True if and only if this test should run + bool is_disabled_; // True if and only if this test is disabled + bool matches_filter_; // True if this test matches the + // user-specified filter. + bool is_in_another_shard_; // Will be run in another shard. + internal::TestFactoryBase* const factory_; // The factory that creates + // the test object + + // This field is mutable and needs to be reset before running the + // test for the second time. + TestResult result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); +}; + +// A test suite, which consists of a vector of TestInfos. +// +// TestSuite is not copyable. +class GTEST_API_ TestSuite { + public: + // Creates a TestSuite with the given name. + // + // TestSuite does NOT have a default constructor. Always use this + // constructor to create a TestSuite object. + // + // Arguments: + // + // name: name of the test suite + // a_type_param: the name of the test's type parameter, or NULL if + // this is not a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test suite + // tear_down_tc: pointer to the function that tears down the test suite + TestSuite(const char* name, const char* a_type_param, + internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc); + + // Destructor of TestSuite. + virtual ~TestSuite(); + + // Gets the name of the TestSuite. 
+ const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a + // type-parameterized test suite. + const char* type_param() const { + if (type_param_.get() != nullptr) return type_param_->c_str(); + return nullptr; + } + + // Returns true if any test in this test suite should run. + bool should_run() const { return should_run_; } + + // Gets the number of successful tests in this test suite. + int successful_test_count() const; + + // Gets the number of skipped tests in this test suite. + int skipped_test_count() const; + + // Gets the number of failed tests in this test suite. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests in this test suite. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Get the number of tests in this test suite that should run. + int test_to_run_count() const; + + // Gets the number of all tests in this test suite. + int total_test_count() const; + + // Returns true if and only if the test suite passed. + bool Passed() const { return !Failed(); } + + // Returns true if and only if the test suite failed. + bool Failed() const { + return failed_test_count() > 0 || ad_hoc_test_result().Failed(); + } + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Gets the time of the test suite start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const { return start_timestamp_; } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + const TestInfo* GetTestInfo(int i) const; + + // Returns the TestResult that holds test properties recorded during + // execution of SetUpTestSuite and TearDownTestSuite. + const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; } + + private: + friend class Test; + friend class internal::UnitTestImpl; + + // Gets the (mutable) vector of TestInfos in this TestSuite. + std::vector& test_info_list() { return test_info_list_; } + + // Gets the (immutable) vector of TestInfos in this TestSuite. + const std::vector& test_info_list() const { + return test_info_list_; + } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + TestInfo* GetMutableTestInfo(int i); + + // Sets the should_run member. + void set_should_run(bool should) { should_run_ = should; } + + // Adds a TestInfo to this test suite. Will delete the TestInfo upon + // destruction of the TestSuite object. + void AddTestInfo(TestInfo * test_info); + + // Clears the results of all tests in this test suite. + void ClearResult(); + + // Clears the results of all tests in the given test suite. + static void ClearTestSuiteResult(TestSuite* test_suite) { + test_suite->ClearResult(); + } + + // Runs every test in this TestSuite. + void Run(); + + // Runs SetUpTestSuite() for this TestSuite. This wrapper is needed + // for catching exceptions thrown from SetUpTestSuite(). + void RunSetUpTestSuite() { + if (set_up_tc_ != nullptr) { + (*set_up_tc_)(); + } + } + + // Runs TearDownTestSuite() for this TestSuite. This wrapper is + // needed for catching exceptions thrown from TearDownTestSuite(). 
+ void RunTearDownTestSuite() { + if (tear_down_tc_ != nullptr) { + (*tear_down_tc_)(); + } + } + + // Returns true if and only if test passed. + static bool TestPassed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Passed(); + } + + // Returns true if and only if test skipped. + static bool TestSkipped(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Skipped(); + } + + // Returns true if and only if test failed. + static bool TestFailed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Failed(); + } + + // Returns true if and only if the test is disabled and will be reported in + // the XML report. + static bool TestReportableDisabled(const TestInfo* test_info) { + return test_info->is_reportable() && test_info->is_disabled_; + } + + // Returns true if and only if test is disabled. + static bool TestDisabled(const TestInfo* test_info) { + return test_info->is_disabled_; + } + + // Returns true if and only if this test will appear in the XML report. + static bool TestReportable(const TestInfo* test_info) { + return test_info->is_reportable(); + } + + // Returns true if the given test should run. + static bool ShouldRunTest(const TestInfo* test_info) { + return test_info->should_run(); + } + + // Shuffles the tests in this test suite. + void ShuffleTests(internal::Random* random); + + // Restores the test order to before the first shuffle. + void UnshuffleTests(); + + // Name of the test suite. + std::string name_; + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const std::unique_ptr type_param_; + // The vector of TestInfos in their original order. It owns the + // elements in the vector. + std::vector test_info_list_; + // Provides a level of indirection for the test list to allow easy + // shuffling and restoring the test order. The i-th element in this + // vector is the index of the i-th test in the shuffled test list. + std::vector test_indices_; + // Pointer to the function that sets up the test suite. + internal::SetUpTestSuiteFunc set_up_tc_; + // Pointer to the function that tears down the test suite. + internal::TearDownTestSuiteFunc tear_down_tc_; + // True if and only if any test in this test suite should run. + bool should_run_; + // The start time, in milliseconds since UNIX Epoch. + TimeInMillis start_timestamp_; + // Elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + // Holds test properties recorded during execution of SetUpTestSuite and + // TearDownTestSuite. + TestResult ad_hoc_test_result_; + + // We disallow copying TestSuites. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestSuite); +}; + +// An Environment object is capable of setting up and tearing down an +// environment. You should subclass this to define your own +// environment(s). +// +// An Environment object does the set-up and tear-down in virtual +// methods SetUp() and TearDown() instead of the constructor and the +// destructor, as: +// +// 1. You cannot safely throw from a destructor. This is a problem +// as in some cases Google Test is used where exceptions are enabled, and +// we may want to implement ASSERT_* using exceptions where they are +// available. +// 2. You cannot use ASSERT_* directly in a constructor or +// destructor. +class Environment { + public: + // The d'tor is virtual as we need to subclass Environment. + virtual ~Environment() {} + + // Override this to define how to set up the environment. 
+ virtual void SetUp() {} + + // Override this to define how to tear down the environment. + virtual void TearDown() {} + private: + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return nullptr; } +}; + +#if GTEST_HAS_EXCEPTIONS + +// Exception which can be thrown from TestEventListener::OnTestPartResult. +class GTEST_API_ AssertionException + : public internal::GoogleTestFailureException { + public: + explicit AssertionException(const TestPartResult& result) + : GoogleTestFailureException(result) {} +}; + +#endif // GTEST_HAS_EXCEPTIONS + +// The interface for tracing execution of tests. The methods are organized in +// the order the corresponding events are fired. +class TestEventListener { + public: + virtual ~TestEventListener() {} + + // Fired before any test activity starts. + virtual void OnTestProgramStart(const UnitTest& unit_test) = 0; + + // Fired before each iteration of tests starts. There may be more than + // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration + // index, starting from 0. + virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test suite starts. + virtual void OnTestSuiteStart(const TestSuite& /*test_suite*/) {} + + // Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + virtual void OnTestCaseStart(const TestCase& /*test_case*/) {} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. + // If you want to throw an exception from this function to skip to the next + // TEST, it must be AssertionException defined above, or inherited from it. + virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test suite ends. + virtual void OnTestSuiteEnd(const TestSuite& /*test_suite*/) {} + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. 
For +// comments about each method please see the definition of TestEventListener +// above. +class EmptyTestEventListener : public TestEventListener { + public: + void OnTestProgramStart(const UnitTest& /*unit_test*/) override {} + void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) override {} + void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) override {} + void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {} + void OnTestSuiteStart(const TestSuite& /*test_suite*/) override {} +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestCaseStart(const TestCase& /*test_case*/) override {} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + void OnTestStart(const TestInfo& /*test_info*/) override {} + void OnTestPartResult(const TestPartResult& /*test_part_result*/) override {} + void OnTestEnd(const TestInfo& /*test_info*/) override {} + void OnTestSuiteEnd(const TestSuite& /*test_suite*/) override {} +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestCaseEnd(const TestCase& /*test_case*/) override {} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) override {} + void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {} + void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) override {} + void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {} +}; + +// TestEventListeners lets users add listeners to track events in Google Test. +class GTEST_API_ TestEventListeners { + public: + TestEventListeners(); + ~TestEventListeners(); + + // Appends an event listener to the end of the list. Google Test assumes + // the ownership of the listener (i.e. it will delete the listener when + // the test program finishes). + void Append(TestEventListener* listener); + + // Removes the given event listener from the list and returns it. It then + // becomes the caller's responsibility to delete the listener. Returns + // NULL if the listener is not found in the list. + TestEventListener* Release(TestEventListener* listener); + + // Returns the standard listener responsible for the default console + // output. Can be removed from the listeners list to shut down default + // console output. Note that removing this object from the listener list + // with Release transfers its ownership to the caller and makes this + // function return NULL the next time. + TestEventListener* default_result_printer() const { + return default_result_printer_; + } + + // Returns the standard listener responsible for the default XML output + // controlled by the --gtest_output=xml flag. Can be removed from the + // listeners list by users who want to shut down the default XML output + // controlled by this flag and substitute it with custom one. Note that + // removing this object from the listener list with Release transfers its + // ownership to the caller and makes this function return NULL the next + // time. + TestEventListener* default_xml_generator() const { + return default_xml_generator_; + } + + private: + friend class TestSuite; + friend class TestInfo; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::NoExecDeathTest; + friend class internal::TestEventListenersAccessor; + friend class internal::UnitTestImpl; + + // Returns repeater that broadcasts the TestEventListener events to all + // subscribers. 
+ TestEventListener* repeater(); + + // Sets the default_result_printer attribute to the provided listener. + // The listener is also added to the listener list and previous + // default_result_printer is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultResultPrinter(TestEventListener* listener); + + // Sets the default_xml_generator attribute to the provided listener. The + // listener is also added to the listener list and previous + // default_xml_generator is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultXmlGenerator(TestEventListener* listener); + + // Controls whether events will be forwarded by the repeater to the + // listeners in the list. + bool EventForwardingEnabled() const; + void SuppressEventForwarding(); + + // The actual list of listeners. + internal::TestEventRepeater* repeater_; + // Listener responsible for the standard result output. + TestEventListener* default_result_printer_; + // Listener responsible for the creation of the XML output file. + TestEventListener* default_xml_generator_; + + // We disallow copying TestEventListeners. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners); +}; + +// A UnitTest consists of a vector of TestSuites. +// +// This is a singleton class. The only instance of UnitTest is +// created when UnitTest::GetInstance() is first called. This +// instance is never deleted. +// +// UnitTest is not copyable. +// +// This class is thread-safe as long as the methods are called +// according to their specification. +class GTEST_API_ UnitTest { + public: + // Gets the singleton UnitTest object. The first time this method + // is called, a UnitTest object is constructed and returned. + // Consecutive calls will return the same object. + static UnitTest* GetInstance(); + + // Runs all tests in this UnitTest object and prints the result. + // Returns 0 if successful, or 1 otherwise. + // + // This method can only be called from the main thread. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + int Run() GTEST_MUST_USE_RESULT_; + + // Returns the working directory when the first TEST() or TEST_F() + // was executed. The UnitTest object owns the string. + const char* original_working_dir() const; + + // Returns the TestSuite object for the test that's currently running, + // or NULL if no test is running. + const TestSuite* current_test_suite() const GTEST_LOCK_EXCLUDED_(mutex_); + +// Legacy API is still available but deprecated +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + const TestCase* current_test_case() const GTEST_LOCK_EXCLUDED_(mutex_); +#endif + + // Returns the TestInfo object for the test that's currently running, + // or NULL if no test is running. + const TestInfo* current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the random seed used at the start of the current test run. + int random_seed() const; + + // Returns the ParameterizedTestSuiteRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::ParameterizedTestSuiteRegistry& parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_); + + // Gets the number of successful test suites. 
+ int successful_test_suite_count() const; + + // Gets the number of failed test suites. + int failed_test_suite_count() const; + + // Gets the number of all test suites. + int total_test_suite_count() const; + + // Gets the number of all test suites that contain at least one test + // that should run. + int test_suite_to_run_count() const; + + // Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + int successful_test_case_count() const; + int failed_test_case_count() const; + int total_test_case_count() const; + int test_case_to_run_count() const; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of skipped tests. + int skipped_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const; + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const; + + // Returns true if and only if the unit test passed (i.e. all test suites + // passed). + bool Passed() const; + + // Returns true if and only if the unit test failed (i.e. some test suite + // failed or something outside of all tests failed). + bool Failed() const; + + // Gets the i-th test suite among all the test suites. i can range from 0 to + // total_test_suite_count() - 1. If i is not in that range, returns NULL. + const TestSuite* GetTestSuite(int i) const; + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + const TestCase* GetTestCase(int i) const; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Returns the TestResult containing information on test failures and + // properties logged outside of individual test suites. + const TestResult& ad_hoc_test_result() const; + + // Returns the list of event listeners that can be used to track events + // inside Google Test. + TestEventListeners& listeners(); + + private: + // Registers and returns a global test environment. When a test + // program is run, all global test environments will be set-up in + // the order they were registered. After all tests in the program + // have finished, all global test environments will be torn-down in + // the *reverse* order they were registered. + // + // The UnitTest object takes ownership of the given environment. + // + // This method can only be called from the main thread. + Environment* AddEnvironment(Environment* env); + + // Adds a TestPartResult to the current TestResult object. All + // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) + // eventually call this to report their results. The user code + // should use the assertion macros instead of calling this directly. 
+ void AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const std::string& message, + const std::string& os_stack_trace) + GTEST_LOCK_EXCLUDED_(mutex_); + + // Adds a TestProperty to the current TestResult object when invoked from + // inside a test, to current TestSuite's ad_hoc_test_result_ when invoked + // from SetUpTestSuite or TearDownTestSuite, or to the global property set + // when invoked elsewhere. If the result already contains a property with + // the same key, the value will be updated. + void RecordProperty(const std::string& key, const std::string& value); + + // Gets the i-th test suite among all the test suites. i can range from 0 to + // total_test_suite_count() - 1. If i is not in that range, returns NULL. + TestSuite* GetMutableTestSuite(int i); + + // Accessors for the implementation object. + internal::UnitTestImpl* impl() { return impl_; } + const internal::UnitTestImpl* impl() const { return impl_; } + + // These classes and functions are friends as they need to access private + // members of UnitTest. + friend class ScopedTrace; + friend class Test; + friend class internal::AssertHelper; + friend class internal::StreamingListenerTest; + friend class internal::UnitTestRecordPropertyTestHelper; + friend Environment* AddGlobalTestEnvironment(Environment* env); + friend std::set* internal::GetIgnoredParameterizedTestSuites(); + friend internal::UnitTestImpl* internal::GetUnitTestImpl(); + friend void internal::ReportFailureInUnknownLocation( + TestPartResult::Type result_type, + const std::string& message); + + // Creates an empty UnitTest. + UnitTest(); + + // D'tor + virtual ~UnitTest(); + + // Pushes a trace defined by SCOPED_TRACE() on to the per-thread + // Google Test trace stack. + void PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_); + + // Pops a trace from the per-thread Google Test trace stack. + void PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_); + + // Protects mutable state in *impl_. This is mutable as some const + // methods need to lock it too. + mutable internal::Mutex mutex_; + + // Opaque implementation object. This field is never changed once + // the object is constructed. We don't mark it as const here, as + // doing so will cause a warning in the constructor of UnitTest. + // Mutable state in *impl_ is protected by mutex_. + internal::UnitTestImpl* impl_; + + // We disallow copying UnitTest. + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest); +}; + +// A convenient wrapper for adding an environment for the test +// program. +// +// You should call this before RUN_ALL_TESTS() is called, probably in +// main(). If you use gtest_main, you need to call this before main() +// starts for it to take effect. For example, you can define a global +// variable like this: +// +// testing::Environment* const foo_env = +// testing::AddGlobalTestEnvironment(new FooEnvironment); +// +// However, we strongly recommend you to write your own main() and +// call AddGlobalTestEnvironment() there, as relying on initialization +// of global variables makes the code harder to read and may cause +// problems when you register multiple environments from different +// translation units and the environments have dependencies among them +// (remember that the compiler doesn't guarantee the order in which +// global variables from different translation units are initialized). 
+inline Environment* AddGlobalTestEnvironment(Environment* env) { + return UnitTest::GetInstance()->AddEnvironment(env); +} + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +GTEST_API_ void InitGoogleTest(int* argc, char** argv); + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv); + +// This overloaded version can be used on Arduino/embedded platforms where +// there is no argc/argv. +GTEST_API_ void InitGoogleTest(); + +namespace internal { + +// Separate the error generating code from the code path to reduce the stack +// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers +// when calling EXPECT_* in a tight loop. +template +AssertionResult CmpHelperEQFailure(const char* lhs_expression, + const char* rhs_expression, + const T1& lhs, const T2& rhs) { + return EqFailure(lhs_expression, + rhs_expression, + FormatForComparisonFailureMessage(lhs, rhs), + FormatForComparisonFailureMessage(rhs, lhs), + false); +} + +// This block of code defines operator==/!= +// to block lexical scope lookup. +// It prevents using invalid operator==/!= defined at namespace scope. +struct faketype {}; +inline bool operator==(faketype, faketype) { return true; } +inline bool operator!=(faketype, faketype) { return false; } + +// The helper function for {ASSERT|EXPECT}_EQ. +template +AssertionResult CmpHelperEQ(const char* lhs_expression, + const char* rhs_expression, + const T1& lhs, + const T2& rhs) { + if (lhs == rhs) { + return AssertionSuccess(); + } + + return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs); +} + +// With this overloaded version, we allow anonymous enums to be used +// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums +// can be implicitly cast to BiggestInt. +GTEST_API_ AssertionResult CmpHelperEQ(const char* lhs_expression, + const char* rhs_expression, + BiggestInt lhs, + BiggestInt rhs); + +class EqHelper { + public: + // This templatized version is for the general case. + template < + typename T1, typename T2, + // Disable this overload for cases where one argument is a pointer + // and the other is the null pointer constant. + typename std::enable_if::value || + !std::is_pointer::value>::type* = nullptr> + static AssertionResult Compare(const char* lhs_expression, + const char* rhs_expression, const T1& lhs, + const T2& rhs) { + return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs); + } + + // With this overloaded version, we allow anonymous enums to be used + // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous + // enums can be implicitly cast to BiggestInt. + // + // Even though its body looks the same as the above version, we + // cannot merge the two, as it will make anonymous enums unhappy. 
+ static AssertionResult Compare(const char* lhs_expression, + const char* rhs_expression, + BiggestInt lhs, + BiggestInt rhs) { + return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs); + } + + template + static AssertionResult Compare( + const char* lhs_expression, const char* rhs_expression, + // Handle cases where '0' is used as a null pointer literal. + std::nullptr_t /* lhs */, T* rhs) { + // We already know that 'lhs' is a null pointer. + return CmpHelperEQ(lhs_expression, rhs_expression, static_cast(nullptr), + rhs); + } +}; + +// Separate the error generating code from the code path to reduce the stack +// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers +// when calling EXPECT_OP in a tight loop. +template +AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2, + const T1& val1, const T2& val2, + const char* op) { + return AssertionFailure() + << "Expected: (" << expr1 << ") " << op << " (" << expr2 + << "), actual: " << FormatForComparisonFailureMessage(val1, val2) + << " vs " << FormatForComparisonFailureMessage(val2, val1); +} + +// A macro for implementing the helper functions needed to implement +// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste +// of similar code. +// +// For each templatized helper function, we also define an overloaded +// version for BiggestInt in order to reduce code bloat and allow +// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled +// with gcc 4. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +template \ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + const T1& val1, const T2& val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\ + }\ +}\ +GTEST_API_ AssertionResult CmpHelper##op_name(\ + const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2) + +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +// Implements the helper function for {ASSERT|EXPECT}_NE +GTEST_IMPL_CMP_HELPER_(NE, !=); +// Implements the helper function for {ASSERT|EXPECT}_LE +GTEST_IMPL_CMP_HELPER_(LE, <=); +// Implements the helper function for {ASSERT|EXPECT}_LT +GTEST_IMPL_CMP_HELPER_(LT, <); +// Implements the helper function for {ASSERT|EXPECT}_GE +GTEST_IMPL_CMP_HELPER_(GE, >=); +// Implements the helper function for {ASSERT|EXPECT}_GT +GTEST_IMPL_CMP_HELPER_(GT, >); + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRNE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + + +// Helper function for *_STREQ on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2); + +// Helper function for *_STRNE on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2); + +} // namespace internal + +// IsSubstring() and IsNotSubstring() are intended to be used as the +// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by +// themselves. They check whether needle is a substring of haystack +// (NULL is considered a substring of itself only), and return an +// appropriate error message when they fail. +// +// The {needle,haystack}_expr arguments are the stringified +// expressions that generated the two real arguments. +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack); +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack); +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack); + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack); +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +// Helper template function for comparing floating-points. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +template +AssertionResult CmpHelperFloatingPointEQ(const char* lhs_expression, + const char* rhs_expression, + RawType lhs_value, + RawType rhs_value) { + const FloatingPoint lhs(lhs_value), rhs(rhs_value); + + if (lhs.AlmostEquals(rhs)) { + return AssertionSuccess(); + } + + ::std::stringstream lhs_ss; + lhs_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << lhs_value; + + ::std::stringstream rhs_ss; + rhs_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << rhs_value; + + return EqFailure(lhs_expression, + rhs_expression, + StringStreamToString(&lhs_ss), + StringStreamToString(&rhs_ss), + false); +} + +// Helper function for implementing ASSERT_NEAR. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
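The IsSubstring/IsNotSubstring overloads declared above are meant to be passed as the formatter argument of {EXPECT,ASSERT}_PRED_FORMAT2 rather than called directly. A minimal sketch of that usage; the test name and string values are made up for illustration:

#include <string>

TEST(UrlTest, HostAppearsInUrl) {
  const std::string url  = "https://example.com/index.html";
  const std::string host = "example.com";
  // On failure, the formatter reports both expressions and their values.
  EXPECT_PRED_FORMAT2(::testing::IsSubstring, host, url);
  EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, std::string("ftp://"), url);
}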
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error); + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// A class that enables one to stream messages to assertion macros +class GTEST_API_ AssertHelper { + public: + // Constructor. + AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message); + ~AssertHelper(); + + // Message assignment is a semantic trick to enable assertion + // streaming; see the GTEST_MESSAGE_ macro below. + void operator=(const Message& message) const; + + private: + // We put our data in a struct so that the size of the AssertHelper class can + // be as small as possible. This is important because gcc is incapable of + // re-using stack space even for temporary variables, so every EXPECT_EQ + // reserves stack space for another AssertHelper. + struct AssertHelperData { + AssertHelperData(TestPartResult::Type t, + const char* srcfile, + int line_num, + const char* msg) + : type(t), file(srcfile), line(line_num), message(msg) { } + + TestPartResult::Type const type; + const char* const file; + int const line; + std::string const message; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData); + }; + + AssertHelperData* const data_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper); +}; + +enum GTestColor { COLOR_DEFAULT, COLOR_RED, COLOR_GREEN, COLOR_YELLOW }; + +GTEST_API_ GTEST_ATTRIBUTE_PRINTF_(2, 3) void ColoredPrintf(GTestColor color, + const char* fmt, + ...); + +} // namespace internal + +// The pure interface class that all value-parameterized tests inherit from. +// A value-parameterized class must inherit from both ::testing::Test and +// ::testing::WithParamInterface. In most cases that just means inheriting +// from ::testing::TestWithParam, but more complicated test hierarchies +// may need to inherit from Test and WithParamInterface at different levels. +// +// This interface has support for accessing the test parameter value via +// the GetParam() method. +// +// Use it with one of the parameter generator defining functions, like Range(), +// Values(), ValuesIn(), Bool(), and Combine(). +// +// class FooTest : public ::testing::TestWithParam { +// protected: +// FooTest() { +// // Can use GetParam() here. +// } +// ~FooTest() override { +// // Can use GetParam() here. +// } +// void SetUp() override { +// // Can use GetParam() here. +// } +// void TearDown override { +// // Can use GetParam() here. +// } +// }; +// TEST_P(FooTest, DoesBar) { +// // Can use GetParam() method here. +// Foo foo; +// ASSERT_TRUE(foo.DoesBar(GetParam())); +// } +// INSTANTIATE_TEST_SUITE_P(OneToTenRange, FooTest, ::testing::Range(1, 10)); + +template +class WithParamInterface { + public: + typedef T ParamType; + virtual ~WithParamInterface() {} + + // The current parameter value. Is also available in the test fixture's + // constructor. + static const ParamType& GetParam() { + GTEST_CHECK_(parameter_ != nullptr) + << "GetParam() can only be called inside a value-parameterized test " + << "-- did you intend to write TEST_P instead of TEST_F?"; + return *parameter_; + } + + private: + // Sets parameter value. The caller is responsible for making sure the value + // remains alive and unchanged throughout the current test. + static void SetParam(const ParamType* parameter) { + parameter_ = parameter; + } + + // Static value used for accessing parameter during a test lifetime. 
+ static const ParamType* parameter_; + + // TestClass must be a subclass of WithParamInterface and Test. + template friend class internal::ParameterizedTestFactory; +}; + +template +const T* WithParamInterface::parameter_ = nullptr; + +// Most value-parameterized classes can ignore the existence of +// WithParamInterface, and can just inherit from ::testing::TestWithParam. + +template +class TestWithParam : public Test, public WithParamInterface { +}; + +// Macros for indicating success/failure in test code. + +// Skips test in runtime. +// Skipping test aborts current function. +// Skipped tests are neither successful nor failed. +#define GTEST_SKIP() GTEST_SKIP_("") + +// ADD_FAILURE unconditionally adds a failure to the current test. +// SUCCEED generates a success - it doesn't automatically make the +// current test successful, as a test is only successful when it has +// no failure. +// +// EXPECT_* verifies that a certain condition is satisfied. If not, +// it behaves like ADD_FAILURE. In particular: +// +// EXPECT_TRUE verifies that a Boolean condition is true. +// EXPECT_FALSE verifies that a Boolean condition is false. +// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Like GTEST_FAIL(), but at the given source file location. +#define GTEST_FAIL_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kFatalFailure) + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. 
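A short usage sketch of the exception assertions described above; the container and indices are illustrative, and these macros require exception support to be enabled:

#include <stdexcept>
#include <vector>

TEST(ExceptionTest, VectorAtRangeChecks) {
  std::vector<int> v(3);
  EXPECT_THROW(v.at(10), std::out_of_range);  // must throw exactly this type
  EXPECT_NO_THROW(v.at(1));                   // must not throw anything
  EXPECT_ANY_THROW(v.at(99));                 // any exception type is accepted
}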
+ +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(v1, v2): Tests that v1 == v2 +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. {ASSERT|EXPECT}_EQ(v1, v2) is preferred to +// {ASSERT|EXPECT}_TRUE(v1 == v2), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. 
+// +// Examples: +// +// EXPECT_NE(Foo(), 5); +// EXPECT_EQ(a_pointer, NULL); +// ASSERT_LT(i, array_size); +// ASSERT_GT(records.size(), 0) << "There is no record left."; + +#define EXPECT_EQ(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::EqHelper::Compare, val1, val2) +#define EXPECT_NE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2) +#define EXPECT_LE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define EXPECT_LT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define EXPECT_GE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define EXPECT_GT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +#define GTEST_ASSERT_EQ(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::EqHelper::Compare, val1, val2) +#define GTEST_ASSERT_NE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2) +#define GTEST_ASSERT_LE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define GTEST_ASSERT_LT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define GTEST_ASSERT_GE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define GTEST_ASSERT_GT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of +// ASSERT_XY(), which clashes with some users' own code. + +#if !GTEST_DONT_DEFINE_ASSERT_EQ +# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_NE +# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LE +# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LT +# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GE +# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GT +# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2) +#endif + +// C-string Comparisons. All tests treat NULL and any non-NULL string +// as different. Two NULLs are equal. +// +// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2 +// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2 +// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case +// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case +// +// For wide or narrow string objects, you can use the +// {ASSERT|EXPECT}_??() macros. +// +// Don't depend on the order in which the arguments are evaluated, +// which is undefined. +// +// These macros evaluate their arguments exactly once. 
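A small sketch of the C-string assertions described above; the values are illustrative:

TEST(CStringTest, ComparesContentNotAddress) {
  char actual[] = "hello";
  EXPECT_STREQ("hello", actual);       // same content, different addresses
  EXPECT_STRNE("hello", "world");
  EXPECT_STRCASEEQ("Hello", "hELLO");  // equal when case is ignored
  EXPECT_STRCASENE("foo", "bar");
}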
+ +#define EXPECT_STREQ(s1, s2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2) +#define EXPECT_STRNE(s1, s2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) +#define EXPECT_STRCASEEQ(s1, s2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2) +#define EXPECT_STRCASENE(s1, s2)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) + +#define ASSERT_STREQ(s1, s2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2) +#define ASSERT_STRNE(s1, s2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) +#define ASSERT_STRCASEEQ(s1, s2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2) +#define ASSERT_STRCASENE(s1, s2)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) + +// Macros for comparing floating-point numbers. +// +// * {ASSERT|EXPECT}_FLOAT_EQ(val1, val2): +// Tests that two float values are almost equal. +// * {ASSERT|EXPECT}_DOUBLE_EQ(val1, val2): +// Tests that two double values are almost equal. +// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error): +// Tests that v1 and v2 are within the given distance to each other. +// +// Google Test uses ULP-based comparison to automatically pick a default +// error bound that is appropriate for the operands. See the +// FloatingPoint template class in gtest-internal.h if you are +// interested in the implementation details. + +#define EXPECT_FLOAT_EQ(val1, val2)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + val1, val2) + +#define EXPECT_DOUBLE_EQ(val1, val2)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + val1, val2) + +#define ASSERT_FLOAT_EQ(val1, val2)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + val1, val2) + +#define ASSERT_DOUBLE_EQ(val1, val2)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + val1, val2) + +#define EXPECT_NEAR(val1, val2, abs_error)\ + EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ + val1, val2, abs_error) + +#define ASSERT_NEAR(val1, val2, abs_error)\ + ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ + val1, val2, abs_error) + +// These predicate format functions work on floating-point values, and +// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g. +// +// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0); + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2, + float val1, float val2); +GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2, + double val1, double val2); + + +#if GTEST_OS_WINDOWS + +// Macros that test for HRESULT failure and success, these are only useful +// on Windows, and rely on Windows SDK macros and APIs to compile. +// +// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr) +// +// When expr unexpectedly fails or succeeds, Google Test prints the +// expected result and the actual result with both a human-readable +// string representation of the error, if available, as well as the +// hex result code. 
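A sketch of the HRESULT assertions described above; the HRESULT constants come from the Windows SDK and the test itself is purely illustrative:

TEST(HresultTest, ChecksSuccessAndFailureCodes) {
  EXPECT_HRESULT_SUCCEEDED(S_OK);        // SUCCEEDED(S_OK) holds
  EXPECT_HRESULT_FAILED(E_OUTOFMEMORY);  // FAILED(E_OUTOFMEMORY) holds
}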
+# define EXPECT_HRESULT_SUCCEEDED(expr) \ + EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) + +# define ASSERT_HRESULT_SUCCEEDED(expr) \ + ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) + +# define EXPECT_HRESULT_FAILED(expr) \ + EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) + +# define ASSERT_HRESULT_FAILED(expr) \ + ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) + +#endif // GTEST_OS_WINDOWS + +// Macros that execute statement and check that it doesn't generate new fatal +// failures in the current thread. +// +// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement); +// +// Examples: +// +// EXPECT_NO_FATAL_FAILURE(Process()); +// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed"; +// +#define ASSERT_NO_FATAL_FAILURE(statement) \ + GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_) +#define EXPECT_NO_FATAL_FAILURE(statement) \ + GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_) + +// Causes a trace (including the given source file path and line number, +// and the given message) to be included in every test failure message generated +// by code in the scope of the lifetime of an instance of this class. The effect +// is undone with the destruction of the instance. +// +// The message argument can be anything streamable to std::ostream. +// +// Example: +// testing::ScopedTrace trace("file.cc", 123, "message"); +// +class GTEST_API_ ScopedTrace { + public: + // The c'tor pushes the given source file location and message onto + // a trace stack maintained by Google Test. + + // Template version. Uses Message() to convert the values into strings. + // Slow, but flexible. + template + ScopedTrace(const char* file, int line, const T& message) { + PushTrace(file, line, (Message() << message).GetString()); + } + + // Optimize for some known types. + ScopedTrace(const char* file, int line, const char* message) { + PushTrace(file, line, message ? message : "(null)"); + } + + ScopedTrace(const char* file, int line, const std::string& message) { + PushTrace(file, line, message); + } + + // The d'tor pops the info pushed by the c'tor. + // + // Note that the d'tor is not virtual in order to be efficient. + // Don't inherit from ScopedTrace! + ~ScopedTrace(); + + private: + void PushTrace(const char* file, int line, std::string message); + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); +} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its + // c'tor and d'tor. Therefore it doesn't + // need to be used otherwise. + +// Causes a trace (including the source file path, the current line +// number, and the given message) to be included in every test failure +// message generated by code in the current scope. The effect is +// undone when the control leaves the current scope. +// +// The message argument can be anything streamable to std::ostream. +// +// In the implementation, we include the current line number as part +// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s +// to appear in the same block - as long as they are on different +// lines. +// +// Assuming that each thread maintains its own stack of traces. +// Therefore, a SCOPED_TRACE() would (correctly) only affect the +// assertions in its own thread. +#define SCOPED_TRACE(message) \ + ::testing::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\ + __FILE__, __LINE__, (message)) + +// Compile-time assertion for type equality. 
+// StaticAssertTypeEq() compiles if and only if type1 and type2 +// are the same type. The value it returns is not interesting. +// +// Instead of making StaticAssertTypeEq a class template, we make it a +// function template that invokes a helper class template. This +// prevents a user from misusing StaticAssertTypeEq by +// defining objects of that type. +// +// CAVEAT: +// +// When used inside a method of a class template, +// StaticAssertTypeEq() is effective ONLY IF the method is +// instantiated. For example, given: +// +// template class Foo { +// public: +// void Bar() { testing::StaticAssertTypeEq(); } +// }; +// +// the code: +// +// void Test1() { Foo foo; } +// +// will NOT generate a compiler error, as Foo::Bar() is never +// actually instantiated. Instead, you need: +// +// void Test2() { Foo foo; foo.Bar(); } +// +// to cause a compiler error. +template +constexpr bool StaticAssertTypeEq() noexcept { + static_assert(std::is_same::value, "T1 and T2 are not the same type"); + return true; +} + +// Defines a test. +// +// The first parameter is the name of the test suite, and the second +// parameter is the name of the test within the test suite. +// +// The convention is to end the test suite name with "Test". For +// example, a test suite for the Foo class can be named FooTest. +// +// Test code should appear between braces after an invocation of +// this macro. Example: +// +// TEST(FooTest, InitializesCorrectly) { +// Foo foo; +// EXPECT_TRUE(foo.StatusIsOK()); +// } + +// Note that we call GetTestTypeId() instead of GetTypeId< +// ::testing::Test>() here to get the type ID of testing::Test. This +// is to work around a suspected linker bug when using Google Test as +// a framework on Mac OS X. The bug causes GetTypeId< +// ::testing::Test>() to return different values depending on whether +// the call is from the Google Test framework itself or from user test +// code. GetTestTypeId() is guaranteed to always return the same +// value, as it always calls GetTypeId<>() from the Google Test +// framework. +#define GTEST_TEST(test_suite_name, test_name) \ + GTEST_TEST_(test_suite_name, test_name, ::testing::Test, \ + ::testing::internal::GetTestTypeId()) + +// Define this macro to 1 to omit the definition of TEST(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_TEST +#define TEST(test_suite_name, test_name) GTEST_TEST(test_suite_name, test_name) +#endif + +// Defines a test that uses a test fixture. +// +// The first parameter is the name of the test fixture class, which +// also doubles as the test suite name. The second parameter is the +// name of the test within the test suite. +// +// A test fixture class must be declared earlier. The user should put +// the test code between braces after using this macro. Example: +// +// class FooTest : public testing::Test { +// protected: +// void SetUp() override { b_.AddElement(3); } +// +// Foo a_; +// Foo b_; +// }; +// +// TEST_F(FooTest, InitializesCorrectly) { +// EXPECT_TRUE(a_.StatusIsOK()); +// } +// +// TEST_F(FooTest, ReturnsElementCountCorrectly) { +// EXPECT_EQ(a_.size(), 0); +// EXPECT_EQ(b_.size(), 1); +// } +// +// GOOGLETEST_CM0011 DO NOT DELETE +#define TEST_F(test_fixture, test_name)\ + GTEST_TEST_(test_fixture, test_name, test_fixture, \ + ::testing::internal::GetTypeId()) + +// Returns a path to temporary directory. +// Tries to determine an appropriate directory for the platform. 
+GTEST_API_ std::string TempDir(); + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +// Dynamically registers a test with the framework. +// +// This is an advanced API only to be used when the `TEST` macros are +// insufficient. The macros should be preferred when possible, as they avoid +// most of the complexity of calling this function. +// +// The `factory` argument is a factory callable (move-constructible) object or +// function pointer that creates a new instance of the Test object. It +// handles ownership to the caller. The signature of the callable is +// `Fixture*()`, where `Fixture` is the test fixture class for the test. All +// tests registered with the same `test_suite_name` must return the same +// fixture type. This is checked at runtime. +// +// The framework will infer the fixture class from the factory and will call +// the `SetUpTestSuite` and `TearDownTestSuite` for it. +// +// Must be called before `RUN_ALL_TESTS()` is invoked, otherwise behavior is +// undefined. +// +// Use case example: +// +// class MyFixture : public ::testing::Test { +// public: +// // All of these optional, just like in regular macro usage. +// static void SetUpTestSuite() { ... } +// static void TearDownTestSuite() { ... } +// void SetUp() override { ... } +// void TearDown() override { ... } +// }; +// +// class MyTest : public MyFixture { +// public: +// explicit MyTest(int data) : data_(data) {} +// void TestBody() override { ... } +// +// private: +// int data_; +// }; +// +// void RegisterMyTests(const std::vector& values) { +// for (int v : values) { +// ::testing::RegisterTest( +// "MyFixture", ("Test" + std::to_string(v)).c_str(), nullptr, +// std::to_string(v).c_str(), +// __FILE__, __LINE__, +// // Important to use the fixture type as the return type here. +// [=]() -> MyFixture* { return new MyTest(v); }); +// } +// } +// ... +// int main(int argc, char** argv) { +// std::vector values_to_test = LoadValuesFromConfig(); +// RegisterMyTests(values_to_test); +// ... +// return RUN_ALL_TESTS(); +// } +// +template +TestInfo* RegisterTest(const char* test_suite_name, const char* test_name, + const char* type_param, const char* value_param, + const char* file, int line, Factory factory) { + using TestT = typename std::remove_pointer::type; + + class FactoryImpl : public internal::TestFactoryBase { + public: + explicit FactoryImpl(Factory f) : factory_(std::move(f)) {} + Test* CreateTest() override { return factory_(); } + + private: + Factory factory_; + }; + + return internal::MakeAndRegisterTestInfo( + test_suite_name, test_name, type_param, value_param, + internal::CodeLocation(file, line), internal::GetTypeId(), + internal::SuiteApiResolver::GetSetUpCaseOrSuite(file, line), + internal::SuiteApiResolver::GetTearDownCaseOrSuite(file, line), + new FactoryImpl{std::move(factory)}); +} + +} // namespace testing + +// Use this function in main() to run all tests. It returns 0 if all +// tests are successful, or 1 otherwise. +// +// RUN_ALL_TESTS() should be invoked after the command line has been +// parsed by InitGoogleTest(). +// +// This function was formerly a macro; thus, it is in the global +// namespace and has an all-caps name. 
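A typical main() for a test binary, matching the description above; this is the standard pattern rather than anything specific to this repository:

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);  // strips and applies --gtest_* flags
  return RUN_ALL_TESTS();                  // returns 0 iff every test passed
}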
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_; + +inline int RUN_ALL_TESTS() { + return ::testing::UnitTest::GetInstance()->Run(); +} + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest_pred_impl.h b/source/3rdparty/gtest/include/gtest/gtest_pred_impl.h new file mode 100644 index 0000000..d514255 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest_pred_impl.h @@ -0,0 +1,359 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on 01/02/2019 by command +// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +#include "gtest/gtest.h" + +namespace testing { + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... +// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most 5. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. + +// GTEST_ASSERT_ is the basic statement to which all of the assertions +// in this file reduce. Don't use this in your code. 
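To make the predicate family above concrete, here is a sketch with a user-supplied predicate and predicate-formatter; MutuallyPrime and AssertMutuallyPrime are illustrative names, not part of the library:

int Gcd(int m, int n) { return n == 0 ? m : Gcd(n, m % n); }
bool MutuallyPrime(int m, int n) { return Gcd(m, n) == 1; }

// A predicate-formatter also receives the argument expressions as text,
// so the failure message can be customized.
::testing::AssertionResult AssertMutuallyPrime(const char* m_expr,
                                               const char* n_expr,
                                               int m, int n) {
  if (MutuallyPrime(m, n)) return ::testing::AssertionSuccess();
  return ::testing::AssertionFailure()
         << m_expr << " and " << n_expr << " (" << m << " and " << n
         << ") are not mutually prime";
}

TEST(PredicateTest, SmallNumbers) {
  EXPECT_PRED2(MutuallyPrime, 3, 5);                // prints values on failure
  EXPECT_PRED_FORMAT2(AssertMutuallyPrime, 3, 10);  // custom failure text
}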
+
+#define GTEST_ASSERT_(expression, on_failure) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (const ::testing::AssertionResult gtest_ar = (expression)) \
+    ; \
+  else \
+    on_failure(gtest_ar.failure_message())
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+template <typename Pred,
+          typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+                                  const char* e1,
+                                  Pred pred,
+                                  const T1& v1) {
+  if (pred(v1)) return AssertionSuccess();
+
+  return AssertionFailure()
+         << pred_text << "(" << e1 << ") evaluates to false, where"
+         << "\n"
+         << e1 << " evaluates to " << ::testing::PrintToString(v1);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, v1), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+                                             #v1, \
+                                             pred, \
+                                             v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2) {
+  if (pred(v1, v2)) return AssertionSuccess();
+
+  return AssertionFailure()
+         << pred_text << "(" << e1 << ", " << e2
+         << ") evaluates to false, where"
+         << "\n"
+         << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+         << e2 << " evaluates to " << ::testing::PrintToString(v2);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             pred, \
+                                             v1, \
+                                             v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3) {
+  if (pred(v1, v2, v3)) return AssertionSuccess();
+
+  return AssertionFailure()
+         << pred_text << "(" << e1 << ", " << e2 << ", " << e3
+         << ") evaluates to false, where"
+         << "\n"
+         << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+         << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
+         << e3 << " evaluates to " << ::testing::PrintToString(v3);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4) {
+  if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+  return AssertionFailure()
+         << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
+         << ") evaluates to false, where"
+         << "\n"
+         << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+         << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
+         << e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
+         << e4 << " evaluates to " << ::testing::PrintToString(v4);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             #v4, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3, \
+                                             v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+template +AssertionResult AssertPred5Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + const char* e4, + const char* e5, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3, + const T4& v4, + const T5& v5) { + if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); + + return AssertionFailure() + << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4 + << ", " << e5 << ") evaluates to false, where" + << "\n" + << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n" + << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n" + << e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n" + << e4 << " evaluates to " << ::testing::PrintToString(v4) << "\n" + << e5 << " evaluates to " << ::testing::PrintToString(v5); +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5. +// Don't use this in your code. +#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use +// this in your code. +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + #v5, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4, \ + v5), on_failure) + +// 5-ary predicate assertion macros. +#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) + + + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ diff --git a/source/3rdparty/gtest/include/gtest/gtest_prod.h b/source/3rdparty/gtest/include/gtest/gtest_prod.h new file mode 100644 index 0000000..e651671 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/gtest_prod.h @@ -0,0 +1,61 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// Google C++ Testing and Mocking Framework definitions useful in production code. +// GOOGLETEST_CM0003 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_ + +// When you need to test the private or protected members of a class, +// use the FRIEND_TEST macro to declare your tests as friends of the +// class. For example: +// +// class MyClass { +// private: +// void PrivateMethod(); +// FRIEND_TEST(MyClassTest, PrivateMethodWorks); +// }; +// +// class MyClassTest : public testing::Test { +// // ... +// }; +// +// TEST_F(MyClassTest, PrivateMethodWorks) { +// // Can call MyClass::PrivateMethod() here. +// } +// +// Note: The test class must be in the same namespace as the class being tested. +// For example, putting MyClassTest in an anonymous namespace will not work. + +#define FRIEND_TEST(test_case_name, test_name)\ +friend class test_case_name##_##test_name##_Test + +#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/custom/README.md b/source/3rdparty/gtest/include/gtest/internal/custom/README.md new file mode 100644 index 0000000..ff391fb --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/custom/README.md @@ -0,0 +1,56 @@ +# Customization Points + +The custom directory is an injection point for custom user configurations. + +## Header `gtest.h` + +### The following macros can be defined: + +* `GTEST_OS_STACK_TRACE_GETTER_` - The name of an implementation of + `OsStackTraceGetterInterface`. +* `GTEST_CUSTOM_TEMPDIR_FUNCTION_` - An override for `testing::TempDir()`. See + `testing::TempDir` for semantics and signature. + +## Header `gtest-port.h` + +The following macros can be defined: + +### Flag related macros: + +* `GTEST_FLAG(flag_name)` +* `GTEST_USE_OWN_FLAGFILE_FLAG_` - Define to 0 when the system provides its + own flagfile flag parsing. +* `GTEST_DECLARE_bool_(name)` +* `GTEST_DECLARE_int32_(name)` +* `GTEST_DECLARE_string_(name)` +* `GTEST_DEFINE_bool_(name, default_val, doc)` +* `GTEST_DEFINE_int32_(name, default_val, doc)` +* `GTEST_DEFINE_string_(name, default_val, doc)` + +### Logging: + +* `GTEST_LOG_(severity)` +* `GTEST_CHECK_(condition)` +* Functions `LogToStderr()` and `FlushInfoLog()` have to be provided too. + +### Threading: + +* `GTEST_HAS_NOTIFICATION_` - Enabled if Notification is already provided. +* `GTEST_HAS_MUTEX_AND_THREAD_LOCAL_` - Enabled if `Mutex` and `ThreadLocal` + are already provided. Must also provide `GTEST_DECLARE_STATIC_MUTEX_(mutex)` + and `GTEST_DEFINE_STATIC_MUTEX_(mutex)` +* `GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)` +* `GTEST_LOCK_EXCLUDED_(locks)` + +### Underlying library support features + +* `GTEST_HAS_CXXABI_H_` + +### Exporting API symbols: + +* `GTEST_API_` - Specifier for exported symbols. + +## Header `gtest-printers.h` + +* See documentation at `gtest/gtest-printers.h` for details on how to define a + custom printer. 
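As an illustration of these injection points, a project could route testing::TempDir() to its own location by adding something like the following to internal/custom/gtest.h. The function name and path are hypothetical; GTEST_CUSTOM_TEMPDIR_FUNCTION_ only needs to name a callable returning std::string:

#include <string>

namespace my_project {
inline std::string TestTempDir() { return "/tmp/kyty-unit-tests/"; }
}  // namespace my_project

#define GTEST_CUSTOM_TEMPDIR_FUNCTION_ my_project::TestTempDir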
diff --git a/source/3rdparty/gtest/include/gtest/internal/custom/gtest-port.h b/source/3rdparty/gtest/include/gtest/internal/custom/gtest-port.h new file mode 100644 index 0000000..cd85d95 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/custom/gtest-port.h @@ -0,0 +1,37 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Injection point for custom user configurations. See README for details +// +// ** Custom implementation starts here ** + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/custom/gtest-printers.h b/source/3rdparty/gtest/include/gtest/internal/custom/gtest-printers.h new file mode 100644 index 0000000..eb4467a --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/custom/gtest-printers.h @@ -0,0 +1,42 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// This file provides an injection point for custom printers in a local +// installation of gTest. +// It will be included from gtest-printers.h and the overrides in this file +// will be visible to everyone. +// +// Injection point for custom user configurations. See README for details +// +// ** Custom implementation starts here ** + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/custom/gtest.h b/source/3rdparty/gtest/include/gtest/internal/custom/gtest.h new file mode 100644 index 0000000..4c8e07b --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/custom/gtest.h @@ -0,0 +1,37 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Injection point for custom user configurations. 
See README for details +// +// ** Custom implementation starts here ** + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-death-test-internal.h b/source/3rdparty/gtest/include/gtest/internal/gtest-death-test-internal.h new file mode 100644 index 0000000..68bd353 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-death-test-internal.h @@ -0,0 +1,304 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file defines internal utilities needed for implementing +// death tests. They are subject to change without notice. +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + +#include "gtest/gtest-matchers.h" +#include "gtest/internal/gtest-internal.h" + +#include +#include + +namespace testing { +namespace internal { + +GTEST_DECLARE_string_(internal_run_death_test); + +// Names of the flags (needed for parsing Google Test flags). +const char kDeathTestStyleFlag[] = "death_test_style"; +const char kDeathTestUseFork[] = "death_test_use_fork"; +const char kInternalRunDeathTestFlag[] = "internal_run_death_test"; + +#if GTEST_HAS_DEATH_TEST + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +// DeathTest is a class that hides much of the complexity of the +// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method +// returns a concrete class that depends on the prevailing death test +// style, as defined by the --gtest_death_test_style and/or +// --gtest_internal_run_death_test flags. 
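The machinery declared in this header backs the public EXPECT_DEATH/ASSERT_DEATH and EXPECT_EXIT/ASSERT_EXIT macros; a usage sketch, with the crashing helper made up for illustration:

#include <cstdio>
#include <cstdlib>

void UnpackFrame(const char* data) {
  if (data == nullptr) {
    std::fprintf(stderr, "null frame\n");
    std::abort();
  }
}

// Suffixing the suite name with "DeathTest" makes gtest run it early,
// which is the recommended convention for death tests.
TEST(UnpackFrameDeathTest, DiesOnNullInput) {
  // The statement runs in a child process; the regex is matched against
  // the child's stderr output.
  EXPECT_DEATH(UnpackFrame(nullptr), "null frame");
}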
+ +// In describing the results of death tests, these terms are used with +// the corresponding definitions: +// +// exit status: The integer exit information in the format specified +// by wait(2) +// exit code: The integer code passed to exit(3), _exit(2), or +// returned from main() +class GTEST_API_ DeathTest { + public: + // Create returns false if there was an error determining the + // appropriate action to take for the current death test; for example, + // if the gtest_death_test_style flag is set to an invalid value. + // The LastMessage method will return a more detailed message in that + // case. Otherwise, the DeathTest pointer pointed to by the "test" + // argument is set. If the death test should be skipped, the pointer + // is set to NULL; otherwise, it is set to the address of a new concrete + // DeathTest object that controls the execution of the current test. + static bool Create(const char* statement, Matcher matcher, + const char* file, int line, DeathTest** test); + DeathTest(); + virtual ~DeathTest() { } + + // A helper class that aborts a death test when it's deleted. + class ReturnSentinel { + public: + explicit ReturnSentinel(DeathTest* test) : test_(test) { } + ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: + DeathTest* const test_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + } GTEST_ATTRIBUTE_UNUSED_; + + // An enumeration of possible roles that may be taken when a death + // test is encountered. EXECUTE means that the death test logic should + // be executed immediately. OVERSEE means that the program should prepare + // the appropriate environment for a child process to execute the death + // test, then wait for it to complete. + enum TestRole { OVERSEE_TEST, EXECUTE_TEST }; + + // An enumeration of the three reasons that a test might be aborted. + enum AbortReason { + TEST_ENCOUNTERED_RETURN_STATEMENT, + TEST_THREW_EXCEPTION, + TEST_DID_NOT_DIE + }; + + // Assumes one of the above roles. + virtual TestRole AssumeRole() = 0; + + // Waits for the death test to finish and returns its status. + virtual int Wait() = 0; + + // Returns true if the death test passed; that is, the test process + // exited during the test, its exit status matches a user-supplied + // predicate, and its stderr output matches a user-supplied regular + // expression. + // The user-supplied predicate may be a macro expression rather + // than a function pointer or functor, or else Wait and Passed could + // be combined. + virtual bool Passed(bool exit_status_ok) = 0; + + // Signals that the death test did not die as expected. + virtual void Abort(AbortReason reason) = 0; + + // Returns a human-readable outcome message regarding the outcome of + // the last death test. + static const char* LastMessage(); + + static void set_last_death_test_message(const std::string& message); + + private: + // A string containing a description of the outcome of the last death test. + static std::string last_death_test_message_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); +}; + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +// Factory interface for death tests. May be mocked out for testing. +class DeathTestFactory { + public: + virtual ~DeathTestFactory() { } + virtual bool Create(const char* statement, + Matcher matcher, const char* file, + int line, DeathTest** test) = 0; +}; + +// A concrete DeathTestFactory implementation for normal use. 
+class DefaultDeathTestFactory : public DeathTestFactory { + public: + bool Create(const char* statement, Matcher matcher, + const char* file, int line, DeathTest** test) override; +}; + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +GTEST_API_ bool ExitedUnsuccessfully(int exit_status); + +// A string passed to EXPECT_DEATH (etc.) is caught by one of these overloads +// and interpreted as a regex (rather than an Eq matcher) for legacy +// compatibility. +inline Matcher MakeDeathTestMatcher( + ::testing::internal::RE regex) { + return ContainsRegex(regex.pattern()); +} +inline Matcher MakeDeathTestMatcher(const char* regex) { + return ContainsRegex(regex); +} +inline Matcher MakeDeathTestMatcher( + const ::std::string& regex) { + return ContainsRegex(regex); +} + +// If a Matcher is passed to EXPECT_DEATH (etc.), it's +// used directly. +inline Matcher MakeDeathTestMatcher( + Matcher matcher) { + return matcher; +} + +// Traps C++ exceptions escaping statement and reports them as test +// failures. Note that trapping SEH exceptions is not implemented here. +# if GTEST_HAS_EXCEPTIONS +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (const ::std::exception& gtest_exception) { \ + fprintf(\ + stderr, \ + "\n%s: Caught std::exception-derived exception escaping the " \ + "death test statement. Exception message: %s\n", \ + ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \ + gtest_exception.what()); \ + fflush(stderr); \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } catch (...) { \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } + +# else +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) + +# endif + +// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*, +// ASSERT_EXIT*, and EXPECT_EXIT*. +#define GTEST_DEATH_TEST_(statement, predicate, regex_or_matcher, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + ::testing::internal::DeathTest* gtest_dt; \ + if (!::testing::internal::DeathTest::Create( \ + #statement, \ + ::testing::internal::MakeDeathTestMatcher(regex_or_matcher), \ + __FILE__, __LINE__, >est_dt)) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + if (gtest_dt != nullptr) { \ + std::unique_ptr< ::testing::internal::DeathTest> gtest_dt_ptr(gtest_dt); \ + switch (gtest_dt->AssumeRole()) { \ + case ::testing::internal::DeathTest::OVERSEE_TEST: \ + if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + break; \ + case ::testing::internal::DeathTest::EXECUTE_TEST: { \ + ::testing::internal::DeathTest::ReturnSentinel gtest_sentinel( \ + gtest_dt); \ + GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \ + gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \ + break; \ + } \ + default: \ + break; \ + } \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__) \ + : fail(::testing::internal::DeathTest::LastMessage()) +// The symbol "fail" here expands to something into which a message +// can be streamed. + +// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in +// NDEBUG mode. 
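For orientation, the plumbing above (DeathTest::Create, MakeDeathTestMatcher and GTEST_DEATH_TEST_) is what the public death-test assertions expand to. A minimal usage sketch, not part of the vendored sources; Crash() and ExitWith42() are made-up helpers:

#include <cstdlib>
#include "gtest/gtest.h"

static void Crash()      { std::abort(); }   // hypothetical code under test
static void ExitWith42() { std::exit(42); }

TEST(DeathExampleTest, DiesAsExpected) {
  // The statement runs in a child process (EXECUTE_TEST role); the parent
  // (OVERSEE_TEST role) waits, then checks the exit status and stderr, with
  // the string argument turned into a regex by MakeDeathTestMatcher().
  EXPECT_DEATH(Crash(), "");
  EXPECT_EXIT(ExitWith42(), ::testing::ExitedWithCode(42), "");
}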
In this case we need the statements to be executed and the macro +// must accept a streamed message even though the message is never printed. +// The regex object is not evaluated, but it is used to prevent "unused" +// warnings and to avoid an expression that doesn't compile in debug mode. +#define GTEST_EXECUTE_STATEMENT_(statement, regex_or_matcher) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } else if (!::testing::internal::AlwaysTrue()) { \ + ::testing::internal::MakeDeathTestMatcher(regex_or_matcher); \ + } else \ + ::testing::Message() + +// A class representing the parsed contents of the +// --gtest_internal_run_death_test flag, as it existed when +// RUN_ALL_TESTS was called. +class InternalRunDeathTestFlag { + public: + InternalRunDeathTestFlag(const std::string& a_file, + int a_line, + int an_index, + int a_write_fd) + : file_(a_file), line_(a_line), index_(an_index), + write_fd_(a_write_fd) {} + + ~InternalRunDeathTestFlag() { + if (write_fd_ >= 0) + posix::Close(write_fd_); + } + + const std::string& file() const { return file_; } + int line() const { return line_; } + int index() const { return index_; } + int write_fd() const { return write_fd_; } + + private: + std::string file_; + int line_; + int index_; + int write_fd_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); +}; + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag(); + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-filepath.h b/source/3rdparty/gtest/include/gtest/internal/gtest-filepath.h new file mode 100644 index 0000000..c11b101 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-filepath.h @@ -0,0 +1,211 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Google Test filepath utilities +// +// This header file declares classes and functions used internally by +// Google Test. They are subject to change without notice. +// +// This file is #included in gtest/internal/gtest-internal.h. +// Do not include this header file separately! + +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ + +#include "gtest/internal/gtest-string.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +namespace testing { +namespace internal { + +// FilePath - a class for file and directory pathname manipulation which +// handles platform-specific conventions (like the pathname separator). +// Used for helper functions for naming files in a directory for xml output. +// Except for Set methods, all methods are const or static, which provides an +// "immutable value object" -- useful for peace of mind. +// A FilePath with a value ending in a path separator ("like/this/") represents +// a directory, otherwise it is assumed to represent a file. In either case, +// it may or may not represent an actual file or directory in the file system. +// Names are NOT checked for syntax correctness -- no checking for illegal +// characters, malformed paths, etc. + +class GTEST_API_ FilePath { + public: + FilePath() : pathname_("") { } + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + + explicit FilePath(const std::string& pathname) : pathname_(pathname) { + Normalize(); + } + + FilePath& operator=(const FilePath& rhs) { + Set(rhs); + return *this; + } + + void Set(const FilePath& rhs) { + pathname_ = rhs.pathname_; + } + + const std::string& string() const { return pathname_; } + const char* c_str() const { return pathname_.c_str(); } + + // Returns the current working directory, or "" if unsuccessful. + static FilePath GetCurrentDir(); + + // Given directory = "dir", base_name = "test", number = 0, + // extension = "xml", returns "dir/test.xml". If number is greater + // than zero (e.g., 12), returns "dir/test_12.xml". + // On Windows platform, uses \ as the separator rather than /. + static FilePath MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension); + + // Given directory = "dir", relative_path = "test.xml", + // returns "dir/test.xml". + // On Windows, uses \ as the separator rather than /. + static FilePath ConcatPaths(const FilePath& directory, + const FilePath& relative_path); + + // Returns a pathname for a file that does not currently exist. The pathname + // will be directory/base_name.extension or + // directory/base_name_.extension if directory/base_name.extension + // already exists. The number will be incremented until a pathname is found + // that does not already exist. + // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. 
+ // There could be a race condition if two or more processes are calling this + // function at the same time -- they could both pick the same filename. + static FilePath GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension); + + // Returns true if and only if the path is "". + bool IsEmpty() const { return pathname_.empty(); } + + // If input name has a trailing separator character, removes it and returns + // the name, otherwise return the name string unmodified. + // On Windows platform, uses \ as the separator, other platforms use /. + FilePath RemoveTrailingPathSeparator() const; + + // Returns a copy of the FilePath with the directory part removed. + // Example: FilePath("path/to/file").RemoveDirectoryName() returns + // FilePath("file"). If there is no directory part ("just_a_file"), it returns + // the FilePath unmodified. If there is no file part ("just_a_dir/") it + // returns an empty FilePath (""). + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveDirectoryName() const; + + // RemoveFileName returns the directory path with the filename removed. + // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". + // If the FilePath is "a_file" or "/a_file", RemoveFileName returns + // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does + // not have a file, like "just/a/dir/", it returns the FilePath unmodified. + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveFileName() const; + + // Returns a copy of the FilePath with the case-insensitive extension removed. + // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns + // FilePath("dir/file"). If a case-insensitive extension is not + // found, returns a copy of the original FilePath. + FilePath RemoveExtension(const char* extension) const; + + // Creates directories so that path exists. Returns true if successful or if + // the directories already exist; returns false if unable to create + // directories for any reason. Will also return false if the FilePath does + // not represent a directory (that is, it doesn't end with a path separator). + bool CreateDirectoriesRecursively() const; + + // Create the directory so that path exists. Returns true if successful or + // if the directory already exists; returns false if unable to create the + // directory for any reason, including if the parent directory does not + // exist. Not named "CreateDirectory" because that's a macro on Windows. + bool CreateFolder() const; + + // Returns true if FilePath describes something in the file-system, + // either a file, directory, or whatever, and that something exists. + bool FileOrDirectoryExists() const; + + // Returns true if pathname describes a directory in the file-system + // that exists. + bool DirectoryExists() const; + + // Returns true if FilePath ends with a path separator, which indicates that + // it is intended to represent a directory. Returns false otherwise. + // This does NOT check that a directory (or file) actually exists. + bool IsDirectory() const; + + // Returns true if pathname describes a root directory. (Windows has one + // root directory per disk drive.) + bool IsRootDirectory() const; + + // Returns true if pathname describes an absolute path. + bool IsAbsolutePath() const; + + private: + // Replaces multiple consecutive separators with a single separator. + // For example, "bar///foo" becomes "bar/foo". 
Does not eliminate other + // redundancies that might be in a pathname involving "." or "..". + // + // A pathname with multiple consecutive separators may occur either through + // user error or as a result of some scripts or APIs that generate a pathname + // with a trailing separator. On other platforms the same API or script + // may NOT generate a pathname with a trailing "/". Then elsewhere that + // pathname may have another "/" and pathname components added to it, + // without checking for the separator already being there. + // The script language and operating system may allow paths like "foo//bar" + // but some of the functions in FilePath will not handle that correctly. In + // particular, RemoveTrailingPathSeparator() only removes one separator, and + // it is called in CreateDirectoriesRecursively() assuming that it will change + // a pathname from directory syntax (trailing separator) to filename syntax. + // + // On Windows this method also replaces the alternate path separator '/' with + // the primary path separator '\\', so that for example "bar\\/\\foo" becomes + // "bar\\foo". + + void Normalize(); + + // Returns a pointer to the last occurence of a valid path separator in + // the FilePath. On Windows, for example, both '/' and '\' are valid path + // separators. Returns NULL if no path separator was found. + const char* FindLastPathSeparator() const; + + std::string pathname_; +}; // class FilePath + +} // namespace internal +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-internal.h b/source/3rdparty/gtest/include/gtest/internal/gtest-internal.h new file mode 100644 index 0000000..6bad878 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-internal.h @@ -0,0 +1,1432 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
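Closing out the gtest-filepath.h hunk above: a small illustration, not part of the vendored sources, of how the internal FilePath helper behaves, assuming the semantics documented in its declaration:

#include "gtest/internal/gtest-filepath.h"

void FilePathSketch() {
  using ::testing::internal::FilePath;
  // "dir/test_12.xml" (backslashes on Windows), per MakeFileName().
  FilePath report = FilePath::MakeFileName(FilePath("dir"), FilePath("test"),
                                           12, "xml");
  FilePath dir("logs//today/");                  // Normalize() collapses "//"
  bool is_dir = dir.IsDirectory();               // true: trailing separator
  FilePath stem = report.RemoveExtension("xml"); // "dir/test_12"
  (void)is_dir;
  (void)stem;
}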
+// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file declares functions and macros used internally by +// Google Test. They are subject to change without notice. + +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ + +#include "gtest/internal/gtest-port.h" + +#if GTEST_OS_LINUX +# include +# include +# include +# include +#endif // GTEST_OS_LINUX + +#if GTEST_HAS_EXCEPTIONS +# include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest-message.h" +#include "gtest/internal/gtest-filepath.h" +#include "gtest/internal/gtest-string.h" +#include "gtest/internal/gtest-type-util.h" + +// Due to C++ preprocessor weirdness, we need double indirection to +// concatenate two tokens when one of them is __LINE__. Writing +// +// foo ## __LINE__ +// +// will result in the token foo__LINE__, instead of foo followed by +// the current line number. For more details, see +// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +// Stringifies its argument. +// Work around a bug in visual studio which doesn't accept code like this: +// +// #define GTEST_STRINGIFY_(name) #name +// #define MACRO(a, b, c) ... GTEST_STRINGIFY_(a) ... +// MACRO(, x, y) +// +// Complaining about the argument to GTEST_STRINGIFY_ being empty. +// This is allowed by the spec. +#define GTEST_STRINGIFY_HELPER_(name, ...) #name +#define GTEST_STRINGIFY_(...) GTEST_STRINGIFY_HELPER_(__VA_ARGS__, ) + +namespace proto2 { class Message; } + +namespace testing { + +// Forward declarations. + +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test suites. + +template +::std::string PrintToString(const T& value); + +namespace internal { + +struct TraceInfo; // Information about a trace point. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest + +// The text used in failure messages to indicate the start of the +// stack trace. +GTEST_API_ extern const char kStackTraceMarker[]; + +// An IgnoredValue object can be implicitly constructed from ANY value. +class IgnoredValue { + struct Sink {}; + public: + // This constructor template allows any value to be implicitly + // converted to IgnoredValue. The object has no data member and + // doesn't try to remember anything about the argument. We + // deliberately omit the 'explicit' keyword in order to allow the + // conversion to be implicit. + // Disable the conversion if T already has a magical conversion operator. + // Otherwise we get ambiguity. + template ::value, + int>::type = 0> + IgnoredValue(const T& /* ignored */) {} // NOLINT(runtime/explicit) +}; + +// Appends the user-supplied message to the Google-Test-generated message. 
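The double indirection required by GTEST_CONCAT_TOKEN_ above is easiest to see with a throwaway macro pair (illustrative only; NAIVE_CAT and MY_CAT are made-up names):

#define NAIVE_CAT(a, b) a ## b
#define MY_CAT_IMPL(a, b) a ## b
#define MY_CAT(a, b) MY_CAT_IMPL(a, b)

int NAIVE_CAT(foo, __LINE__);   // pastes the literal token: foo__LINE__
int MY_CAT(bar, __LINE__);      // arguments expand first: e.g. bar7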
+GTEST_API_ std::string AppendUserMessage( + const std::string& gtest_msg, const Message& user_msg); + +#if GTEST_HAS_EXCEPTIONS + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4275 \ +/* an exported class was derived from a class that was not exported */) + +// This exception is thrown by (and only by) a failed Google Test +// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions +// are enabled). We derive it from std::runtime_error, which is for +// errors presumably detectable only at run time. Since +// std::runtime_error inherits from std::exception, many testing +// frameworks know how to extract and print the message inside it. +class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error { + public: + explicit GoogleTestFailureException(const TestPartResult& failure); +}; + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4275 + +#endif // GTEST_HAS_EXCEPTIONS + +namespace edit_distance { +// Returns the optimal edits to go from 'left' to 'right'. +// All edits cost the same, with replace having lower priority than +// add/remove. +// Simple implementation of the Wagner-Fischer algorithm. +// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm +enum EditType { kMatch, kAdd, kRemove, kReplace }; +GTEST_API_ std::vector CalculateOptimalEdits( + const std::vector& left, const std::vector& right); + +// Same as above, but the input is represented as strings. +GTEST_API_ std::vector CalculateOptimalEdits( + const std::vector& left, + const std::vector& right); + +// Create a diff of the input strings in Unified diff format. +GTEST_API_ std::string CreateUnifiedDiff(const std::vector& left, + const std::vector& right, + size_t context = 2); + +} // namespace edit_distance + +// Calculate the diff between 'left' and 'right' and return it in unified diff +// format. +// If not null, stores in 'total_line_count' the total number of lines found +// in left + right. +GTEST_API_ std::string DiffStrings(const std::string& left, + const std::string& right, + size_t* total_line_count); + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true if and only if the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +GTEST_API_ AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case); + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +GTEST_API_ std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value); + +// This template class represents an IEEE floating-point number +// (either single-precision or double-precision, depending on the +// template parameters). +// +// The purpose of this class is to do more sophisticated number +// comparison. (Due to round-off error, etc, it's very unlikely that +// two floating-points will be equal exactly. Hence a naive +// comparison by the == operation often doesn't work.) 
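The == pitfall just mentioned motivates the ULP-based comparison implemented below; a small sketch, not part of the vendored sources, of the helper and of EXPECT_DOUBLE_EQ, which builds on the same AlmostEquals() check:

#include "gtest/gtest.h"

TEST(UlpExampleTest, AlmostEqualWithinFourUlps) {
  using ::testing::internal::FloatingPoint;
  const double sum = 0.1 + 0.2;          // one ULP away from the 0.3 literal
  EXPECT_FALSE(sum == 0.3);              // naive comparison fails
  EXPECT_TRUE(FloatingPoint<double>(sum).AlmostEquals(
      FloatingPoint<double>(0.3)));      // within kMaxUlps (4) -> "equal"
  EXPECT_DOUBLE_EQ(sum, 0.3);            // public macro, same mechanism
}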
+// +// Format of IEEE floating-point: +// +// The most-significant bit being the leftmost, an IEEE +// floating-point looks like +// +// sign_bit exponent_bits fraction_bits +// +// Here, sign_bit is a single bit that designates the sign of the +// number. +// +// For float, there are 8 exponent bits and 23 fraction bits. +// +// For double, there are 11 exponent bits and 52 fraction bits. +// +// More details can be found at +// http://en.wikipedia.org/wiki/IEEE_floating-point_standard. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +template +class FloatingPoint { + public: + // Defines the unsigned integer type that has the same size as the + // floating point number. + typedef typename TypeWithSize::UInt Bits; + + // Constants. + + // # of bits in a number. + static const size_t kBitCount = 8*sizeof(RawType); + + // # of fraction bits in a number. + static const size_t kFractionBitCount = + std::numeric_limits::digits - 1; + + // # of exponent bits in a number. + static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount; + + // The mask for the sign bit. + static const Bits kSignBitMask = static_cast(1) << (kBitCount - 1); + + // The mask for the fraction bits. + static const Bits kFractionBitMask = + ~static_cast(0) >> (kExponentBitCount + 1); + + // The mask for the exponent bits. + static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask); + + // How many ULP's (Units in the Last Place) we want to tolerate when + // comparing two numbers. The larger the value, the more error we + // allow. A 0 value means that two numbers must be exactly the same + // to be considered equal. + // + // The maximum error of a single floating-point operation is 0.5 + // units in the last place. On Intel CPU's, all floating-point + // calculations are done with 80-bit precision, while double has 64 + // bits. Therefore, 4 should be enough for ordinary use. + // + // See the following article for more details on ULP: + // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + static const size_t kMaxUlps = 4; + + // Constructs a FloatingPoint from a raw floating-point number. + // + // On an Intel CPU, passing a non-normalized NAN (Not a Number) + // around may change its bits, although the new value is guaranteed + // to be also a NAN. Therefore, don't expect this constructor to + // preserve the bits in x when x is a NAN. + explicit FloatingPoint(const RawType& x) { u_.value_ = x; } + + // Static methods + + // Reinterprets a bit pattern as a floating-point number. + // + // This function is needed to test the AlmostEquals() method. + static RawType ReinterpretBits(const Bits bits) { + FloatingPoint fp(0); + fp.u_.bits_ = bits; + return fp.u_.value_; + } + + // Returns the floating-point number that represent positive infinity. + static RawType Infinity() { + return ReinterpretBits(kExponentBitMask); + } + + // Returns the maximum representable finite floating-point number. + static RawType Max(); + + // Non-static methods + + // Returns the bits that represents this number. + const Bits &bits() const { return u_.bits_; } + + // Returns the exponent bits of this number. + Bits exponent_bits() const { return kExponentBitMask & u_.bits_; } + + // Returns the fraction bits of this number. + Bits fraction_bits() const { return kFractionBitMask & u_.bits_; } + + // Returns the sign bit of this number. 
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; } + + // Returns true if and only if this is NAN (not a number). + bool is_nan() const { + // It's a NAN if the exponent bits are all ones and the fraction + // bits are not entirely zeros. + return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0); + } + + // Returns true if and only if this number is at most kMaxUlps ULP's away + // from rhs. In particular, this function: + // + // - returns false if either number is (or both are) NAN. + // - treats really large numbers as almost equal to infinity. + // - thinks +0.0 and -0.0 are 0 DLP's apart. + bool AlmostEquals(const FloatingPoint& rhs) const { + // The IEEE standard says that any comparison operation involving + // a NAN must return false. + if (is_nan() || rhs.is_nan()) return false; + + return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) + <= kMaxUlps; + } + + private: + // The data type used to store the actual floating-point number. + union FloatingPointUnion { + RawType value_; // The raw floating-point number. + Bits bits_; // The bits that represent the number. + }; + + // Converts an integer from the sign-and-magnitude representation to + // the biased representation. More precisely, let N be 2 to the + // power of (kBitCount - 1), an integer x is represented by the + // unsigned number x + N. + // + // For instance, + // + // -N + 1 (the most negative number representable using + // sign-and-magnitude) is represented by 1; + // 0 is represented by N; and + // N - 1 (the biggest number representable using + // sign-and-magnitude) is represented by 2N - 1. + // + // Read http://en.wikipedia.org/wiki/Signed_number_representations + // for more details on signed number representations. + static Bits SignAndMagnitudeToBiased(const Bits &sam) { + if (kSignBitMask & sam) { + // sam represents a negative number. + return ~sam + 1; + } else { + // sam represents a positive number. + return kSignBitMask | sam; + } + } + + // Given two numbers in the sign-and-magnitude representation, + // returns the distance between them as an unsigned number. + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, + const Bits &sam2) { + const Bits biased1 = SignAndMagnitudeToBiased(sam1); + const Bits biased2 = SignAndMagnitudeToBiased(sam2); + return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); + } + + FloatingPointUnion u_; +}; + +// We cannot use std::numeric_limits::max() as it clashes with the max() +// macro defined by . +template <> +inline float FloatingPoint::Max() { return FLT_MAX; } +template <> +inline double FloatingPoint::Max() { return DBL_MAX; } + +// Typedefs the instances of the FloatingPoint template class that we +// care to use. +typedef FloatingPoint Float; +typedef FloatingPoint Double; + +// In order to catch the mistake of putting tests that use different +// test fixture classes in the same test suite, we need to assign +// unique IDs to fixture classes and compare them. The TypeId type is +// used to hold such IDs. The user should treat TypeId as an opaque +// type: the only operation allowed on TypeId values is to compare +// them for equality using the == operator. +typedef const void* TypeId; + +template +class TypeIdHelper { + public: + // dummy_ must not have a const type. Otherwise an overly eager + // compiler (e.g. MSVC 7.1 & 8.0) may try to merge + // TypeIdHelper::dummy_ for different Ts as an "optimization". 
+ static bool dummy_; +}; + +template +bool TypeIdHelper::dummy_ = false; + +// GetTypeId() returns the ID of type T. Different values will be +// returned for different types. Calling the function twice with the +// same type argument is guaranteed to return the same ID. +template +TypeId GetTypeId() { + // The compiler is required to allocate a different + // TypeIdHelper::dummy_ variable for each T used to instantiate + // the template. Therefore, the address of dummy_ is guaranteed to + // be unique. + return &(TypeIdHelper::dummy_); +} + +// Returns the type ID of ::testing::Test. Always call this instead +// of GetTypeId< ::testing::Test>() to get the type ID of +// ::testing::Test, as the latter may give the wrong result due to a +// suspected linker bug when compiling Google Test as a Mac OS X +// framework. +GTEST_API_ TypeId GetTestTypeId(); + +// Defines the abstract factory interface that creates instances +// of a Test object. +class TestFactoryBase { + public: + virtual ~TestFactoryBase() {} + + // Creates a test instance to run. The instance is both created and destroyed + // within TestInfoImpl::Run() + virtual Test* CreateTest() = 0; + + protected: + TestFactoryBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase); +}; + +// This class provides implementation of TeastFactoryBase interface. +// It is used in TEST and TEST_F macros. +template +class TestFactoryImpl : public TestFactoryBase { + public: + Test* CreateTest() override { return new TestClass; } +}; + +#if GTEST_OS_WINDOWS + +// Predicate-formatters for implementing the HRESULT checking macros +// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED} +// We pass a long instead of HRESULT to avoid causing an +// include dependency for the HRESULT type. +GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr, + long hr); // NOLINT +GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr, + long hr); // NOLINT + +#endif // GTEST_OS_WINDOWS + +// Types of SetUpTestSuite() and TearDownTestSuite() functions. +using SetUpTestSuiteFunc = void (*)(); +using TearDownTestSuiteFunc = void (*)(); + +struct CodeLocation { + CodeLocation(const std::string& a_file, int a_line) + : file(a_file), line(a_line) {} + + std::string file; + int line; +}; + +// Helper to identify which setup function for TestCase / TestSuite to call. +// Only one function is allowed, either TestCase or TestSute but not both. + +// Utility functions to help SuiteApiResolver +using SetUpTearDownSuiteFuncType = void (*)(); + +inline SetUpTearDownSuiteFuncType GetNotDefaultOrNull( + SetUpTearDownSuiteFuncType a, SetUpTearDownSuiteFuncType def) { + return a == def ? nullptr : a; +} + +template +// Note that SuiteApiResolver inherits from T because +// SetUpTestSuite()/TearDownTestSuite() could be protected. Ths way +// SuiteApiResolver can access them. +struct SuiteApiResolver : T { + // testing::Test is only forward declared at this point. So we make it a + // dependend class for the compiler to be OK with it. 
+ using Test = + typename std::conditional::type; + + static SetUpTearDownSuiteFuncType GetSetUpCaseOrSuite(const char* filename, + int line_num) { + SetUpTearDownSuiteFuncType test_case_fp = + GetNotDefaultOrNull(&T::SetUpTestCase, &Test::SetUpTestCase); + SetUpTearDownSuiteFuncType test_suite_fp = + GetNotDefaultOrNull(&T::SetUpTestSuite, &Test::SetUpTestSuite); + + GTEST_CHECK_(!test_case_fp || !test_suite_fp) + << "Test can not provide both SetUpTestSuite and SetUpTestCase, please " + "make sure there is only one present at " + << filename << ":" << line_num; + + return test_case_fp != nullptr ? test_case_fp : test_suite_fp; + } + + static SetUpTearDownSuiteFuncType GetTearDownCaseOrSuite(const char* filename, + int line_num) { + SetUpTearDownSuiteFuncType test_case_fp = + GetNotDefaultOrNull(&T::TearDownTestCase, &Test::TearDownTestCase); + SetUpTearDownSuiteFuncType test_suite_fp = + GetNotDefaultOrNull(&T::TearDownTestSuite, &Test::TearDownTestSuite); + + GTEST_CHECK_(!test_case_fp || !test_suite_fp) + << "Test can not provide both TearDownTestSuite and TearDownTestCase," + " please make sure there is only one present at" + << filename << ":" << line_num; + + return test_case_fp != nullptr ? test_case_fp : test_suite_fp; + } +}; + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_suite_name: name of the test suite +// name: name of the test +// type_param the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param text representation of the test's value parameter, +// or NULL if this is not a type-parameterized test. +// code_location: code location where the test is defined +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test suite +// tear_down_tc: pointer to the function that tears down the test suite +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +GTEST_API_ TestInfo* MakeAndRegisterTestInfo( + const char* test_suite_name, const char* name, const char* type_param, + const char* value_param, CodeLocation code_location, + TypeId fixture_class_id, SetUpTestSuiteFunc set_up_tc, + TearDownTestSuiteFunc tear_down_tc, TestFactoryBase* factory); + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr); + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +// State of the definition of a type-parameterized test suite. +class GTEST_API_ TypedTestSuitePState { + public: + TypedTestSuitePState() : registered_(false) {} + + // Adds the given test name to defined_test_names_ and return true + // if the test suite hasn't been registered; otherwise aborts the + // program. 
+ bool AddTestName(const char* file, int line, const char* case_name, + const char* test_name) { + if (registered_) { + fprintf(stderr, + "%s Test %s must be defined before " + "REGISTER_TYPED_TEST_SUITE_P(%s, ...).\n", + FormatFileLocation(file, line).c_str(), test_name, case_name); + fflush(stderr); + posix::Abort(); + } + registered_tests_.insert( + ::std::make_pair(test_name, CodeLocation(file, line))); + return true; + } + + bool TestExists(const std::string& test_name) const { + return registered_tests_.count(test_name) > 0; + } + + const CodeLocation& GetCodeLocation(const std::string& test_name) const { + RegisteredTestsMap::const_iterator it = registered_tests_.find(test_name); + GTEST_CHECK_(it != registered_tests_.end()); + return it->second; + } + + // Verifies that registered_tests match the test names in + // defined_test_names_; returns registered_tests if successful, or + // aborts the program otherwise. + const char* VerifyRegisteredTestNames(const char* test_suite_name, + const char* file, int line, + const char* registered_tests); + + private: + typedef ::std::map RegisteredTestsMap; + + bool registered_; + RegisteredTestsMap registered_tests_; +}; + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +using TypedTestCasePState = TypedTestSuitePState; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +// Skips to the first non-space char after the first comma in 'str'; +// returns NULL if no comma is found in 'str'. +inline const char* SkipComma(const char* str) { + const char* comma = strchr(str, ','); + if (comma == nullptr) { + return nullptr; + } + while (IsSpace(*(++comma))) {} + return comma; +} + +// Returns the prefix of 'str' before the first comma in it; returns +// the entire string if it contains no comma. +inline std::string GetPrefixUntilComma(const char* str) { + const char* comma = strchr(str, ','); + return comma == nullptr ? str : std::string(str, comma); +} + +// Splits a given string on a given delimiter, populating a given +// vector with the fields. +void SplitString(const ::std::string& str, char delimiter, + ::std::vector< ::std::string>* dest); + +// The default argument to the template below for the case when the user does +// not provide a name generator. +struct DefaultNameGenerator { + template + static std::string GetName(int i) { + return StreamableToString(i); + } +}; + +template +struct NameGeneratorSelector { + typedef Provided type; +}; + +template +void GenerateNamesRecursively(internal::None, std::vector*, int) {} + +template +void GenerateNamesRecursively(Types, std::vector* result, int i) { + result->push_back(NameGenerator::template GetName(i)); + GenerateNamesRecursively(typename Types::Tail(), result, + i + 1); +} + +template +std::vector GenerateNames() { + std::vector result; + GenerateNamesRecursively(Types(), &result, 0); + return result; +} + +// TypeParameterizedTest::Register() +// registers a list of type-parameterized tests with Google Test. The +// return value is insignificant - we just need to return something +// such that we can call this function in a namespace scope. +// +// Implementation note: The GTEST_TEMPLATE_ macro declares a template +// template parameter. It's defined in gtest-type-util.h. +template +class TypeParameterizedTest { + public: + // 'index' is the index of the test in the type list 'Types' + // specified in INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, TestSuite, + // Types). 
Valid values for 'index' are [0, N - 1] where N is the + // length of Types. + static bool Register(const char* prefix, const CodeLocation& code_location, + const char* case_name, const char* test_names, int index, + const std::vector& type_names = + GenerateNames()) { + typedef typename Types::Head Type; + typedef Fixture FixtureClass; + typedef typename GTEST_BIND_(TestSel, Type) TestClass; + + // First, registers the first type-parameterized test in the type + // list. + MakeAndRegisterTestInfo( + (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + + "/" + type_names[static_cast(index)]) + .c_str(), + StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(), + GetTypeName().c_str(), + nullptr, // No value parameter. + code_location, GetTypeId(), + SuiteApiResolver::GetSetUpCaseOrSuite( + code_location.file.c_str(), code_location.line), + SuiteApiResolver::GetTearDownCaseOrSuite( + code_location.file.c_str(), code_location.line), + new TestFactoryImpl); + + // Next, recurses (at compile time) with the tail of the type list. + return TypeParameterizedTest::Register(prefix, + code_location, + case_name, + test_names, + index + 1, + type_names); + } +}; + +// The base case for the compile time recursion. +template +class TypeParameterizedTest { + public: + static bool Register(const char* /*prefix*/, const CodeLocation&, + const char* /*case_name*/, const char* /*test_names*/, + int /*index*/, + const std::vector& = + std::vector() /*type_names*/) { + return true; + } +}; + +GTEST_API_ void RegisterTypeParameterizedTestSuite(const char* test_suite_name, + CodeLocation code_location); +GTEST_API_ void RegisterTypeParameterizedTestSuiteInstantiation( + const char* case_name); + +// TypeParameterizedTestSuite::Register() +// registers *all combinations* of 'Tests' and 'Types' with Google +// Test. The return value is insignificant - we just need to return +// something such that we can call this function in a namespace scope. +template +class TypeParameterizedTestSuite { + public: + static bool Register(const char* prefix, CodeLocation code_location, + const TypedTestSuitePState* state, const char* case_name, + const char* test_names, + const std::vector& type_names = + GenerateNames()) { + RegisterTypeParameterizedTestSuiteInstantiation(case_name); + std::string test_name = StripTrailingSpaces( + GetPrefixUntilComma(test_names)); + if (!state->TestExists(test_name)) { + fprintf(stderr, "Failed to get code location for test %s.%s at %s.", + case_name, test_name.c_str(), + FormatFileLocation(code_location.file.c_str(), + code_location.line).c_str()); + fflush(stderr); + posix::Abort(); + } + const CodeLocation& test_location = state->GetCodeLocation(test_name); + + typedef typename Tests::Head Head; + + // First, register the first test in 'Test' for each type in 'Types'. + TypeParameterizedTest::Register( + prefix, test_location, case_name, test_names, 0, type_names); + + // Next, recurses (at compile time) with the tail of the test list. + return TypeParameterizedTestSuite::Register(prefix, code_location, + state, case_name, + SkipComma(test_names), + type_names); + } +}; + +// The base case for the compile time recursion. 
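The compile-time recursion above is driven from the user-facing type-parameterized test macros; a minimal sketch of that surface (illustrative only; NumericTest and DefaultIsZero are made-up names):

#include "gtest/gtest.h"

template <typename T>
class NumericTest : public ::testing::Test {};

TYPED_TEST_SUITE_P(NumericTest);

TYPED_TEST_P(NumericTest, DefaultIsZero) {
  EXPECT_EQ(TypeParam(), TypeParam(0));
}

// The name list is what VerifyRegisteredTestNames() checks above.
REGISTER_TYPED_TEST_SUITE_P(NumericTest, DefaultIsZero);

// Expands into TypeParameterizedTestSuite<...>::Register over each type.
using NumericTypes = ::testing::Types<int, long, double>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, NumericTest, NumericTypes);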
+template +class TypeParameterizedTestSuite { + public: + static bool Register(const char* /*prefix*/, const CodeLocation&, + const TypedTestSuitePState* /*state*/, + const char* /*case_name*/, const char* /*test_names*/, + const std::vector& = + std::vector() /*type_names*/) { + return true; + } +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +GTEST_API_ std::string GetCurrentOsStackTraceExceptTop( + UnitTest* unit_test, int skip_count); + +// Helpers for suppressing warnings on unreachable code or constant +// condition. + +// Always returns true. +GTEST_API_ bool AlwaysTrue(); + +// Always returns false. +inline bool AlwaysFalse() { return !AlwaysTrue(); } + +// Helper for suppressing false warning from Clang on a const char* +// variable declared in a conditional expression always being NULL in +// the else branch. +struct GTEST_API_ ConstCharPtr { + ConstCharPtr(const char* str) : value(str) {} + operator bool() const { return true; } + const char* value; +}; + +// Helper for declaring std::string within 'if' statement +// in pre C++17 build environment. +struct TrueWithString { + TrueWithString() = default; + explicit TrueWithString(const char* str) : value(str) {} + explicit TrueWithString(const std::string& str) : value(str) {} + explicit operator bool() const { return true; } + std::string value; +}; + +// A simple Linear Congruential Generator for generating random +// numbers with a uniform distribution. Unlike rand() and srand(), it +// doesn't use global state (and therefore can't interfere with user +// code). Unlike rand_r(), it's portable. An LCG isn't very random, +// but it's good enough for our purposes. +class GTEST_API_ Random { + public: + static const uint32_t kMaxRange = 1u << 31; + + explicit Random(uint32_t seed) : state_(seed) {} + + void Reseed(uint32_t seed) { state_ = seed; } + + // Generates a random number from [0, range). Crashes if 'range' is + // 0 or greater than kMaxRange. + uint32_t Generate(uint32_t range); + + private: + uint32_t state_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); +}; + +// Turns const U&, U&, const U, and U all into U. +#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \ + typename std::remove_const::type>::type + +// IsAProtocolMessage::value is a compile-time bool constant that's +// true if and only if T is type proto2::Message or a subclass of it. +template +struct IsAProtocolMessage + : public std::is_convertible {}; + +// When the compiler sees expression IsContainerTest(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. 
Therefore, we can determine whether C is +// a container class by checking the type of IsContainerTest(0). +// The value of the expression is insignificant. +// +// In C++11 mode we check the existence of a const_iterator and that an +// iterator is properly implemented for the container. +// +// For pre-C++11 that we look for both C::iterator and C::const_iterator. +// The reason is that C++ injects the name of a class as a member of the +// class itself (e.g. you can refer to class iterator as either +// 'iterator' or 'iterator::iterator'). If we look for C::iterator +// only, for example, we would mistakenly think that a class named +// iterator is an STL container. +// +// Also note that the simpler approach of overloading +// IsContainerTest(typename C::const_iterator*) and +// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++. +typedef int IsContainer; +template ().begin()), + class = decltype(::std::declval().end()), + class = decltype(++::std::declval()), + class = decltype(*::std::declval()), + class = typename C::const_iterator> +IsContainer IsContainerTest(int /* dummy */) { + return 0; +} + +typedef char IsNotContainer; +template +IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } + +// Trait to detect whether a type T is a hash table. +// The heuristic used is that the type contains an inner type `hasher` and does +// not contain an inner type `reverse_iterator`. +// If the container is iterable in reverse, then order might actually matter. +template +struct IsHashTable { + private: + template + static char test(typename U::hasher*, typename U::reverse_iterator*); + template + static int test(typename U::hasher*, ...); + template + static char test(...); + + public: + static const bool value = sizeof(test(nullptr, nullptr)) == sizeof(int); +}; + +template +const bool IsHashTable::value; + +template (0)) == sizeof(IsContainer)> +struct IsRecursiveContainerImpl; + +template +struct IsRecursiveContainerImpl : public std::false_type {}; + +// Since the IsRecursiveContainerImpl depends on the IsContainerTest we need to +// obey the same inconsistencies as the IsContainerTest, namely check if +// something is a container is relying on only const_iterator in C++11 and +// is relying on both const_iterator and iterator otherwise +template +struct IsRecursiveContainerImpl { + using value_type = decltype(*std::declval()); + using type = + std::is_same::type>::type, + C>; +}; + +// IsRecursiveContainer is a unary compile-time predicate that +// evaluates whether C is a recursive container type. A recursive container +// type is a container type whose value_type is equal to the container type +// itself. An example for a recursive container type is +// boost::filesystem::path, whose iterator has a value_type that is equal to +// boost::filesystem::path. +template +struct IsRecursiveContainer : public IsRecursiveContainerImpl::type {}; + +// Utilities for native arrays. + +// ArrayEq() compares two k-dimensional native arrays using the +// elements' operator==, where k can be any integer >= 0. When k is +// 0, ArrayEq() degenerates into comparing a single pair of values. + +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs); + +// This generic version is used when k is 0. +template +inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } + +// This overload is used when k >= 1. +template +inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { + return internal::ArrayEq(lhs, N, rhs); +} + +// This helper reduces code bloat. 
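A few compile-time checks, illustrative only and not part of the vendored sources, showing what the IsHashTable heuristic above reports for common standard containers:

#include <map>
#include <unordered_map>
#include <vector>
#include "gtest/gtest.h"

// Detected via the nested `hasher` type plus the absence of
// `reverse_iterator`; ordered and sequence containers are not flagged.
static_assert(::testing::internal::IsHashTable<
                  std::unordered_map<int, int>>::value, "hash table");
static_assert(!::testing::internal::IsHashTable<
                  std::map<int, int>>::value, "ordered map");
static_assert(!::testing::internal::IsHashTable<
                  std::vector<int>>::value, "sequence container");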
If we instead put its logic inside +// the previous ArrayEq() function, arrays with different sizes would +// lead to different copies of the template code. +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs) { + for (size_t i = 0; i != size; i++) { + if (!internal::ArrayEq(lhs[i], rhs[i])) + return false; + } + return true; +} + +// Finds the first element in the iterator range [begin, end) that +// equals elem. Element may be a native array type itself. +template +Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { + for (Iter it = begin; it != end; ++it) { + if (internal::ArrayEq(*it, elem)) + return it; + } + return end; +} + +// CopyArray() copies a k-dimensional native array using the elements' +// operator=, where k can be any integer >= 0. When k is 0, +// CopyArray() degenerates into copying a single value. + +template +void CopyArray(const T* from, size_t size, U* to); + +// This generic version is used when k is 0. +template +inline void CopyArray(const T& from, U* to) { *to = from; } + +// This overload is used when k >= 1. +template +inline void CopyArray(const T(&from)[N], U(*to)[N]) { + internal::CopyArray(from, N, *to); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous CopyArray() function, arrays with different sizes +// would lead to different copies of the template code. +template +void CopyArray(const T* from, size_t size, U* to) { + for (size_t i = 0; i != size; i++) { + internal::CopyArray(from[i], to + i); + } +} + +// The relation between an NativeArray object (see below) and the +// native array it represents. +// We use 2 different structs to allow non-copyable types to be used, as long +// as RelationToSourceReference() is passed. +struct RelationToSourceReference {}; +struct RelationToSourceCopy {}; + +// Adapts a native array to a read-only STL-style container. Instead +// of the complete STL container concept, this adaptor only implements +// members useful for Google Mock's container matchers. New members +// should be added as needed. To simplify the implementation, we only +// support Element being a raw type (i.e. having no top-level const or +// reference modifier). It's the client's responsibility to satisfy +// this requirement. Element can be an array type itself (hence +// multi-dimensional arrays are supported). +template +class NativeArray { + public: + // STL-style container typedefs. + typedef Element value_type; + typedef Element* iterator; + typedef const Element* const_iterator; + + // Constructs from a native array. References the source. + NativeArray(const Element* array, size_t count, RelationToSourceReference) { + InitRef(array, count); + } + + // Constructs from a native array. Copies the source. + NativeArray(const Element* array, size_t count, RelationToSourceCopy) { + InitCopy(array, count); + } + + // Copy constructor. + NativeArray(const NativeArray& rhs) { + (this->*rhs.clone_)(rhs.array_, rhs.size_); + } + + ~NativeArray() { + if (clone_ != &NativeArray::InitRef) + delete[] array_; + } + + // STL-style container methods. 
+ size_t size() const { return size_; } + const_iterator begin() const { return array_; } + const_iterator end() const { return array_ + size_; } + bool operator==(const NativeArray& rhs) const { + return size() == rhs.size() && + ArrayEq(begin(), size(), rhs.begin()); + } + + private: + static_assert(!std::is_const::value, "Type must not be const"); + static_assert(!std::is_reference::value, + "Type must not be a reference"); + + // Initializes this object with a copy of the input. + void InitCopy(const Element* array, size_t a_size) { + Element* const copy = new Element[a_size]; + CopyArray(array, a_size, copy); + array_ = copy; + size_ = a_size; + clone_ = &NativeArray::InitCopy; + } + + // Initializes this object with a reference of the input. + void InitRef(const Element* array, size_t a_size) { + array_ = array; + size_ = a_size; + clone_ = &NativeArray::InitRef; + } + + const Element* array_; + size_t size_; + void (NativeArray::*clone_)(const Element*, size_t); + + GTEST_DISALLOW_ASSIGN_(NativeArray); +}; + +// Backport of std::index_sequence. +template +struct IndexSequence { + using type = IndexSequence; +}; + +// Double the IndexSequence, and one if plus_one is true. +template +struct DoubleSequence; +template +struct DoubleSequence, sizeofT> { + using type = IndexSequence; +}; +template +struct DoubleSequence, sizeofT> { + using type = IndexSequence; +}; + +// Backport of std::make_index_sequence. +// It uses O(ln(N)) instantiation depth. +template +struct MakeIndexSequence + : DoubleSequence::type, + N / 2>::type {}; + +template <> +struct MakeIndexSequence<0> : IndexSequence<> {}; + +template +struct Ignore { + Ignore(...); // NOLINT +}; + +template +struct ElemFromListImpl; +template +struct ElemFromListImpl> { + // We make Ignore a template to solve a problem with MSVC. + // A non-template Ignore would work fine with `decltype(Ignore(I))...`, but + // MSVC doesn't understand how to deal with that pack expansion. + // Use `0 * I` to have a single instantiation of Ignore. + template + static R Apply(Ignore<0 * I>..., R (*)(), ...); +}; + +template +struct ElemFromList { + using type = + decltype(ElemFromListImpl::type>::Apply( + static_cast(nullptr)...)); +}; + +template +class FlatTuple; + +template +struct FlatTupleElemBase; + +template +struct FlatTupleElemBase, I> { + using value_type = typename ElemFromList::type; + FlatTupleElemBase() = default; + explicit FlatTupleElemBase(value_type t) : value(std::move(t)) {} + value_type value; +}; + +template +struct FlatTupleBase; + +template +struct FlatTupleBase, IndexSequence> + : FlatTupleElemBase, Idx>... { + using Indices = IndexSequence; + FlatTupleBase() = default; + explicit FlatTupleBase(T... t) + : FlatTupleElemBase, Idx>(std::move(t))... {} +}; + +// Analog to std::tuple but with different tradeoffs. +// This class minimizes the template instantiation depth, thus allowing more +// elements than std::tuple would. std::tuple has been seen to require an +// instantiation depth of more than 10x the number of elements in some +// implementations. +// FlatTuple and ElemFromList are not recursive and have a fixed depth +// regardless of T... +// MakeIndexSequence, on the other hand, it is recursive but with an +// instantiation depth of O(ln(N)). +template +class FlatTuple + : private FlatTupleBase, + typename MakeIndexSequence::type> { + using Indices = typename FlatTupleBase< + FlatTuple, typename MakeIndexSequence::type>::Indices; + + public: + FlatTuple() = default; + explicit FlatTuple(T... 
t) : FlatTuple::FlatTupleBase(std::move(t)...) {} + + template + const typename ElemFromList::type& Get() const { + return static_cast*>(this)->value; + } + + template + typename ElemFromList::type& Get() { + return static_cast*>(this)->value; + } +}; + +// Utility functions to be called with static_assert to induce deprecation +// warnings. +GTEST_INTERNAL_DEPRECATED( + "INSTANTIATE_TEST_CASE_P is deprecated, please use " + "INSTANTIATE_TEST_SUITE_P") +constexpr bool InstantiateTestCase_P_IsDeprecated() { return true; } + +GTEST_INTERNAL_DEPRECATED( + "TYPED_TEST_CASE_P is deprecated, please use " + "TYPED_TEST_SUITE_P") +constexpr bool TypedTestCase_P_IsDeprecated() { return true; } + +GTEST_INTERNAL_DEPRECATED( + "TYPED_TEST_CASE is deprecated, please use " + "TYPED_TEST_SUITE") +constexpr bool TypedTestCaseIsDeprecated() { return true; } + +GTEST_INTERNAL_DEPRECATED( + "REGISTER_TYPED_TEST_CASE_P is deprecated, please use " + "REGISTER_TYPED_TEST_SUITE_P") +constexpr bool RegisterTypedTestCase_P_IsDeprecated() { return true; } + +GTEST_INTERNAL_DEPRECATED( + "INSTANTIATE_TYPED_TEST_CASE_P is deprecated, please use " + "INSTANTIATE_TYPED_TEST_SUITE_P") +constexpr bool InstantiateTypedTestCase_P_IsDeprecated() { return true; } + +} // namespace internal +} // namespace testing + +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) \ + = ::testing::Message() + +#define GTEST_MESSAGE_(message, result_type) \ + GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) + +#define GTEST_FATAL_FAILURE_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure) + +#define GTEST_NONFATAL_FAILURE_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure) + +#define GTEST_SUCCESS_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) + +#define GTEST_SKIP_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kSkip) + +// Suppress MSVC warning 4072 (unreachable code) for the code following +// statement if it returns or throws (or doesn't return or throw in some +// situations). +#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \ + if (::testing::internal::AlwaysTrue()) { statement; } + +#define GTEST_TEST_THROW_(statement, expected_exception, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::ConstCharPtr gtest_msg = "") { \ + bool gtest_caught_expected = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (expected_exception const&) { \ + gtest_caught_expected = true; \ + } \ + catch (...) 
{ \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws a different type."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + if (!gtest_caught_expected) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws nothing."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \ + fail(gtest_msg.value) + +#if GTEST_HAS_EXCEPTIONS + +#define GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() \ + catch (std::exception const& e) { \ + gtest_msg.value = ( \ + "it throws std::exception-derived exception with description: \"" \ + ); \ + gtest_msg.value += e.what(); \ + gtest_msg.value += "\"."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } + +#else // GTEST_HAS_EXCEPTIONS + +#define GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() + +#endif // GTEST_HAS_EXCEPTIONS + +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::TrueWithString gtest_msg{}) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() \ + catch (...) { \ + gtest_msg.value = "it throws."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \ + fail(("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: " + gtest_msg.value).c_str()) + +#define GTEST_TEST_ANY_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + bool gtest_caught_any = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + gtest_caught_any = true; \ + } \ + if (!gtest_caught_any) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \ + fail("Expected: " #statement " throws an exception.\n" \ + " Actual: it doesn't.") + + +// Implements Boolean test assertions such as EXPECT_TRUE. expression can be +// either a boolean expression or an AssertionResult. text is a textual +// represenation of expression as it was passed into the EXPECT_TRUE. +#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar_ = \ + ::testing::AssertionResult(expression)) \ + ; \ + else \ + fail(::testing::internal::GetBoolAssertionFailureMessage(\ + gtest_ar_, text, #actual, #expected).c_str()) + +#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \ + fail("Expected: " #statement " doesn't generate new fatal " \ + "failures in the current thread.\n" \ + " Actual: it does.") + +// Expands to the name of the class that implements the given test. +#define GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + test_suite_name##_##test_name##_Test + +// Helper macro for defining tests. 
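// [Editor's note: illustrative sketch, not part of the upstream header.]
// GTEST_TEST_ below is what the public TEST()/TEST_F() macros ultimately
// expand to. Roughly, assuming a hypothetical test
//
//   TEST(FooTest, Bar) { EXPECT_TRUE(true); }
//
// the macro generates a class FooTest_Bar_Test derived from ::testing::Test
// whose TestBody() holds the user's braces, plus a static TestInfo* member
// initialized through MakeAndRegisterTestInfo(), so the test is registered
// with the framework during static initialization, before main() runs.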
+#define GTEST_TEST_(test_suite_name, test_name, parent_class, parent_id) \ + static_assert(sizeof(GTEST_STRINGIFY_(test_suite_name)) > 1, \ + "test_suite_name must not be empty"); \ + static_assert(sizeof(GTEST_STRINGIFY_(test_name)) > 1, \ + "test_name must not be empty"); \ + class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + : public parent_class { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {} \ + ~GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() override = default; \ + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name)); \ + GTEST_DISALLOW_MOVE_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name)); \ + \ + private: \ + void TestBody() override; \ + static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \ + }; \ + \ + ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name)::test_info_ = \ + ::testing::internal::MakeAndRegisterTestInfo( \ + #test_suite_name, #test_name, nullptr, nullptr, \ + ::testing::internal::CodeLocation(__FILE__, __LINE__), (parent_id), \ + ::testing::internal::SuiteApiResolver< \ + parent_class>::GetSetUpCaseOrSuite(__FILE__, __LINE__), \ + ::testing::internal::SuiteApiResolver< \ + parent_class>::GetTearDownCaseOrSuite(__FILE__, __LINE__), \ + new ::testing::internal::TestFactoryImpl); \ + void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody() + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-param-util.h b/source/3rdparty/gtest/include/gtest/internal/gtest-param-util.h new file mode 100644 index 0000000..7f7a13b --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-param-util.h @@ -0,0 +1,934 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Type and function utilities for implementing parameterized tests. 
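// [Editor's note: illustrative sketch, not part of the upstream header.]
// The utilities in this file back the value-parameterized test workflow,
// e.g. (FooTest/DoesBar are hypothetical names):
//
//   class FooTest : public ::testing::TestWithParam<int> {};
//   TEST_P(FooTest, DoesBar) { EXPECT_GE(GetParam(), 0); }
//   INSTANTIATE_TEST_SUITE_P(Small, FooTest, ::testing::Values(1, 2, 3));
//
// TEST_P records a test pattern, INSTANTIATE_TEST_SUITE_P records a value
// generator, and the registry classes defined below combine the two into
// concrete, registered tests at program startup.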
+ +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" +#include "gtest/gtest-printers.h" +#include "gtest/gtest-test-part.h" + +namespace testing { +// Input to a parameterized test name generator, describing a test parameter. +// Consists of the parameter value and the integer parameter index. +template +struct TestParamInfo { + TestParamInfo(const ParamType& a_param, size_t an_index) : + param(a_param), + index(an_index) {} + ParamType param; + size_t index; +}; + +// A builtin parameterized test name generator which returns the result of +// testing::PrintToString. +struct PrintToStringParamName { + template + std::string operator()(const TestParamInfo& info) const { + return PrintToString(info.param); + } +}; + +namespace internal { + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// Utility Functions + +// Outputs a message explaining invalid registration of different +// fixture class for the same test suite. This may happen when +// TEST_P macro is used to define two tests with the same name +// but in different namespaces. +GTEST_API_ void ReportInvalidTestSuiteType(const char* test_suite_name, + CodeLocation code_location); + +template class ParamGeneratorInterface; +template class ParamGenerator; + +// Interface for iterating over elements provided by an implementation +// of ParamGeneratorInterface. +template +class ParamIteratorInterface { + public: + virtual ~ParamIteratorInterface() {} + // A pointer to the base generator instance. + // Used only for the purposes of iterator comparison + // to make sure that two iterators belong to the same generator. + virtual const ParamGeneratorInterface* BaseGenerator() const = 0; + // Advances iterator to point to the next element + // provided by the generator. The caller is responsible + // for not calling Advance() on an iterator equal to + // BaseGenerator()->End(). + virtual void Advance() = 0; + // Clones the iterator object. Used for implementing copy semantics + // of ParamIterator. + virtual ParamIteratorInterface* Clone() const = 0; + // Dereferences the current iterator and provides (read-only) access + // to the pointed value. It is the caller's responsibility not to call + // Current() on an iterator equal to BaseGenerator()->End(). + // Used for implementing ParamGenerator::operator*(). + virtual const T* Current() const = 0; + // Determines whether the given iterator and other point to the same + // element in the sequence generated by the generator. + // Used for implementing ParamGenerator::operator==(). + virtual bool Equals(const ParamIteratorInterface& other) const = 0; +}; + +// Class iterating over elements provided by an implementation of +// ParamGeneratorInterface. It wraps ParamIteratorInterface +// and implements the const forward iterator concept. +template +class ParamIterator { + public: + typedef T value_type; + typedef const T& reference; + typedef ptrdiff_t difference_type; + + // ParamIterator assumes ownership of the impl_ pointer. 
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {} + ParamIterator& operator=(const ParamIterator& other) { + if (this != &other) + impl_.reset(other.impl_->Clone()); + return *this; + } + + const T& operator*() const { return *impl_->Current(); } + const T* operator->() const { return impl_->Current(); } + // Prefix version of operator++. + ParamIterator& operator++() { + impl_->Advance(); + return *this; + } + // Postfix version of operator++. + ParamIterator operator++(int /*unused*/) { + ParamIteratorInterface* clone = impl_->Clone(); + impl_->Advance(); + return ParamIterator(clone); + } + bool operator==(const ParamIterator& other) const { + return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_); + } + bool operator!=(const ParamIterator& other) const { + return !(*this == other); + } + + private: + friend class ParamGenerator; + explicit ParamIterator(ParamIteratorInterface* impl) : impl_(impl) {} + std::unique_ptr > impl_; +}; + +// ParamGeneratorInterface is the binary interface to access generators +// defined in other translation units. +template +class ParamGeneratorInterface { + public: + typedef T ParamType; + + virtual ~ParamGeneratorInterface() {} + + // Generator interface definition + virtual ParamIteratorInterface* Begin() const = 0; + virtual ParamIteratorInterface* End() const = 0; +}; + +// Wraps ParamGeneratorInterface and provides general generator syntax +// compatible with the STL Container concept. +// This class implements copy initialization semantics and the contained +// ParamGeneratorInterface instance is shared among all copies +// of the original object. This is possible because that instance is immutable. +template +class ParamGenerator { + public: + typedef ParamIterator iterator; + + explicit ParamGenerator(ParamGeneratorInterface* impl) : impl_(impl) {} + ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {} + + ParamGenerator& operator=(const ParamGenerator& other) { + impl_ = other.impl_; + return *this; + } + + iterator begin() const { return iterator(impl_->Begin()); } + iterator end() const { return iterator(impl_->End()); } + + private: + std::shared_ptr > impl_; +}; + +// Generates values from a range of two comparable values. Can be used to +// generate sequences of user-defined types that implement operator+() and +// operator<(). +// This class is used in the Range() function. 
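// [Editor's note: illustrative sketch, not part of the upstream header.]
// For example, ::testing::Range(0, 10, 3) yields the sequence 0, 3, 6, 9:
// the end value is exclusive, and CalculateEndIndex() simply counts how many
// steps of size `step` fit strictly before `end`.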
+template +class RangeGenerator : public ParamGeneratorInterface { + public: + RangeGenerator(T begin, T end, IncrementT step) + : begin_(begin), end_(end), + step_(step), end_index_(CalculateEndIndex(begin, end, step)) {} + ~RangeGenerator() override {} + + ParamIteratorInterface* Begin() const override { + return new Iterator(this, begin_, 0, step_); + } + ParamIteratorInterface* End() const override { + return new Iterator(this, end_, end_index_, step_); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, T value, int index, + IncrementT step) + : base_(base), value_(value), index_(index), step_(step) {} + ~Iterator() override {} + + const ParamGeneratorInterface* BaseGenerator() const override { + return base_; + } + void Advance() override { + value_ = static_cast(value_ + step_); + index_++; + } + ParamIteratorInterface* Clone() const override { + return new Iterator(*this); + } + const T* Current() const override { return &value_; } + bool Equals(const ParamIteratorInterface& other) const override { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const int other_index = + CheckedDowncastToActualType(&other)->index_; + return index_ == other_index; + } + + private: + Iterator(const Iterator& other) + : ParamIteratorInterface(), + base_(other.base_), value_(other.value_), index_(other.index_), + step_(other.step_) {} + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + T value_; + int index_; + const IncrementT step_; + }; // class RangeGenerator::Iterator + + static int CalculateEndIndex(const T& begin, + const T& end, + const IncrementT& step) { + int end_index = 0; + for (T i = begin; i < end; i = static_cast(i + step)) + end_index++; + return end_index; + } + + // No implementation - assignment is unsupported. + void operator=(const RangeGenerator& other); + + const T begin_; + const T end_; + const IncrementT step_; + // The index for the end() iterator. All the elements in the generated + // sequence are indexed (0-based) to aid iterator comparison. + const int end_index_; +}; // class RangeGenerator + + +// Generates values from a pair of STL-style iterators. Used in the +// ValuesIn() function. The elements are copied from the source range +// since the source can be located on the stack, and the generator +// is likely to persist beyond that stack frame. 
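// [Editor's note: illustrative sketch, not part of the upstream header;
// FooTest is a hypothetical fixture.]
//
//   std::vector<int> v = {1, 2, 3};
//   INSTANTIATE_TEST_SUITE_P(FromVector, FooTest, ::testing::ValuesIn(v));
//
// The generator keeps its own copy of the elements, so `v` may safely go out
// of scope after the INSTANTIATE_TEST_SUITE_P statement.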
+template +class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface { + public: + template + ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end) + : container_(begin, end) {} + ~ValuesInIteratorRangeGenerator() override {} + + ParamIteratorInterface* Begin() const override { + return new Iterator(this, container_.begin()); + } + ParamIteratorInterface* End() const override { + return new Iterator(this, container_.end()); + } + + private: + typedef typename ::std::vector ContainerType; + + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + typename ContainerType::const_iterator iterator) + : base_(base), iterator_(iterator) {} + ~Iterator() override {} + + const ParamGeneratorInterface* BaseGenerator() const override { + return base_; + } + void Advance() override { + ++iterator_; + value_.reset(); + } + ParamIteratorInterface* Clone() const override { + return new Iterator(*this); + } + // We need to use cached value referenced by iterator_ because *iterator_ + // can return a temporary object (and of type other then T), so just + // having "return &*iterator_;" doesn't work. + // value_ is updated here and not in Advance() because Advance() + // can advance iterator_ beyond the end of the range, and we cannot + // detect that fact. The client code, on the other hand, is + // responsible for not calling Current() on an out-of-range iterator. + const T* Current() const override { + if (value_.get() == nullptr) value_.reset(new T(*iterator_)); + return value_.get(); + } + bool Equals(const ParamIteratorInterface& other) const override { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + return iterator_ == + CheckedDowncastToActualType(&other)->iterator_; + } + + private: + Iterator(const Iterator& other) + // The explicit constructor call suppresses a false warning + // emitted by gcc when supplied with the -Wextra option. + : ParamIteratorInterface(), + base_(other.base_), + iterator_(other.iterator_) {} + + const ParamGeneratorInterface* const base_; + typename ContainerType::const_iterator iterator_; + // A cached value of *iterator_. We keep it here to allow access by + // pointer in the wrapping iterator's operator->(). + // value_ needs to be mutable to be accessed in Current(). + // Use of std::unique_ptr helps manage cached value's lifetime, + // which is bound by the lifespan of the iterator itself. + mutable std::unique_ptr value_; + }; // class ValuesInIteratorRangeGenerator::Iterator + + // No implementation - assignment is unsupported. + void operator=(const ValuesInIteratorRangeGenerator& other); + + const ContainerType container_; +}; // class ValuesInIteratorRangeGenerator + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Default parameterized test name generator, returns a string containing the +// integer test parameter index. +template +std::string DefaultParamName(const TestParamInfo& info) { + Message name_stream; + name_stream << info.index; + return name_stream.GetString(); +} + +template +void TestNotEmpty() { + static_assert(sizeof(T) == 0, "Empty arguments are not allowed."); +} +template +void TestNotEmpty(const T&) {} + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. 
+// +// Stores a parameter value and later creates tests parameterized with that +// value. +template +class ParameterizedTestFactory : public TestFactoryBase { + public: + typedef typename TestClass::ParamType ParamType; + explicit ParameterizedTestFactory(ParamType parameter) : + parameter_(parameter) {} + Test* CreateTest() override { + TestClass::SetParam(¶meter_); + return new TestClass(); + } + + private: + const ParamType parameter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactoryBase is a base class for meta-factories that create +// test factories for passing into MakeAndRegisterTestInfo function. +template +class TestMetaFactoryBase { + public: + virtual ~TestMetaFactoryBase() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0; +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactory creates test factories for passing into +// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives +// ownership of test factory pointer, same factory object cannot be passed +// into that method twice. But ParameterizedTestSuiteInfo is going to call +// it for each Test/Parameter value combination. Thus it needs meta factory +// creator class. +template +class TestMetaFactory + : public TestMetaFactoryBase { + public: + using ParamType = typename TestSuite::ParamType; + + TestMetaFactory() {} + + TestFactoryBase* CreateTestFactory(ParamType parameter) override { + return new ParameterizedTestFactory(parameter); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestSuiteInfoBase is a generic interface +// to ParameterizedTestSuiteInfo classes. ParameterizedTestSuiteInfoBase +// accumulates test information provided by TEST_P macro invocations +// and generators provided by INSTANTIATE_TEST_SUITE_P macro invocations +// and uses that information to register all resulting test instances +// in RegisterTests method. The ParameterizeTestSuiteRegistry class holds +// a collection of pointers to the ParameterizedTestSuiteInfo objects +// and calls RegisterTests() on each of them when asked. +class ParameterizedTestSuiteInfoBase { + public: + virtual ~ParameterizedTestSuiteInfoBase() {} + + // Base part of test suite name for display purposes. + virtual const std::string& GetTestSuiteName() const = 0; + // Test case id to verify identity. + virtual TypeId GetTestSuiteTypeId() const = 0; + // UnitTest class invokes this method to register tests in this + // test suite right before running them in RUN_ALL_TESTS macro. + // This method should not be called more than once on any single + // instance of a ParameterizedTestSuiteInfoBase derived class. + virtual void RegisterTests() = 0; + + protected: + ParameterizedTestSuiteInfoBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteInfoBase); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Report a the name of a test_suit as safe to ignore +// as the side effect of construction of this type. +struct MarkAsIgnored { + explicit MarkAsIgnored(const char* test_suite); +}; + +GTEST_API_ void InsertSyntheticTestCase(const std::string& name, + CodeLocation location, bool has_test_p); + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. 
+// +// ParameterizedTestSuiteInfo accumulates tests obtained from TEST_P +// macro invocations for a particular test suite and generators +// obtained from INSTANTIATE_TEST_SUITE_P macro invocations for that +// test suite. It registers tests with all values generated by all +// generators when asked. +template +class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { + public: + // ParamType and GeneratorCreationFunc are private types but are required + // for declarations of public methods AddTestPattern() and + // AddTestSuiteInstantiation(). + using ParamType = typename TestSuite::ParamType; + // A function that returns an instance of appropriate generator type. + typedef ParamGenerator(GeneratorCreationFunc)(); + using ParamNameGeneratorFunc = std::string(const TestParamInfo&); + + explicit ParameterizedTestSuiteInfo(const char* name, + CodeLocation code_location) + : test_suite_name_(name), code_location_(code_location) {} + + // Test case base name for display purposes. + const std::string& GetTestSuiteName() const override { + return test_suite_name_; + } + // Test case id to verify identity. + TypeId GetTestSuiteTypeId() const override { return GetTypeId(); } + // TEST_P macro uses AddTestPattern() to record information + // about a single test in a LocalTestInfo structure. + // test_suite_name is the base name of the test suite (without invocation + // prefix). test_base_name is the name of an individual test without + // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is + // test suite base name and DoBar is test base name. + void AddTestPattern(const char* test_suite_name, const char* test_base_name, + TestMetaFactoryBase* meta_factory) { + tests_.push_back(std::shared_ptr( + new TestInfo(test_suite_name, test_base_name, meta_factory))); + } + // INSTANTIATE_TEST_SUITE_P macro uses AddGenerator() to record information + // about a generator. + int AddTestSuiteInstantiation(const std::string& instantiation_name, + GeneratorCreationFunc* func, + ParamNameGeneratorFunc* name_func, + const char* file, int line) { + instantiations_.push_back( + InstantiationInfo(instantiation_name, func, name_func, file, line)); + return 0; // Return value used only to run this method in namespace scope. + } + // UnitTest class invokes this method to register tests in this test suite + // right before running tests in RUN_ALL_TESTS macro. + // This method should not be called more than once on any single + // instance of a ParameterizedTestSuiteInfoBase derived class. + // UnitTest has a guard to prevent from calling this method more than once. 
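// [Editor's note: illustrative sketch, not part of the upstream header.]
// Given TEST_P(FooTest, DoBar) and
// INSTANTIATE_TEST_SUITE_P(SequenceA, FooTest, Values(1, 2)), RegisterTests()
// below registers the tests "SequenceA/FooTest.DoBar/0" and
// "SequenceA/FooTest.DoBar/1"; the trailing index comes from
// DefaultParamName() unless a custom name generator was supplied.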
+ void RegisterTests() override { + bool generated_instantiations = false; + + for (typename TestInfoContainer::iterator test_it = tests_.begin(); + test_it != tests_.end(); ++test_it) { + std::shared_ptr test_info = *test_it; + for (typename InstantiationContainer::iterator gen_it = + instantiations_.begin(); gen_it != instantiations_.end(); + ++gen_it) { + const std::string& instantiation_name = gen_it->name; + ParamGenerator generator((*gen_it->generator)()); + ParamNameGeneratorFunc* name_func = gen_it->name_func; + const char* file = gen_it->file; + int line = gen_it->line; + + std::string test_suite_name; + if ( !instantiation_name.empty() ) + test_suite_name = instantiation_name + "/"; + test_suite_name += test_info->test_suite_base_name; + + size_t i = 0; + std::set test_param_names; + for (typename ParamGenerator::iterator param_it = + generator.begin(); + param_it != generator.end(); ++param_it, ++i) { + generated_instantiations = true; + + Message test_name_stream; + + std::string param_name = name_func( + TestParamInfo(*param_it, i)); + + GTEST_CHECK_(IsValidParamName(param_name)) + << "Parameterized test name '" << param_name + << "' is invalid, in " << file + << " line " << line << std::endl; + + GTEST_CHECK_(test_param_names.count(param_name) == 0) + << "Duplicate parameterized test name '" << param_name + << "', in " << file << " line " << line << std::endl; + + test_param_names.insert(param_name); + + if (!test_info->test_base_name.empty()) { + test_name_stream << test_info->test_base_name << "/"; + } + test_name_stream << param_name; + MakeAndRegisterTestInfo( + test_suite_name.c_str(), test_name_stream.GetString().c_str(), + nullptr, // No type parameter. + PrintToString(*param_it).c_str(), code_location_, + GetTestSuiteTypeId(), + SuiteApiResolver::GetSetUpCaseOrSuite(file, line), + SuiteApiResolver::GetTearDownCaseOrSuite(file, line), + test_info->test_meta_factory->CreateTestFactory(*param_it)); + } // for param_it + } // for gen_it + } // for test_it + + if (!generated_instantiations) { + // There are no generaotrs, or they all generate nothing ... + InsertSyntheticTestCase(GetTestSuiteName(), code_location_, + !tests_.empty()); + } + } // RegisterTests + + private: + // LocalTestInfo structure keeps information about a single test registered + // with TEST_P macro. 
+ struct TestInfo { + TestInfo(const char* a_test_suite_base_name, const char* a_test_base_name, + TestMetaFactoryBase* a_test_meta_factory) + : test_suite_base_name(a_test_suite_base_name), + test_base_name(a_test_base_name), + test_meta_factory(a_test_meta_factory) {} + + const std::string test_suite_base_name; + const std::string test_base_name; + const std::unique_ptr > test_meta_factory; + }; + using TestInfoContainer = ::std::vector >; + // Records data received from INSTANTIATE_TEST_SUITE_P macros: + // + struct InstantiationInfo { + InstantiationInfo(const std::string &name_in, + GeneratorCreationFunc* generator_in, + ParamNameGeneratorFunc* name_func_in, + const char* file_in, + int line_in) + : name(name_in), + generator(generator_in), + name_func(name_func_in), + file(file_in), + line(line_in) {} + + std::string name; + GeneratorCreationFunc* generator; + ParamNameGeneratorFunc* name_func; + const char* file; + int line; + }; + typedef ::std::vector InstantiationContainer; + + static bool IsValidParamName(const std::string& name) { + // Check for empty string + if (name.empty()) + return false; + + // Check for invalid characters + for (std::string::size_type index = 0; index < name.size(); ++index) { + if (!isalnum(name[index]) && name[index] != '_') + return false; + } + + return true; + } + + const std::string test_suite_name_; + CodeLocation code_location_; + TestInfoContainer tests_; + InstantiationContainer instantiations_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteInfo); +}; // class ParameterizedTestSuiteInfo + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +template +using ParameterizedTestCaseInfo = ParameterizedTestSuiteInfo; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestSuiteRegistry contains a map of +// ParameterizedTestSuiteInfoBase classes accessed by test suite names. TEST_P +// and INSTANTIATE_TEST_SUITE_P macros use it to locate their corresponding +// ParameterizedTestSuiteInfo descriptors. +class ParameterizedTestSuiteRegistry { + public: + ParameterizedTestSuiteRegistry() {} + ~ParameterizedTestSuiteRegistry() { + for (auto& test_suite_info : test_suite_infos_) { + delete test_suite_info; + } + } + + // Looks up or creates and returns a structure containing information about + // tests and instantiations of a particular test suite. + template + ParameterizedTestSuiteInfo* GetTestSuitePatternHolder( + const char* test_suite_name, CodeLocation code_location) { + ParameterizedTestSuiteInfo* typed_test_info = nullptr; + for (auto& test_suite_info : test_suite_infos_) { + if (test_suite_info->GetTestSuiteName() == test_suite_name) { + if (test_suite_info->GetTestSuiteTypeId() != GetTypeId()) { + // Complain about incorrect usage of Google Test facilities + // and terminate the program since we cannot guaranty correct + // test suite setup and tear-down in this case. + ReportInvalidTestSuiteType(test_suite_name, code_location); + posix::Abort(); + } else { + // At this point we are sure that the object we found is of the same + // type we are looking for, so we downcast it to that type + // without further checks. 
+ typed_test_info = CheckedDowncastToActualType< + ParameterizedTestSuiteInfo >(test_suite_info); + } + break; + } + } + if (typed_test_info == nullptr) { + typed_test_info = new ParameterizedTestSuiteInfo( + test_suite_name, code_location); + test_suite_infos_.push_back(typed_test_info); + } + return typed_test_info; + } + void RegisterTests() { + for (auto& test_suite_info : test_suite_infos_) { + test_suite_info->RegisterTests(); + } + } +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + template + ParameterizedTestCaseInfo* GetTestCasePatternHolder( + const char* test_case_name, CodeLocation code_location) { + return GetTestSuitePatternHolder(test_case_name, code_location); + } + +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + private: + using TestSuiteInfoContainer = ::std::vector; + + TestSuiteInfoContainer test_suite_infos_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteRegistry); +}; + +// Keep track of what type-parameterized test suite are defined and +// where as well as which are intatiated. This allows susequently +// identifying suits that are defined but never used. +class TypeParameterizedTestSuiteRegistry { + public: + // Add a suite definition + void RegisterTestSuite(const char* test_suite_name, + CodeLocation code_location); + + // Add an instantiation of a suit. + void RegisterInstantiation(const char* test_suite_name); + + // For each suit repored as defined but not reported as instantiation, + // emit a test that reports that fact (configurably, as an error). + void CheckForInstantiations(); + + private: + struct TypeParameterizedTestSuiteInfo { + explicit TypeParameterizedTestSuiteInfo(CodeLocation c) + : code_location(c), instantiated(false) {} + + CodeLocation code_location; + bool instantiated; + }; + + std::map suites_; +}; + +} // namespace internal + +// Forward declarations of ValuesIn(), which is implemented in +// include/gtest/gtest-param-test.h. +template +internal::ParamGenerator ValuesIn( + const Container& container); + +namespace internal { +// Used in the Values() function to provide polymorphic capabilities. + +template +class ValueArray { + public: + ValueArray(Ts... v) : v_{std::move(v)...} {} + + template + operator ParamGenerator() const { // NOLINT + return ValuesIn(MakeVector(MakeIndexSequence())); + } + + private: + template + std::vector MakeVector(IndexSequence) const { + return std::vector{static_cast(v_.template Get())...}; + } + + FlatTuple v_; +}; + +template +class CartesianProductGenerator + : public ParamGeneratorInterface<::std::tuple> { + public: + typedef ::std::tuple ParamType; + + CartesianProductGenerator(const std::tuple...>& g) + : generators_(g) {} + ~CartesianProductGenerator() override {} + + ParamIteratorInterface* Begin() const override { + return new Iterator(this, generators_, false); + } + ParamIteratorInterface* End() const override { + return new Iterator(this, generators_, true); + } + + private: + template + class IteratorImpl; + template + class IteratorImpl> + : public ParamIteratorInterface { + public: + IteratorImpl(const ParamGeneratorInterface* base, + const std::tuple...>& generators, bool is_end) + : base_(base), + begin_(std::get(generators).begin()...), + end_(std::get(generators).end()...), + current_(is_end ? 
end_ : begin_) { + ComputeCurrentValue(); + } + ~IteratorImpl() override {} + + const ParamGeneratorInterface* BaseGenerator() const override { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + void Advance() override { + assert(!AtEnd()); + // Advance the last iterator. + ++std::get(current_); + // if that reaches end, propagate that up. + AdvanceIfEnd(); + ComputeCurrentValue(); + } + ParamIteratorInterface* Clone() const override { + return new IteratorImpl(*this); + } + + const ParamType* Current() const override { return current_value_.get(); } + + bool Equals(const ParamIteratorInterface& other) const override { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const IteratorImpl* typed_other = + CheckedDowncastToActualType(&other); + + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + if (AtEnd() && typed_other->AtEnd()) return true; + + bool same = true; + bool dummy[] = { + (same = same && std::get(current_) == + std::get(typed_other->current_))...}; + (void)dummy; + return same; + } + + private: + template + void AdvanceIfEnd() { + if (std::get(current_) != std::get(end_)) return; + + bool last = ThisI == 0; + if (last) { + // We are done. Nothing else to propagate. + return; + } + + constexpr size_t NextI = ThisI - (ThisI != 0); + std::get(current_) = std::get(begin_); + ++std::get(current_); + AdvanceIfEnd(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = std::make_shared(*std::get(current_)...); + } + bool AtEnd() const { + bool at_end = false; + bool dummy[] = { + (at_end = at_end || std::get(current_) == std::get(end_))...}; + (void)dummy; + return at_end; + } + + const ParamGeneratorInterface* const base_; + std::tuple::iterator...> begin_; + std::tuple::iterator...> end_; + std::tuple::iterator...> current_; + std::shared_ptr current_value_; + }; + + using Iterator = IteratorImpl::type>; + + std::tuple...> generators_; +}; + +template +class CartesianProductHolder { + public: + CartesianProductHolder(const Gen&... g) : generators_(g...) {} + template + operator ParamGenerator<::std::tuple>() const { + return ParamGenerator<::std::tuple>( + new CartesianProductGenerator(generators_)); + } + + private: + std::tuple generators_; +}; + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-port-arch.h b/source/3rdparty/gtest/include/gtest/internal/gtest-port-arch.h new file mode 100644 index 0000000..d3239b2 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-port-arch.h @@ -0,0 +1,111 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file defines the GTEST_OS_* macro. +// It is separate from gtest-port.h so that custom/gtest-port.h can include it. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_ + +// Determines the platform on which Google Test is compiled. +#ifdef __CYGWIN__ +# define GTEST_OS_CYGWIN 1 +# elif defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__) +# define GTEST_OS_WINDOWS_MINGW 1 +# define GTEST_OS_WINDOWS 1 +#elif defined _WIN32 +# define GTEST_OS_WINDOWS 1 +# ifdef _WIN32_WCE +# define GTEST_OS_WINDOWS_MOBILE 1 +# elif defined(WINAPI_FAMILY) +# include +# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +# define GTEST_OS_WINDOWS_DESKTOP 1 +# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP) +# define GTEST_OS_WINDOWS_PHONE 1 +# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) +# define GTEST_OS_WINDOWS_RT 1 +# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_TV_TITLE) +# define GTEST_OS_WINDOWS_PHONE 1 +# define GTEST_OS_WINDOWS_TV_TITLE 1 +# else + // WINAPI_FAMILY defined but no known partition matched. + // Default to desktop. 
+# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif +# else +# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif // _WIN32_WCE +#elif defined __OS2__ +# define GTEST_OS_OS2 1 +#elif defined __APPLE__ +# define GTEST_OS_MAC 1 +# if TARGET_OS_IPHONE +# define GTEST_OS_IOS 1 +# endif +#elif defined __DragonFly__ +# define GTEST_OS_DRAGONFLY 1 +#elif defined __FreeBSD__ +# define GTEST_OS_FREEBSD 1 +#elif defined __Fuchsia__ +# define GTEST_OS_FUCHSIA 1 +#elif defined(__GLIBC__) && defined(__FreeBSD_kernel__) +# define GTEST_OS_GNU_KFREEBSD 1 +#elif defined __linux__ +# define GTEST_OS_LINUX 1 +# if defined __ANDROID__ +# define GTEST_OS_LINUX_ANDROID 1 +# endif +#elif defined __MVS__ +# define GTEST_OS_ZOS 1 +#elif defined(__sun) && defined(__SVR4) +# define GTEST_OS_SOLARIS 1 +#elif defined(_AIX) +# define GTEST_OS_AIX 1 +#elif defined(__hpux) +# define GTEST_OS_HPUX 1 +#elif defined __native_client__ +# define GTEST_OS_NACL 1 +#elif defined __NetBSD__ +# define GTEST_OS_NETBSD 1 +#elif defined __OpenBSD__ +# define GTEST_OS_OPENBSD 1 +#elif defined __QNX__ +# define GTEST_OS_QNX 1 +#elif defined(__HAIKU__) +#define GTEST_OS_HAIKU 1 +#elif defined ESP8266 +#define GTEST_OS_ESP8266 1 +#elif defined ESP32 +#define GTEST_OS_ESP32 1 +#endif // __CYGWIN__ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-port.h b/source/3rdparty/gtest/include/gtest/internal/gtest-port.h new file mode 100644 index 0000000..0543da5 --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-port.h @@ -0,0 +1,2223 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Low-level types and utilities for porting Google Test to various +// platforms. All macros ending with _ and symbols defined in an +// internal namespace are subject to change without notice. Code +// outside Google Test MUST NOT USE THEM DIRECTLY. Macros that don't +// end with _ are part of Google Test's public API and can be used by +// code outside Google Test. 
+// +// This file is fundamental to Google Test. All other Google Test source +// files are expected to #include this. Therefore, it cannot #include +// any other Google Test header. + +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +// Environment-describing macros +// ----------------------------- +// +// Google Test can be used in many different environments. Macros in +// this section tell Google Test what kind of environment it is being +// used in, such that Google Test can provide environment-specific +// features and implementations. +// +// Google Test tries to automatically detect the properties of its +// environment, so users usually don't need to worry about these +// macros. However, the automatic detection is not perfect. +// Sometimes it's necessary for a user to define some of the following +// macros in the build script to override Google Test's decisions. +// +// If the user doesn't define a macro in the list, Google Test will +// provide a default definition. After this header is #included, all +// macros in this list will be defined to either 1 or 0. +// +// Notes to maintainers: +// - Each macro here is a user-tweakable knob; do not grow the list +// lightly. +// - Use #if to key off these macros. Don't use #ifdef or "#if +// defined(...)", which will not work as these macros are ALWAYS +// defined. +// +// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2) +// is/isn't available. +// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions +// are enabled. +// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular +// expressions are/aren't available. +// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that +// is/isn't available. +// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't +// enabled. +// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that +// std::wstring does/doesn't work (Google Test can +// be used where std::wstring is unavailable). +// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the +// compiler supports Microsoft's "Structured +// Exception Handling". +// GTEST_HAS_STREAM_REDIRECTION +// - Define it to 1/0 to indicate whether the +// platform supports I/O stream redirection using +// dup() and dup2(). +// GTEST_LINKED_AS_SHARED_LIBRARY +// - Define to 1 when compiling tests that use +// Google Test as a shared library (known as +// DLL on Windows). +// GTEST_CREATE_SHARED_LIBRARY +// - Define to 1 when compiling Google Test itself +// as a shared library. +// GTEST_DEFAULT_DEATH_TEST_STYLE +// - The default value of --gtest_death_test_style. +// The legacy default has been "fast" in the open +// source version since 2008. The recommended value +// is "threadsafe", and can be set in +// custom/gtest-port.h. + +// Platform-indicating macros +// -------------------------- +// +// Macros indicating the platform on which Google Test is being used +// (a macro is defined to 1 if compiled on the given platform; +// otherwise UNDEFINED -- it's never defined to 0.). Google Test +// defines these macros automatically. Code outside Google Test MUST +// NOT define them. 
+// +// GTEST_OS_AIX - IBM AIX +// GTEST_OS_CYGWIN - Cygwin +// GTEST_OS_DRAGONFLY - DragonFlyBSD +// GTEST_OS_FREEBSD - FreeBSD +// GTEST_OS_FUCHSIA - Fuchsia +// GTEST_OS_GNU_KFREEBSD - GNU/kFreeBSD +// GTEST_OS_HAIKU - Haiku +// GTEST_OS_HPUX - HP-UX +// GTEST_OS_LINUX - Linux +// GTEST_OS_LINUX_ANDROID - Google Android +// GTEST_OS_MAC - Mac OS X +// GTEST_OS_IOS - iOS +// GTEST_OS_NACL - Google Native Client (NaCl) +// GTEST_OS_NETBSD - NetBSD +// GTEST_OS_OPENBSD - OpenBSD +// GTEST_OS_OS2 - OS/2 +// GTEST_OS_QNX - QNX +// GTEST_OS_SOLARIS - Sun Solaris +// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile) +// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop +// GTEST_OS_WINDOWS_MINGW - MinGW +// GTEST_OS_WINDOWS_MOBILE - Windows Mobile +// GTEST_OS_WINDOWS_PHONE - Windows Phone +// GTEST_OS_WINDOWS_RT - Windows Store App/WinRT +// GTEST_OS_ZOS - z/OS +// +// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the +// most stable support. Since core members of the Google Test project +// don't have access to other platforms, support for them may be less +// stable. If you notice any problems on your platform, please notify +// googletestframework@googlegroups.com (patches for fixing them are +// even more welcome!). +// +// It is possible that none of the GTEST_OS_* macros are defined. + +// Feature-indicating macros +// ------------------------- +// +// Macros indicating which Google Test features are available (a macro +// is defined to 1 if the corresponding feature is supported; +// otherwise UNDEFINED -- it's never defined to 0.). Google Test +// defines these macros automatically. Code outside Google Test MUST +// NOT define them. +// +// These macros are public so that portable tests can be written. +// Such tests typically surround code using a feature with an #if +// which controls that code. For example: +// +// #if GTEST_HAS_DEATH_TEST +// EXPECT_DEATH(DoSomethingDeadly()); +// #endif +// +// GTEST_HAS_DEATH_TEST - death tests +// GTEST_HAS_TYPED_TEST - typed tests +// GTEST_HAS_TYPED_TEST_P - type-parameterized tests +// GTEST_IS_THREADSAFE - Google Test is thread-safe. +// GOOGLETEST_CM0007 DO NOT DELETE +// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with +// GTEST_HAS_POSIX_RE (see above) which users can +// define themselves. +// GTEST_USES_SIMPLE_RE - our own simple regex is used; +// the above RE\b(s) are mutually exclusive. + +// Misc public macros +// ------------------ +// +// GTEST_FLAG(flag_name) - references the variable corresponding to +// the given Google Test flag. + +// Internal utilities +// ------------------ +// +// The following macros and utilities are for Google Test's INTERNAL +// use only. Code outside Google Test MUST NOT USE THEM DIRECTLY. +// +// Macros for basic C++ coding: +// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. +// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a +// variable don't have to be used. +// GTEST_DISALLOW_ASSIGN_ - disables copy operator=. +// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. +// GTEST_DISALLOW_MOVE_ASSIGN_ - disables move operator=. +// GTEST_DISALLOW_MOVE_AND_ASSIGN_ - disables move ctor and operator=. +// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. +// GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is +// suppressed (constant conditional). +// GTEST_INTENTIONAL_CONST_COND_POP_ - finish code section where MSVC C4127 +// is suppressed. 
+// +// Synchronization: +// Mutex, MutexLock, ThreadLocal, GetThreadCount() +// - synchronization primitives. +// +// Regular expressions: +// RE - a simple regular expression class using the POSIX +// Extended Regular Expression syntax on UNIX-like platforms +// GOOGLETEST_CM0008 DO NOT DELETE +// or a reduced regular exception syntax on other +// platforms, including Windows. +// Logging: +// GTEST_LOG_() - logs messages at the specified severity level. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. +// +// Stdout and stderr capturing: +// CaptureStdout() - starts capturing stdout. +// GetCapturedStdout() - stops capturing stdout and returns the captured +// string. +// CaptureStderr() - starts capturing stderr. +// GetCapturedStderr() - stops capturing stderr and returns the captured +// string. +// +// Integer types: +// TypeWithSize - maps an integer to a int type. +// TimeInMillis - integers of known sizes. +// BiggestInt - the biggest signed integer type. +// +// Command-line utilities: +// GTEST_DECLARE_*() - declares a flag. +// GTEST_DEFINE_*() - defines a flag. +// GetInjectableArgvs() - returns the command line as a vector of strings. +// +// Environment variable utilities: +// GetEnv() - gets the value of an environment variable. +// BoolFromGTestEnv() - parses a bool environment variable. +// Int32FromGTestEnv() - parses an int32_t environment variable. +// StringFromGTestEnv() - parses a string environment variable. +// +// Deprecation warnings: +// GTEST_INTERNAL_DEPRECATED(message) - attribute marking a function as +// deprecated; calling a marked function +// should generate a compiler warning + +#include // for isspace, etc +#include // for ptrdiff_t +#include +#include +#include +#include +#include +#include + +#ifndef _WIN32_WCE +# include +# include +#endif // !_WIN32_WCE + +#if defined __APPLE__ +# include +# include +#endif + +#include // NOLINT +#include +#include // NOLINT +#include +#include // NOLINT + +#include "gtest/internal/custom/gtest-port.h" +#include "gtest/internal/gtest-port-arch.h" + +#if !defined(GTEST_DEV_EMAIL_) +# define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com" +# define GTEST_FLAG_PREFIX_ "gtest_" +# define GTEST_FLAG_PREFIX_DASH_ "gtest-" +# define GTEST_FLAG_PREFIX_UPPER_ "GTEST_" +# define GTEST_NAME_ "Google Test" +# define GTEST_PROJECT_URL_ "https://github.com/google/googletest/" +#endif // !defined(GTEST_DEV_EMAIL_) + +#if !defined(GTEST_INIT_GOOGLE_TEST_NAME_) +# define GTEST_INIT_GOOGLE_TEST_NAME_ "testing::InitGoogleTest" +#endif // !defined(GTEST_INIT_GOOGLE_TEST_NAME_) + +// Determines the version of gcc that is used to compile this. +#ifdef __GNUC__ +// 40302 means version 4.3.2. +# define GTEST_GCC_VER_ \ + (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__) +#endif // __GNUC__ + +// Macros for disabling Microsoft Visual C++ warnings. +// +// GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385) +// /* code that triggers warnings C4800 and C4385 */ +// GTEST_DISABLE_MSC_WARNINGS_POP_() +#if defined(_MSC_VER) +# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \ + __pragma(warning(push)) \ + __pragma(warning(disable: warnings)) +# define GTEST_DISABLE_MSC_WARNINGS_POP_() \ + __pragma(warning(pop)) +#else +// Not all compilers are MSVC +# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) +# define GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif + +// Clang on Windows does not understand MSVC's pragma warning. 
+// We need clang-specific way to disable function deprecation warning. +#ifdef __clang__ +# define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-implementations\"") +#define GTEST_DISABLE_MSC_DEPRECATED_POP_() \ + _Pragma("clang diagnostic pop") +#else +# define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \ + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996) +# define GTEST_DISABLE_MSC_DEPRECATED_POP_() \ + GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif + +// Brings in definitions for functions used in the testing::internal::posix +// namespace (read, write, close, chdir, isatty, stat). We do not currently +// use them on Windows Mobile. +#if GTEST_OS_WINDOWS +# if !GTEST_OS_WINDOWS_MOBILE +# include +# include +# endif +// In order to avoid having to include , use forward declaration +#if GTEST_OS_WINDOWS_MINGW && !defined(__MINGW64_VERSION_MAJOR) +// MinGW defined _CRITICAL_SECTION and _RTL_CRITICAL_SECTION as two +// separate (equivalent) structs, instead of using typedef +typedef struct _CRITICAL_SECTION GTEST_CRITICAL_SECTION; +#else +// Assume CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION. +// This assumption is verified by +// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION. +typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; +#endif +#else +// This assumes that non-Windows OSes provide unistd.h. For OSes where this +// is not the case, we need to include headers that provide the functions +// mentioned above. +# include +# include +#endif // GTEST_OS_WINDOWS + +#if GTEST_OS_LINUX_ANDROID +// Used to define __ANDROID_API__ matching the target NDK API level. +# include // NOLINT +#endif + +// Defines this to true if and only if Google Test can use POSIX regular +// expressions. +#ifndef GTEST_HAS_POSIX_RE +# if GTEST_OS_LINUX_ANDROID +// On Android, is only available starting with Gingerbread. +# define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9) +# else +# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS) +# endif +#endif + +#if GTEST_USES_PCRE +// The appropriate headers have already been included. + +#elif GTEST_HAS_POSIX_RE + +// On some platforms, needs someone to define size_t, and +// won't compile otherwise. We can #include it here as we already +// included , which is guaranteed to define size_t through +// . +# include // NOLINT + +# define GTEST_USES_POSIX_RE 1 + +#elif GTEST_OS_WINDOWS + +// is not available on Windows. Use our own simple regex +// implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#else + +// may not be available on this platform. Use our own +// simple regex implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#endif // GTEST_USES_PCRE + +#ifndef GTEST_HAS_EXCEPTIONS +// The user didn't tell us whether exceptions are enabled, so we need +// to figure it out. +# if defined(_MSC_VER) && defined(_CPPUNWIND) +// MSVC defines _CPPUNWIND to 1 if and only if exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__BORLANDC__) +// C++Builder's implementation of the STL uses the _HAS_EXCEPTIONS +// macro to enable exceptions, so we'll do the same. +// Assumes that exceptions are enabled by default. 
+# ifndef _HAS_EXCEPTIONS +# define _HAS_EXCEPTIONS 1 +# endif // _HAS_EXCEPTIONS +# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS +# elif defined(__clang__) +// clang defines __EXCEPTIONS if and only if exceptions are enabled before clang +// 220714, but if and only if cleanups are enabled after that. In Obj-C++ files, +// there can be cleanups for ObjC exceptions which also need cleanups, even if +// C++ exceptions are disabled. clang has __has_feature(cxx_exceptions) which +// checks for C++ exceptions starting at clang r206352, but which checked for +// cleanups prior to that. To reliably check for C++ exception availability with +// clang, check for +// __EXCEPTIONS && __has_feature(cxx_exceptions). +# define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions)) +# elif defined(__GNUC__) && __EXCEPTIONS +// gcc defines __EXCEPTIONS to 1 if and only if exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__SUNPRO_CC) +// Sun Pro CC supports exceptions. However, there is no compile-time way of +// detecting whether they are enabled or not. Therefore, we assume that +// they are enabled unless the user tells us otherwise. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__IBMCPP__) && __EXCEPTIONS +// xlC defines __EXCEPTIONS to 1 if and only if exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__HP_aCC) +// Exception handling is in effect by default in HP aCC compiler. It has to +// be turned of by +noeh compiler option if desired. +# define GTEST_HAS_EXCEPTIONS 1 +# else +// For other compilers, we assume exceptions are disabled to be +// conservative. +# define GTEST_HAS_EXCEPTIONS 0 +# endif // defined(_MSC_VER) || defined(__BORLANDC__) +#endif // GTEST_HAS_EXCEPTIONS + +#ifndef GTEST_HAS_STD_WSTRING +// The user didn't tell us whether ::std::wstring is available, so we need +// to figure it out. +// Cygwin 1.7 and below doesn't support ::std::wstring. +// Solaris' libc++ doesn't support it either. Android has +// no support for it at least as recent as Froyo (2.2). +#define GTEST_HAS_STD_WSTRING \ + (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \ + GTEST_OS_HAIKU || GTEST_OS_ESP32 || GTEST_OS_ESP8266)) + +#endif // GTEST_HAS_STD_WSTRING + +// Determines whether RTTI is available. +#ifndef GTEST_HAS_RTTI +// The user didn't tell us whether RTTI is enabled, so we need to +// figure it out. + +# ifdef _MSC_VER + +#ifdef _CPPRTTI // MSVC defines this macro if and only if RTTI is enabled. +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +// Starting with version 4.3.2, gcc defines __GXX_RTTI if and only if RTTI is +// enabled. +# elif defined(__GNUC__) + +# ifdef __GXX_RTTI +// When building against STLport with the Android NDK and with +// -frtti -fno-exceptions, the build fails at link time with undefined +// references to __cxa_bad_typeid. Note sure if STL or toolchain bug, +// so disable RTTI when detected. +# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \ + !defined(__EXCEPTIONS) +# define GTEST_HAS_RTTI 0 +# else +# define GTEST_HAS_RTTI 1 +# endif // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS +# else +# define GTEST_HAS_RTTI 0 +# endif // __GXX_RTTI + +// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends +// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the +// first version with C++ support. 
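GTEST_HAS_EXCEPTIONS, computed above, is the switch user code should consult before relying on exception-based assertions. A minimal, illustrative sketch (not part of this header); TEST, EXPECT_THROW and SUCCEED come from gtest.h:

    #include <stdexcept>
    #include "gtest/gtest.h"

    TEST(PortSketch, ExceptionAssertionsOnlyWhenEnabled) {
    #if GTEST_HAS_EXCEPTIONS
      // Requires exception support in both Google Test and the test binary.
      EXPECT_THROW(throw std::runtime_error("boom"), std::runtime_error);
    #else
      SUCCEED() << "Built without exceptions; nothing to verify here.";
    #endif
    }
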
+# elif defined(__clang__) + +# define GTEST_HAS_RTTI __has_feature(cxx_rtti) + +// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if +// both the typeid and dynamic_cast features are present. +# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) + +# ifdef __RTTI_ALL__ +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +# else + +// For all other compilers, we assume RTTI is enabled. +# define GTEST_HAS_RTTI 1 + +# endif // _MSC_VER + +#endif // GTEST_HAS_RTTI + +// It's this header's responsibility to #include when RTTI +// is enabled. +#if GTEST_HAS_RTTI +# include +#endif + +// Determines whether Google Test can use the pthreads library. +#ifndef GTEST_HAS_PTHREAD +// The user didn't tell us explicitly, so we make reasonable assumptions about +// which platforms have pthreads support. +// +// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0 +// to your compiler flags. +#define GTEST_HAS_PTHREAD \ + (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX || GTEST_OS_QNX || \ + GTEST_OS_FREEBSD || GTEST_OS_NACL || GTEST_OS_NETBSD || GTEST_OS_FUCHSIA || \ + GTEST_OS_DRAGONFLY || GTEST_OS_GNU_KFREEBSD || GTEST_OS_OPENBSD || \ + GTEST_OS_HAIKU) +#endif // GTEST_HAS_PTHREAD + +#if GTEST_HAS_PTHREAD +// gtest-port.h guarantees to #include when GTEST_HAS_PTHREAD is +// true. +# include // NOLINT + +// For timespec and nanosleep, used below. +# include // NOLINT +#endif + +// Determines whether clone(2) is supported. +// Usually it will only be available on Linux, excluding +// Linux on the Itanium architecture. +// Also see http://linux.die.net/man/2/clone. +#ifndef GTEST_HAS_CLONE +// The user didn't tell us, so we need to figure it out. + +# if GTEST_OS_LINUX && !defined(__ia64__) +# if GTEST_OS_LINUX_ANDROID +// On Android, clone() became available at different API levels for each 32-bit +// architecture. +# if defined(__LP64__) || \ + (defined(__arm__) && __ANDROID_API__ >= 9) || \ + (defined(__mips__) && __ANDROID_API__ >= 12) || \ + (defined(__i386__) && __ANDROID_API__ >= 17) +# define GTEST_HAS_CLONE 1 +# else +# define GTEST_HAS_CLONE 0 +# endif +# else +# define GTEST_HAS_CLONE 1 +# endif +# else +# define GTEST_HAS_CLONE 0 +# endif // GTEST_OS_LINUX && !defined(__ia64__) + +#endif // GTEST_HAS_CLONE + +// Determines whether to support stream redirection. This is used to test +// output correctness and to implement death tests. +#ifndef GTEST_HAS_STREAM_REDIRECTION +// By default, we assume that stream redirection is supported on all +// platforms except known mobile ones. +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \ + GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 +# define GTEST_HAS_STREAM_REDIRECTION 0 +# else +# define GTEST_HAS_STREAM_REDIRECTION 1 +# endif // !GTEST_OS_WINDOWS_MOBILE +#endif // GTEST_HAS_STREAM_REDIRECTION + +// Determines whether to support death tests. +// pops up a dialog window that cannot be suppressed programmatically. +#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \ + (GTEST_OS_MAC && !GTEST_OS_IOS) || \ + (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER) || GTEST_OS_WINDOWS_MINGW || \ + GTEST_OS_AIX || GTEST_OS_HPUX || GTEST_OS_OPENBSD || GTEST_OS_QNX || \ + GTEST_OS_FREEBSD || GTEST_OS_NETBSD || GTEST_OS_FUCHSIA || \ + GTEST_OS_DRAGONFLY || GTEST_OS_GNU_KFREEBSD || GTEST_OS_HAIKU) +# define GTEST_HAS_DEATH_TEST 1 +#endif + +// Determines whether to support type-driven tests. 
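The platform capability macros derived above (GTEST_HAS_PTHREAD, GTEST_HAS_CLONE, GTEST_HAS_STREAM_REDIRECTION, GTEST_HAS_DEATH_TEST) are what tests key off when a construct only exists on some targets. A hedged sketch, assuming gtest.h is included and that this vendored version provides GTEST_SKIP():

    #include <cstdlib>
    #include "gtest/gtest.h"

    TEST(PortSketch, DeathTestOnlyWhereSupported) {
    #if GTEST_HAS_DEATH_TEST
      // The empty matcher accepts any stderr output from the dying process.
      EXPECT_DEATH(std::abort(), "");
    #else
      GTEST_SKIP() << "Death tests are not supported on this platform.";
    #endif
    }
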
+ +// Typed tests need and variadic macros, which GCC, VC++ 8.0, +// Sun Pro CC, IBM Visual Age, and HP aCC support. +#if defined(__GNUC__) || defined(_MSC_VER) || defined(__SUNPRO_CC) || \ + defined(__IBMCPP__) || defined(__HP_aCC) +# define GTEST_HAS_TYPED_TEST 1 +# define GTEST_HAS_TYPED_TEST_P 1 +#endif + +// Determines whether the system compiler uses UTF-16 for encoding wide strings. +#define GTEST_WIDE_STRING_USES_UTF16_ \ + (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_AIX || GTEST_OS_OS2) + +// Determines whether test results can be streamed to a socket. +#if GTEST_OS_LINUX || GTEST_OS_GNU_KFREEBSD || GTEST_OS_DRAGONFLY || \ + GTEST_OS_FREEBSD || GTEST_OS_NETBSD || GTEST_OS_OPENBSD +# define GTEST_CAN_STREAM_RESULTS_ 1 +#endif + +// Defines some utility macros. + +// The GNU compiler emits a warning if nested "if" statements are followed by +// an "else" statement and braces are not used to explicitly disambiguate the +// "else" binding. This leads to problems with code like: +// +// if (gate) +// ASSERT_*(condition) << "Some message"; +// +// The "switch (0) case 0:" idiom is used to suppress this. +#ifdef __INTEL_COMPILER +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ +#else +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT +#endif + +// Use this annotation at the end of a struct/class definition to +// prevent the compiler from optimizing away instances that are never +// used. This is useful when all interesting logic happens inside the +// c'tor and / or d'tor. Example: +// +// struct Foo { +// Foo() { ... } +// } GTEST_ATTRIBUTE_UNUSED_; +// +// Also use it after a variable or parameter declaration to tell the +// compiler the variable/parameter does not have to be used. +#if defined(__GNUC__) && !defined(COMPILER_ICC) +# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +#elif defined(__clang__) +# if __has_attribute(unused) +# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +# endif +#endif +#ifndef GTEST_ATTRIBUTE_UNUSED_ +# define GTEST_ATTRIBUTE_UNUSED_ +#endif + +// Use this annotation before a function that takes a printf format string. +#if (defined(__GNUC__) || defined(__clang__)) && !defined(COMPILER_ICC) +# if defined(__MINGW_PRINTF_FORMAT) +// MinGW has two different printf implementations. Ensure the format macro +// matches the selected implementation. See +// https://sourceforge.net/p/mingw-w64/wiki2/gnu%20printf/. +# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \ + __attribute__((__format__(__MINGW_PRINTF_FORMAT, string_index, \ + first_to_check))) +# else +# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +# endif +#else +# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) +#endif + + +// A macro to disallow copy operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_ASSIGN_(type) \ + type& operator=(type const &) = delete + +// A macro to disallow copy constructor and operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type) \ + type(type const &) = delete; \ + GTEST_DISALLOW_ASSIGN_(type) + +// A macro to disallow move operator= +// This should be used in the private: declarations for a class. 
+#define GTEST_DISALLOW_MOVE_ASSIGN_(type) \ + type& operator=(type &&) noexcept = delete + +// A macro to disallow move constructor and operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_MOVE_AND_ASSIGN_(type) \ + type(type &&) noexcept = delete; \ + GTEST_DISALLOW_MOVE_ASSIGN_(type) + +// Tell the compiler to warn about unused return values for functions declared +// with this macro. The macro should be used on function declarations +// following the argument list: +// +// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_; +#if defined(__GNUC__) && !defined(COMPILER_ICC) +# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result)) +#else +# define GTEST_MUST_USE_RESULT_ +#endif // __GNUC__ && !COMPILER_ICC + +// MS C++ compiler emits warning when a conditional expression is compile time +// constant. In some contexts this warning is false positive and needs to be +// suppressed. Use the following two macros in such cases: +// +// GTEST_INTENTIONAL_CONST_COND_PUSH_() +// while (true) { +// GTEST_INTENTIONAL_CONST_COND_POP_() +// } +# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \ + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127) +# define GTEST_INTENTIONAL_CONST_COND_POP_() \ + GTEST_DISABLE_MSC_WARNINGS_POP_() + +// Determine whether the compiler supports Microsoft's Structured Exception +// Handling. This is supported by several Windows compilers but generally +// does not exist on any other system. +#ifndef GTEST_HAS_SEH +// The user didn't tell us, so we need to figure it out. + +# if defined(_MSC_VER) || defined(__BORLANDC__) +// These two compilers are known to support SEH. +# define GTEST_HAS_SEH 1 +# else +// Assume no SEH. +# define GTEST_HAS_SEH 0 +# endif + +#endif // GTEST_HAS_SEH + +#ifndef GTEST_IS_THREADSAFE + +#define GTEST_IS_THREADSAFE \ + (GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ || \ + (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) || \ + GTEST_HAS_PTHREAD) + +#endif // GTEST_IS_THREADSAFE + +// GTEST_API_ qualifies all symbols that must be exported. The definitions below +// are guarded by #ifndef to give embedders a chance to define GTEST_API_ in +// gtest/internal/custom/gtest-port.h +#ifndef GTEST_API_ + +#ifdef _MSC_VER +# if GTEST_LINKED_AS_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllimport) +# elif GTEST_CREATE_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllexport) +# endif +#elif __GNUC__ >= 4 || defined(__clang__) +# define GTEST_API_ __attribute__((visibility ("default"))) +#endif // _MSC_VER + +#endif // GTEST_API_ + +#ifndef GTEST_API_ +# define GTEST_API_ +#endif // GTEST_API_ + +#ifndef GTEST_DEFAULT_DEATH_TEST_STYLE +# define GTEST_DEFAULT_DEATH_TEST_STYLE "fast" +#endif // GTEST_DEFAULT_DEATH_TEST_STYLE + +#ifdef __GNUC__ +// Ask the compiler to never inline a given function. +# define GTEST_NO_INLINE_ __attribute__((noinline)) +#else +# define GTEST_NO_INLINE_ +#endif + +// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project. +#if !defined(GTEST_HAS_CXXABI_H_) +# if defined(__GLIBCXX__) || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) +# define GTEST_HAS_CXXABI_H_ 1 +# else +# define GTEST_HAS_CXXABI_H_ 0 +# endif +#endif + +// A function level attribute to disable checking for use of uninitialized +// memory when built with MemorySanitizer. 
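A small sketch of how the class-helper macros defined above are meant to be combined; ScopedBuffer and Reserve are made-up names used only for illustration:

    class ScopedBuffer {
     public:
      ScopedBuffer() = default;

      // GCC/Clang warn if a caller ignores the return value.
      bool Reserve(int bytes) GTEST_MUST_USE_RESULT_;

     private:
      // Deletes the copy constructor and the copy assignment operator.
      GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedBuffer);
    };
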
+#if defined(__clang__) +# if __has_feature(memory_sanitizer) +# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \ + __attribute__((no_sanitize_memory)) +# else +# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +# endif // __has_feature(memory_sanitizer) +#else +# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +#endif // __clang__ + +// A function level attribute to disable AddressSanitizer instrumentation. +#if defined(__clang__) +# if __has_feature(address_sanitizer) +# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \ + __attribute__((no_sanitize_address)) +# else +# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +# endif // __has_feature(address_sanitizer) +#else +# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +#endif // __clang__ + +// A function level attribute to disable HWAddressSanitizer instrumentation. +#if defined(__clang__) +# if __has_feature(hwaddress_sanitizer) +# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ \ + __attribute__((no_sanitize("hwaddress"))) +# else +# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +# endif // __has_feature(hwaddress_sanitizer) +#else +# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +#endif // __clang__ + +// A function level attribute to disable ThreadSanitizer instrumentation. +#if defined(__clang__) +# if __has_feature(thread_sanitizer) +# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \ + __attribute__((no_sanitize_thread)) +# else +# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +# endif // __has_feature(thread_sanitizer) +#else +# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +#endif // __clang__ + +namespace testing { + +class Message; + +// Legacy imports for backwards compatibility. +// New code should use std:: names directly. +using std::get; +using std::make_tuple; +using std::tuple; +using std::tuple_element; +using std::tuple_size; + +namespace internal { + +// A secret type that Google Test users don't know about. It has no +// definition on purpose. Therefore it's impossible to create a +// Secret object, which is what we want. +class Secret; + +// The GTEST_COMPILE_ASSERT_ is a legacy macro used to verify that a compile +// time expression is true (in new code, use static_assert instead). For +// example, you could use it to verify the size of a static array: +// +// GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES, +// names_incorrect_size); +// +// The second argument to the macro must be a valid C++ identifier. If the +// expression is false, compiler will issue an error containing this identifier. +#define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg) + +// A helper for suppressing warnings on constant condition. It just +// returns 'condition'. +GTEST_API_ bool IsTrue(bool condition); + +// Defines RE. + +#if GTEST_USES_PCRE +// if used, PCRE is injected by custom/gtest-port.h +#elif GTEST_USES_POSIX_RE || GTEST_USES_SIMPLE_RE + +// A simple C++ wrapper for . It uses the POSIX Extended +// Regular Expression syntax. +class GTEST_API_ RE { + public: + // A copy constructor is required by the Standard to initialize object + // references from r-values. + RE(const RE& other) { Init(other.pattern()); } + + // Constructs an RE from a string. + RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT + + RE(const char* regex) { Init(regex); } // NOLINT + ~RE(); + + // Returns the string representation of the regex. + const char* pattern() const { return pattern_; } + + // FullMatch(str, re) returns true if and only if regular expression re + // matches the entire str. 
+ // PartialMatch(str, re) returns true if and only if regular expression re + // matches a substring of str (including str itself). + static bool FullMatch(const ::std::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::std::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + + static bool FullMatch(const char* str, const RE& re); + static bool PartialMatch(const char* str, const RE& re); + + private: + void Init(const char* regex); + const char* pattern_; + bool is_valid_; + +# if GTEST_USES_POSIX_RE + + regex_t full_regex_; // For FullMatch(). + regex_t partial_regex_; // For PartialMatch(). + +# else // GTEST_USES_SIMPLE_RE + + const char* full_pattern_; // For FullMatch(); + +# endif + + GTEST_DISALLOW_ASSIGN_(RE); +}; + +#endif // GTEST_USES_PCRE + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line); + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, + int line); + +// Defines logging utilities: +// GTEST_LOG_(severity) - logs messages at the specified severity level. The +// message itself is streamed into the macro. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. + +enum GTestLogSeverity { + GTEST_INFO, + GTEST_WARNING, + GTEST_ERROR, + GTEST_FATAL +}; + +// Formats log entry severity, provides a stream object for streaming the +// log message, and terminates the message with a newline when going out of +// scope. +class GTEST_API_ GTestLog { + public: + GTestLog(GTestLogSeverity severity, const char* file, int line); + + // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. + ~GTestLog(); + + ::std::ostream& GetStream() { return ::std::cerr; } + + private: + const GTestLogSeverity severity_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); +}; + +#if !defined(GTEST_LOG_) + +# define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__).GetStream() + +inline void LogToStderr() {} +inline void FlushInfoLog() { fflush(nullptr); } + +#endif // !defined(GTEST_LOG_) + +#if !defined(GTEST_CHECK_) +// INTERNAL IMPLEMENTATION - DO NOT USE. +// +// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition +// is not satisfied. +// Synopsys: +// GTEST_CHECK_(boolean_condition); +// or +// GTEST_CHECK_(boolean_condition) << "Additional message"; +// +// This checks the condition and if the condition is not satisfied +// it prints message about the condition violation, including the +// condition itself, plus additional message streamed into it, if any, +// and then it aborts the program. It aborts the program irrespective of +// whether it is built in the debug mode or not. +# define GTEST_CHECK_(condition) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::IsTrue(condition)) \ + ; \ + else \ + GTEST_LOG_(FATAL) << "Condition " #condition " failed. " +#endif // !defined(GTEST_CHECK_) + +// An all-mode assert to verify that the given POSIX-style function +// call returns 0 (indicating success). 
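As a sketch of how Google Test itself uses these all-mode asserts (assuming a pthreads platform; GTEST_CHECK_POSIX_SUCCESS_ is the wrapper defined immediately below for calls that return 0 on success):

    #include <pthread.h>

    void InitSharedState(pthread_mutex_t* mutex, int* value) {
      // Aborts the program, in any build mode, if the condition is false.
      GTEST_CHECK_(value != nullptr) << "value must not be null";
      // pthread_mutex_init() returns 0 on success; any other value logs a
      // FATAL message naming the call and the error code, then aborts.
      GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(mutex, nullptr));
      *value = 0;
    }
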
Known limitation: this +// doesn't expand to a balanced 'if' statement, so enclose the macro +// in {} if you need to use it as the only statement in an 'if' +// branch. +#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \ + if (const int gtest_error = (posix_call)) \ + GTEST_LOG_(FATAL) << #posix_call << "failed with error " \ + << gtest_error + +// Transforms "T" into "const T&" according to standard reference collapsing +// rules (this is only needed as a backport for C++98 compilers that do not +// support reference collapsing). Specifically, it transforms: +// +// char ==> const char& +// const char ==> const char& +// char& ==> char& +// const char& ==> const char& +// +// Note that the non-const reference will not have "const" added. This is +// standard, and necessary so that "T" can always bind to "const T&". +template +struct ConstRef { typedef const T& type; }; +template +struct ConstRef { typedef T& type; }; + +// The argument T must depend on some template parameters. +#define GTEST_REFERENCE_TO_CONST_(T) \ + typename ::testing::internal::ConstRef::type + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Use ImplicitCast_ as a safe version of static_cast for upcasting in +// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a +// const Foo*). When you use ImplicitCast_, the compiler checks that +// the cast is safe. Such explicit ImplicitCast_s are necessary in +// surprisingly many situations where C++ demands an exact type match +// instead of an argument type convertable to a target type. +// +// The syntax for using ImplicitCast_ is the same as for static_cast: +// +// ImplicitCast_(expr) +// +// ImplicitCast_ would have been part of the C++ standard library, +// but the proposal was submitted too late. It will probably make +// its way into the language in the future. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., implicit_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template +inline To ImplicitCast_(To x) { return x; } + +// When you upcast (that is, cast a pointer from type Foo to type +// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts +// always succeed. When you downcast (that is, cast a pointer from +// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because +// how do you know the pointer is really of type SubclassOfFoo? It +// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus, +// when you downcast, you should use this macro. In debug mode, we +// use dynamic_cast<> to double-check the downcast is legal (we die +// if it's not). In normal mode, we do the efficient static_cast<> +// instead. Thus, it's important to test in debug mode to make sure +// the cast is legal! +// This is the only place in the code we should use dynamic_cast<>. +// In particular, you SHOULDN'T be using dynamic_cast<> in order to +// do RTTI (eg code like this: +// if (dynamic_cast(foo)) HandleASubclass1Object(foo); +// if (dynamic_cast(foo)) HandleASubclass2Object(foo); +// You should design the code some other way not to need this. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., down_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers + // Ensures that To is a sub-type of From *. 
This test is here only + // for compile-time type checking, and has no overhead in an + // optimized build at run-time, as it will be optimized away + // completely. + GTEST_INTENTIONAL_CONST_COND_PUSH_() + if (false) { + GTEST_INTENTIONAL_CONST_COND_POP_() + const To to = nullptr; + ::testing::internal::ImplicitCast_(to); + } + +#if GTEST_HAS_RTTI + // RTTI: debug mode only! + GTEST_CHECK_(f == nullptr || dynamic_cast(f) != nullptr); +#endif + return static_cast(f); +} + +// Downcasts the pointer of type Base to Derived. +// Derived must be a subclass of Base. The parameter MUST +// point to a class of type Derived, not any subclass of it. +// When RTTI is available, the function performs a runtime +// check to enforce this. +template +Derived* CheckedDowncastToActualType(Base* base) { +#if GTEST_HAS_RTTI + GTEST_CHECK_(typeid(*base) == typeid(Derived)); +#endif + +#if GTEST_HAS_DOWNCAST_ + return ::down_cast(base); +#elif GTEST_HAS_RTTI + return dynamic_cast(base); // NOLINT +#else + return static_cast(base); // Poor man's downcast. +#endif +} + +#if GTEST_HAS_STREAM_REDIRECTION + +// Defines the stderr capturer: +// CaptureStdout - starts capturing stdout. +// GetCapturedStdout - stops capturing stdout and returns the captured string. +// CaptureStderr - starts capturing stderr. +// GetCapturedStderr - stops capturing stderr and returns the captured string. +// +GTEST_API_ void CaptureStdout(); +GTEST_API_ std::string GetCapturedStdout(); +GTEST_API_ void CaptureStderr(); +GTEST_API_ std::string GetCapturedStderr(); + +#endif // GTEST_HAS_STREAM_REDIRECTION +// Returns the size (in bytes) of a file. +GTEST_API_ size_t GetFileSize(FILE* file); + +// Reads the entire content of a file as a string. +GTEST_API_ std::string ReadEntireFile(FILE* file); + +// All command line arguments. +GTEST_API_ std::vector GetArgvs(); + +#if GTEST_HAS_DEATH_TEST + +std::vector GetInjectableArgvs(); +// Deprecated: pass the args vector by value instead. +void SetInjectableArgvs(const std::vector* new_argvs); +void SetInjectableArgvs(const std::vector& new_argvs); +void ClearInjectableArgvs(); + +#endif // GTEST_HAS_DEATH_TEST + +// Defines synchronization primitives. +#if GTEST_IS_THREADSAFE +# if GTEST_HAS_PTHREAD +// Sleeps for (roughly) n milliseconds. This function is only for testing +// Google Test's own constructs. Don't use it in user tests, either +// directly or indirectly. +inline void SleepMilliseconds(int n) { + const timespec time = { + 0, // 0 seconds. + n * 1000L * 1000L, // And n ms. + }; + nanosleep(&time, nullptr); +} +# endif // GTEST_HAS_PTHREAD + +# if GTEST_HAS_NOTIFICATION_ +// Notification has already been imported into the namespace. +// Nothing to do here. + +# elif GTEST_HAS_PTHREAD +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class Notification { + public: + Notification() : notified_(false) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr)); + } + ~Notification() { + pthread_mutex_destroy(&mutex_); + } + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { + pthread_mutex_lock(&mutex_); + notified_ = true; + pthread_mutex_unlock(&mutex_); + } + + // Blocks until the controller thread notifies. 
Must be called from a test + // thread. + void WaitForNotification() { + for (;;) { + pthread_mutex_lock(&mutex_); + const bool notified = notified_; + pthread_mutex_unlock(&mutex_); + if (notified) + break; + SleepMilliseconds(10); + } + } + + private: + pthread_mutex_t mutex_; + bool notified_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; + +# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + +GTEST_API_ void SleepMilliseconds(int n); + +// Provides leak-safe Windows kernel handle ownership. +// Used in death tests and in threading support. +class GTEST_API_ AutoHandle { + public: + // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to + // avoid including in this header file. Including is + // undesirable because it defines a lot of symbols and macros that tend to + // conflict with client code. This assumption is verified by + // WindowsTypesTest.HANDLEIsVoidStar. + typedef void* Handle; + AutoHandle(); + explicit AutoHandle(Handle handle); + + ~AutoHandle(); + + Handle Get() const; + void Reset(); + void Reset(Handle handle); + + private: + // Returns true if and only if the handle is a valid handle object that can be + // closed. + bool IsCloseable() const; + + Handle handle_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle); +}; + +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class GTEST_API_ Notification { + public: + Notification(); + void Notify(); + void WaitForNotification(); + + private: + AutoHandle event_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; +# endif // GTEST_HAS_NOTIFICATION_ + +// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD +// defined, but we don't want to use MinGW's pthreads implementation, which +// has conformance problems with some versions of the POSIX standard. +# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW + +// As a C-function, ThreadFuncWithCLinkage cannot be templated itself. +// Consequently, it cannot select a correct instantiation of ThreadWithParam +// in order to call its Run(). Introducing ThreadWithParamBase as a +// non-templated base class for ThreadWithParam allows us to bypass this +// problem. +class ThreadWithParamBase { + public: + virtual ~ThreadWithParamBase() {} + virtual void Run() = 0; +}; + +// pthread_create() accepts a pointer to a function type with the C linkage. +// According to the Standard (7.5/1), function types with different linkages +// are different even if they are otherwise identical. Some compilers (for +// example, SunStudio) treat them as different types. Since class methods +// cannot be defined with C-linkage we need to define a free C-function to +// pass into pthread_create(). +extern "C" inline void* ThreadFuncWithCLinkage(void* thread) { + static_cast(thread)->Run(); + return nullptr; +} + +// Helper class for testing Google Test's multi-threading constructs. +// To use it, write: +// +// void ThreadFunc(int param) { /* Do things with param */ } +// Notification thread_can_start; +// ... +// // The thread_can_start parameter is optional; you can supply NULL. +// ThreadWithParam thread(&ThreadFunc, 5, &thread_can_start); +// thread_can_start.Notify(); +// +// These classes are only for testing Google Test's own constructs. 
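The same usage, with the template argument written out explicitly and fully qualified names; ThreadWithParam itself is defined just below, and ThreadFunc/RunWorker are illustrative names only:

    void ThreadFunc(int param) { /* do things with param */ }

    void RunWorker() {
      ::testing::internal::Notification thread_can_start;
      ::testing::internal::ThreadWithParam<int> thread(
          &ThreadFunc, 5, &thread_can_start);
      thread_can_start.Notify();  // lets ThreadFunc(5) start running
    }                             // ~ThreadWithParam() joins the thread
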
Do +// not use them in user tests, either directly or indirectly. +template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void UserThreadFunc(T); + + ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start) + : func_(func), + param_(param), + thread_can_start_(thread_can_start), + finished_(false) { + ThreadWithParamBase* const base = this; + // The thread can be created only after all fields except thread_ + // have been initialized. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_create(&thread_, nullptr, &ThreadFuncWithCLinkage, base)); + } + ~ThreadWithParam() override { Join(); } + + void Join() { + if (!finished_) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, nullptr)); + finished_ = true; + } + } + + void Run() override { + if (thread_can_start_ != nullptr) thread_can_start_->WaitForNotification(); + func_(param_); + } + + private: + UserThreadFunc* const func_; // User-supplied thread function. + const T param_; // User-supplied parameter to the thread function. + // When non-NULL, used to block execution until the controller thread + // notifies. + Notification* const thread_can_start_; + bool finished_; // true if and only if we know that the thread function has + // finished. + pthread_t thread_; // The native thread object. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; +# endif // !GTEST_OS_WINDOWS && GTEST_HAS_PTHREAD || + // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ + +# if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ +// Mutex and ThreadLocal have already been imported into the namespace. +// Nothing to do here. + +# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + +// Mutex implements mutex on Windows platforms. It is used in conjunction +// with class MutexLock: +// +// Mutex mutex; +// ... +// MutexLock lock(&mutex); // Acquires the mutex and releases it at the +// // end of the current scope. +// +// A static Mutex *must* be defined or declared using one of the following +// macros: +// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex); +// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex); +// +// (A non-static Mutex is defined/declared in the usual way). +class GTEST_API_ Mutex { + public: + enum MutexType { kStatic = 0, kDynamic = 1 }; + // We rely on kStaticMutex being 0 as it is to what the linker initializes + // type_ in static mutexes. critical_section_ will be initialized lazily + // in ThreadSafeLazyInit(). + enum StaticConstructorSelector { kStaticMutex = 0 }; + + // This constructor intentionally does nothing. It relies on type_ being + // statically initialized to 0 (effectively setting it to kStatic) and on + // ThreadSafeLazyInit() to lazily initialize the rest of the members. + explicit Mutex(StaticConstructorSelector /*dummy*/) {} + + Mutex(); + ~Mutex(); + + void Lock(); + + void Unlock(); + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. + void AssertHeld(); + + private: + // Initializes owner_thread_id_ and critical_section_ in static mutexes. + void ThreadSafeLazyInit(); + + // Per https://blogs.msdn.microsoft.com/oldnewthing/20040223-00/?p=40503, + // we assume that 0 is an invalid value for thread IDs. + unsigned int owner_thread_id_; + + // For static mutexes, we rely on these members being initialized to zeros + // by the linker. 
+ MutexType type_; + long critical_section_init_phase_; // NOLINT + GTEST_CRITICAL_SECTION* critical_section_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex) + +// We cannot name this class MutexLock because the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. That macro is used as a defensive measure to prevent against +// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than +// "MutexLock l(&mu)". Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + Mutex* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Base class for ValueHolder. Allows a caller to hold and delete a value +// without knowing its type. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Provides a way for a thread to send notifications to a ThreadLocal +// regardless of its parameter type. +class ThreadLocalBase { + public: + // Creates a new ValueHolder object holding a default value passed to + // this ThreadLocal's constructor and returns it. It is the caller's + // responsibility not to call this when the ThreadLocal instance already + // has a value on the current thread. + virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0; + + protected: + ThreadLocalBase() {} + virtual ~ThreadLocalBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase); +}; + +// Maps a thread to a set of ThreadLocals that have values instantiated on that +// thread and notifies them when the thread exits. A ThreadLocal instance is +// expected to persist until all threads it has values on have terminated. +class GTEST_API_ ThreadLocalRegistry { + public: + // Registers thread_local_instance as having value on the current thread. + // Returns a value that can be used to identify the thread from other threads. + static ThreadLocalValueHolderBase* GetValueOnCurrentThread( + const ThreadLocalBase* thread_local_instance); + + // Invoked when a ThreadLocal instance is destroyed. + static void OnThreadLocalDestroyed( + const ThreadLocalBase* thread_local_instance); +}; + +class GTEST_API_ ThreadWithParamBase { + public: + void Join(); + + protected: + class Runnable { + public: + virtual ~Runnable() {} + virtual void Run() = 0; + }; + + ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start); + virtual ~ThreadWithParamBase(); + + private: + AutoHandle thread_; +}; + +// Helper class for testing Google Test's multi-threading constructs. 
+template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void UserThreadFunc(T); + + ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start) + : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) { + } + virtual ~ThreadWithParam() {} + + private: + class RunnableImpl : public Runnable { + public: + RunnableImpl(UserThreadFunc* func, T param) + : func_(func), + param_(param) { + } + virtual ~RunnableImpl() {} + virtual void Run() { + func_(param_); + } + + private: + UserThreadFunc* const func_; + const T param_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl); + }; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; + +// Implements thread-local storage on Windows systems. +// +// // Thread 1 +// ThreadLocal tl(100); // 100 is the default value for each thread. +// +// // Thread 2 +// tl.set(150); // Changes the value for thread 2 only. +// EXPECT_EQ(150, tl.get()); +// +// // Thread 1 +// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value. +// tl.set(200); +// EXPECT_EQ(200, tl.get()); +// +// The template type argument T must have a public copy constructor. +// In addition, the default ThreadLocal constructor requires T to have +// a public default constructor. +// +// The users of a TheadLocal instance have to make sure that all but one +// threads (including the main one) using that instance have exited before +// destroying it. Otherwise, the per-thread objects managed for them by the +// ThreadLocal instance are not guaranteed to be destroyed on all platforms. +// +// Google Test only uses global ThreadLocal objects. That means they +// will die after main() has returned. Therefore, no per-thread +// object managed by Google Test will be leaked as long as all threads +// using Google Test have exited when main() returns. +template +class ThreadLocal : public ThreadLocalBase { + public: + ThreadLocal() : default_factory_(new DefaultValueHolderFactory()) {} + explicit ThreadLocal(const T& value) + : default_factory_(new InstanceValueHolderFactory(value)) {} + + ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); } + + T* pointer() { return GetOrCreateValue(); } + const T* pointer() const { return GetOrCreateValue(); } + const T& get() const { return *pointer(); } + void set(const T& value) { *pointer() = value; } + + private: + // Holds a value of T. Can be deleted via its base class without the caller + // knowing the type of T. 
+ class ValueHolder : public ThreadLocalValueHolderBase { + public: + ValueHolder() : value_() {} + explicit ValueHolder(const T& value) : value_(value) {} + + T* pointer() { return &value_; } + + private: + T value_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + }; + + + T* GetOrCreateValue() const { + return static_cast( + ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer(); + } + + virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const { + return default_factory_->MakeNewHolder(); + } + + class ValueHolderFactory { + public: + ValueHolderFactory() {} + virtual ~ValueHolderFactory() {} + virtual ValueHolder* MakeNewHolder() const = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory); + }; + + class DefaultValueHolderFactory : public ValueHolderFactory { + public: + DefaultValueHolderFactory() {} + ValueHolder* MakeNewHolder() const override { return new ValueHolder(); } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory); + }; + + class InstanceValueHolderFactory : public ValueHolderFactory { + public: + explicit InstanceValueHolderFactory(const T& value) : value_(value) {} + ValueHolder* MakeNewHolder() const override { + return new ValueHolder(value_); + } + + private: + const T value_; // The value for each thread. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory); + }; + + std::unique_ptr default_factory_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); +}; + +# elif GTEST_HAS_PTHREAD + +// MutexBase and Mutex implement mutex on pthreads-based platforms. +class MutexBase { + public: + // Acquires this mutex. + void Lock() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_)); + owner_ = pthread_self(); + has_owner_ = true; + } + + // Releases this mutex. + void Unlock() { + // Since the lock is being released the owner_ field should no longer be + // considered valid. We don't protect writing to has_owner_ here, as it's + // the caller's responsibility to ensure that the current thread holds the + // mutex when this is called. + has_owner_ = false; + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_)); + } + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. + void AssertHeld() const { + GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self())) + << "The current thread is not holding the mutex @" << this; + } + + // A static mutex may be used before main() is entered. It may even + // be used before the dynamic initialization stage. Therefore we + // must be able to initialize a static mutex object at link time. + // This means MutexBase has to be a POD and its member variables + // have to be public. + public: + pthread_mutex_t mutex_; // The underlying pthread mutex. + // has_owner_ indicates whether the owner_ field below contains a valid thread + // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All + // accesses to the owner_ field should be protected by a check of this field. + // An alternative might be to memset() owner_ to all zeros, but there's no + // guarantee that a zero'd pthread_t is necessarily invalid or even different + // from pthread_self(). + bool has_owner_; + pthread_t owner_; // The thread holding the mutex. +}; + +// Forward-declares a static mutex. +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::MutexBase mutex + +// Defines and statically (i.e. at link time) initializes a static mutex. 
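Before the implementation notes, a sketch of the intended pattern: the declaration goes in a header, the definition in exactly one .cc file, and the MutexLock typedef (which follows a little further down in this branch) scopes the lock; g_names_mutex and AddName are illustrative names:

    // In a header: announce the mutex without defining it.
    GTEST_DECLARE_STATIC_MUTEX_(g_names_mutex);

    // In exactly one .cc file: define and statically initialize it.
    GTEST_DEFINE_STATIC_MUTEX_(g_names_mutex);

    int g_name_count = 0;

    void AddName() {
      // Acquired here, released automatically at the end of the scope.
      ::testing::internal::MutexLock lock(&g_names_mutex);
      ++g_name_count;
    }
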
+// The initialization list here does not explicitly initialize each field, +// instead relying on default initialization for the unspecified fields. In +// particular, the owner_ field (a pthread_t) is not explicitly initialized. +// This allows initialization to work whether pthread_t is a scalar or struct. +// The flag -Wmissing-field-initializers must not be specified for this to work. +#define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::MutexBase mutex = {PTHREAD_MUTEX_INITIALIZER, false, 0} + +// The Mutex class can only be used for mutexes created at runtime. It +// shares its API with MutexBase otherwise. +class Mutex : public MutexBase { + public: + Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr)); + has_owner_ = false; + } + ~Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +// We cannot name this class MutexLock because the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. That macro is used as a defensive measure to prevent against +// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than +// "MutexLock l(&mu)". Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(MutexBase* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + MutexBase* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Helpers for ThreadLocal. + +// pthread_key_create() requires DeleteThreadLocalValue() to have +// C-linkage. Therefore it cannot be templatized to access +// ThreadLocal. Hence the need for class +// ThreadLocalValueHolderBase. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Called by pthread to delete thread-local data stored by +// pthread_setspecific(). +extern "C" inline void DeleteThreadLocalValue(void* value_holder) { + delete static_cast(value_holder); +} + +// Implements thread-local storage on pthreads-based systems. +template +class GTEST_API_ ThreadLocal { + public: + ThreadLocal() + : key_(CreateKey()), default_factory_(new DefaultValueHolderFactory()) {} + explicit ThreadLocal(const T& value) + : key_(CreateKey()), + default_factory_(new InstanceValueHolderFactory(value)) {} + + ~ThreadLocal() { + // Destroys the managed object for the current thread, if any. + DeleteThreadLocalValue(pthread_getspecific(key_)); + + // Releases resources associated with the key. This will *not* + // delete managed objects for other threads. + GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_)); + } + + T* pointer() { return GetOrCreateValue(); } + const T* pointer() const { return GetOrCreateValue(); } + const T& get() const { return *pointer(); } + void set(const T& value) { *pointer() = value; } + + private: + // Holds a value of type T. + class ValueHolder : public ThreadLocalValueHolderBase { + public: + ValueHolder() : value_() {} + explicit ValueHolder(const T& value) : value_(value) {} + + T* pointer() { return &value_; } + + private: + T value_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + }; + + static pthread_key_t CreateKey() { + pthread_key_t key; + // When a thread exits, DeleteThreadLocalValue() will be called on + // the object managed for that thread. 
+ GTEST_CHECK_POSIX_SUCCESS_( + pthread_key_create(&key, &DeleteThreadLocalValue)); + return key; + } + + T* GetOrCreateValue() const { + ThreadLocalValueHolderBase* const holder = + static_cast(pthread_getspecific(key_)); + if (holder != nullptr) { + return CheckedDowncastToActualType(holder)->pointer(); + } + + ValueHolder* const new_holder = default_factory_->MakeNewHolder(); + ThreadLocalValueHolderBase* const holder_base = new_holder; + GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base)); + return new_holder->pointer(); + } + + class ValueHolderFactory { + public: + ValueHolderFactory() {} + virtual ~ValueHolderFactory() {} + virtual ValueHolder* MakeNewHolder() const = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory); + }; + + class DefaultValueHolderFactory : public ValueHolderFactory { + public: + DefaultValueHolderFactory() {} + ValueHolder* MakeNewHolder() const override { return new ValueHolder(); } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory); + }; + + class InstanceValueHolderFactory : public ValueHolderFactory { + public: + explicit InstanceValueHolderFactory(const T& value) : value_(value) {} + ValueHolder* MakeNewHolder() const override { + return new ValueHolder(value_); + } + + private: + const T value_; // The value for each thread. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory); + }; + + // A key pthreads uses for looking up per-thread values. + const pthread_key_t key_; + std::unique_ptr default_factory_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); +}; + +# endif // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ + +#else // GTEST_IS_THREADSAFE + +// A dummy implementation of synchronization primitives (mutex, lock, +// and thread-local variable). Necessary for compiling Google Test where +// mutex is not supported - using Google Test in multiple threads is not +// supported on such platforms. + +class Mutex { + public: + Mutex() {} + void Lock() {} + void Unlock() {} + void AssertHeld() const {} +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex + +// We cannot name this class MutexLock because the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. That macro is used as a defensive measure to prevent against +// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than +// "MutexLock l(&mu)". Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex*) {} // NOLINT +}; + +typedef GTestMutexLock MutexLock; + +template +class GTEST_API_ ThreadLocal { + public: + ThreadLocal() : value_() {} + explicit ThreadLocal(const T& value) : value_(value) {} + T* pointer() { return &value_; } + const T* pointer() const { return &value_; } + const T& get() const { return value_; } + void set(const T& value) { value_ = value; } + private: + T value_; +}; + +#endif // GTEST_IS_THREADSAFE + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +GTEST_API_ size_t GetThreadCount(); + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_SEP_ "\\" +# define GTEST_HAS_ALT_PATH_SEP_ 1 +#else +# define GTEST_PATH_SEP_ "/" +# define GTEST_HAS_ALT_PATH_SEP_ 0 +#endif // GTEST_OS_WINDOWS + +// Utilities for char. + +// isspace(int ch) and friends accept an unsigned char or EOF. char +// may be signed, depending on the compiler (or compiler flags). 
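A short sketch of the portability helpers defined above; kGoldenPath is an illustrative file name:

    #include <string>

    // GTEST_PATH_SEP_ is "\\" on Windows and "/" elsewhere.
    const std::string kGoldenPath =
        std::string("testdata") + GTEST_PATH_SEP_ + "golden.txt";

    // 0 means the thread count cannot be determined on this platform.
    const size_t kThreadsAtStartup = ::testing::internal::GetThreadCount();
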
+// Therefore we need to cast a char to unsigned char before calling +// isspace(), etc. + +inline bool IsAlpha(char ch) { + return isalpha(static_cast(ch)) != 0; +} +inline bool IsAlNum(char ch) { + return isalnum(static_cast(ch)) != 0; +} +inline bool IsDigit(char ch) { + return isdigit(static_cast(ch)) != 0; +} +inline bool IsLower(char ch) { + return islower(static_cast(ch)) != 0; +} +inline bool IsSpace(char ch) { + return isspace(static_cast(ch)) != 0; +} +inline bool IsUpper(char ch) { + return isupper(static_cast(ch)) != 0; +} +inline bool IsXDigit(char ch) { + return isxdigit(static_cast(ch)) != 0; +} +inline bool IsXDigit(wchar_t ch) { + const unsigned char low_byte = static_cast(ch); + return ch == low_byte && isxdigit(low_byte) != 0; +} + +inline char ToLower(char ch) { + return static_cast(tolower(static_cast(ch))); +} +inline char ToUpper(char ch) { + return static_cast(toupper(static_cast(ch))); +} + +inline std::string StripTrailingSpaces(std::string str) { + std::string::iterator it = str.end(); + while (it != str.begin() && IsSpace(*--it)) + it = str.erase(it); + return str; +} + +// The testing::internal::posix namespace holds wrappers for common +// POSIX functions. These wrappers hide the differences between +// Windows/MSVC and POSIX systems. Since some compilers define these +// standard functions as macros, the wrapper cannot have the same name +// as the wrapped function. + +namespace posix { + +// Functions with a different name on Windows. + +#if GTEST_OS_WINDOWS + +typedef struct _stat StatStruct; + +# ifdef __BORLANDC__ +inline int IsATTY(int fd) { return isatty(fd); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +# else // !__BORLANDC__ +# if GTEST_OS_WINDOWS_MOBILE +inline int IsATTY(int /* fd */) { return 0; } +# else +inline int IsATTY(int fd) { return _isatty(fd); } +# endif // GTEST_OS_WINDOWS_MOBILE +inline int StrCaseCmp(const char* s1, const char* s2) { + return _stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return _strdup(src); } +# endif // __BORLANDC__ + +# if GTEST_OS_WINDOWS_MOBILE +inline int FileNo(FILE* file) { return reinterpret_cast(_fileno(file)); } +// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this +// time and thus not defined there. 
+# else +inline int FileNo(FILE* file) { return _fileno(file); } +inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); } +inline int RmDir(const char* dir) { return _rmdir(dir); } +inline bool IsDir(const StatStruct& st) { + return (_S_IFDIR & st.st_mode) != 0; +} +# endif // GTEST_OS_WINDOWS_MOBILE + +#elif GTEST_OS_ESP8266 +typedef struct stat StatStruct; + +inline int FileNo(FILE* file) { return fileno(file); } +inline int IsATTY(int fd) { return isatty(fd); } +inline int Stat(const char* path, StatStruct* buf) { + // stat function not implemented on ESP8266 + return 0; +} +inline int StrCaseCmp(const char* s1, const char* s2) { + return strcasecmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +inline int RmDir(const char* dir) { return rmdir(dir); } +inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); } + +#else + +typedef struct stat StatStruct; + +inline int FileNo(FILE* file) { return fileno(file); } +inline int IsATTY(int fd) { return isatty(fd); } +inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return strcasecmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +inline int RmDir(const char* dir) { return rmdir(dir); } +inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); } + +#endif // GTEST_OS_WINDOWS + +// Functions deprecated by MSVC 8.0. + +GTEST_DISABLE_MSC_DEPRECATED_PUSH_() + +// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and +// StrError() aren't needed on Windows CE at this time and thus not +// defined there. + +#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT +inline int ChDir(const char* dir) { return chdir(dir); } +#endif +inline FILE* FOpen(const char* path, const char* mode) { + return fopen(path, mode); +} +#if !GTEST_OS_WINDOWS_MOBILE +inline FILE *FReopen(const char* path, const char* mode, FILE* stream) { + return freopen(path, mode, stream); +} +inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); } +#endif +inline int FClose(FILE* fp) { return fclose(fp); } +#if !GTEST_OS_WINDOWS_MOBILE +inline int Read(int fd, void* buf, unsigned int count) { + return static_cast(read(fd, buf, count)); +} +inline int Write(int fd, const void* buf, unsigned int count) { + return static_cast(write(fd, buf, count)); +} +inline int Close(int fd) { return close(fd); } +inline const char* StrError(int errnum) { return strerror(errnum); } +#endif +inline const char* GetEnv(const char* name) { +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \ + GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 + // We are on an embedded platform, which has no environment variables. + static_cast(name); // To prevent 'unused argument' warning. + return nullptr; +#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9) + // Environment variables which we programmatically clear will be set to the + // empty string rather than unset (NULL). Handle that case. + const char* const env = getenv(name); + return (env != nullptr && env[0] != '\0') ? env : nullptr; +#else + return getenv(name); +#endif +} + +GTEST_DISABLE_MSC_DEPRECATED_POP_() + +#if GTEST_OS_WINDOWS_MOBILE +// Windows CE has no C library. The abort() function is used in +// several places in Google Test. This implementation provides a reasonable +// imitation of standard behaviour. 
+[[noreturn]] void Abort(); +#else +[[noreturn]] inline void Abort() { abort(); } +#endif // GTEST_OS_WINDOWS_MOBILE + +} // namespace posix + +// MSVC "deprecates" snprintf and issues warnings wherever it is used. In +// order to avoid these warnings, we need to use _snprintf or _snprintf_s on +// MSVC-based platforms. We map the GTEST_SNPRINTF_ macro to the appropriate +// function in order to achieve that. We use macro definition here because +// snprintf is a variadic function. +#if _MSC_VER && !GTEST_OS_WINDOWS_MOBILE +// MSVC 2005 and above support variadic macros. +# define GTEST_SNPRINTF_(buffer, size, format, ...) \ + _snprintf_s(buffer, size, size, format, __VA_ARGS__) +#elif defined(_MSC_VER) +// Windows CE does not define _snprintf_s +# define GTEST_SNPRINTF_ _snprintf +#else +# define GTEST_SNPRINTF_ snprintf +#endif + +// The biggest signed integer type the compiler supports. +// +// long long is guaranteed to be at least 64-bits in C++11. +using BiggestInt = long long; // NOLINT + +// The maximum number a BiggestInt can represent. +constexpr BiggestInt kMaxBiggestInt = (std::numeric_limits::max)(); + +// This template class serves as a compile-time function from size to +// type. It maps a size in bytes to a primitive type with that +// size. e.g. +// +// TypeWithSize<4>::UInt +// +// is typedef-ed to be unsigned int (unsigned integer made up of 4 +// bytes). +// +// Such functionality should belong to STL, but I cannot find it +// there. +// +// Google Test uses this class in the implementation of floating-point +// comparison. +// +// For now it only handles UInt (unsigned int) as that's all Google Test +// needs. Other types can be easily added in the future if need +// arises. +template +class TypeWithSize { + public: + // This prevents the user from using TypeWithSize with incorrect + // values of N. + using UInt = void; +}; + +// The specialization for size 4. +template <> +class TypeWithSize<4> { + public: + using Int = std::int32_t; + using UInt = std::uint32_t; +}; + +// The specialization for size 8. +template <> +class TypeWithSize<8> { + public: + using Int = std::int64_t; + using UInt = std::uint64_t; +}; + +// Integer types of known sizes. +using TimeInMillis = int64_t; // Represents time in milliseconds. + +// Utilities for command line flags and environment variables. + +// Macro for referencing flags. +#if !defined(GTEST_FLAG) +# define GTEST_FLAG(name) FLAGS_gtest_##name +#endif // !defined(GTEST_FLAG) + +#if !defined(GTEST_USE_OWN_FLAGFILE_FLAG_) +# define GTEST_USE_OWN_FLAGFILE_FLAG_ 1 +#endif // !defined(GTEST_USE_OWN_FLAGFILE_FLAG_) + +#if !defined(GTEST_DECLARE_bool_) +# define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver + +// Macros for declaring flags. +# define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name) +# define GTEST_DECLARE_int32_(name) \ + GTEST_API_ extern std::int32_t GTEST_FLAG(name) +# define GTEST_DECLARE_string_(name) \ + GTEST_API_ extern ::std::string GTEST_FLAG(name) + +// Macros for defining flags. 
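A sketch of the flag plumbing: GTEST_DECLARE_*() (above) belongs in a header, GTEST_DEFINE_*() (just below) in a .cc file, and GTEST_FLAG() names the backing variable. "my_feature" is a made-up flag, not one Google Test actually defines:

    // In a header:
    GTEST_DECLARE_bool_(my_feature);

    // In a .cc file:
    GTEST_DEFINE_bool_(my_feature, false, "Enables a hypothetical feature.");

    // Anywhere after the declaration; expands to FLAGS_gtest_my_feature.
    bool FeatureRequested() { return GTEST_FLAG(my_feature); }
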
+# define GTEST_DEFINE_bool_(name, default_val, doc) \ + GTEST_API_ bool GTEST_FLAG(name) = (default_val) +# define GTEST_DEFINE_int32_(name, default_val, doc) \ + GTEST_API_ std::int32_t GTEST_FLAG(name) = (default_val) +# define GTEST_DEFINE_string_(name, default_val, doc) \ + GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val) + +#endif // !defined(GTEST_DECLARE_bool_) + +// Thread annotations +#if !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_) +# define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks) +# define GTEST_LOCK_EXCLUDED_(locks) +#endif // !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_) + +// Parses 'str' for a 32-bit signed integer. If successful, writes the result +// to *value and returns true; otherwise leaves *value unchanged and returns +// false. +bool ParseInt32(const Message& src_text, const char* str, int32_t* value); + +// Parses a bool/int32_t/string from the environment variable +// corresponding to the given Google Test flag. +bool BoolFromGTestEnv(const char* flag, bool default_val); +GTEST_API_ int32_t Int32FromGTestEnv(const char* flag, int32_t default_val); +std::string OutputFlagAlsoCheckEnvVar(); +const char* StringFromGTestEnv(const char* flag, const char* default_val); + +} // namespace internal +} // namespace testing + +#if !defined(GTEST_INTERNAL_DEPRECATED) + +// Internal Macro to mark an API deprecated, for googletest usage only +// Usage: class GTEST_INTERNAL_DEPRECATED(message) MyClass or +// GTEST_INTERNAL_DEPRECATED(message) myFunction(); Every usage of +// a deprecated entity will trigger a warning when compiled with +// `-Wdeprecated-declarations` option (clang, gcc, any __GNUC__ compiler). +// For msvc /W3 option will need to be used +// Note that for 'other' compilers this macro evaluates to nothing to prevent +// compilations errors. +#if defined(_MSC_VER) +#define GTEST_INTERNAL_DEPRECATED(message) __declspec(deprecated(message)) +#elif defined(__GNUC__) +#define GTEST_INTERNAL_DEPRECATED(message) __attribute__((deprecated(message))) +#else +#define GTEST_INTERNAL_DEPRECATED(message) +#endif + +#endif // !defined(GTEST_INTERNAL_DEPRECATED) + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-string.h b/source/3rdparty/gtest/include/gtest/internal/gtest-string.h new file mode 100644 index 0000000..0b2a91a --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-string.h @@ -0,0 +1,172 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This header file declares the String class and functions used internally by +// Google Test. They are subject to change without notice. They should not used +// by code external to Google Test. +// +// This header file is #included by gtest-internal.h. +// It should not be #included by other files. + +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ + +#ifdef __BORLANDC__ +// string.h is not guaranteed to provide strcpy on C++ Builder. +# include +#endif + +#include +#include +#include + +#include "gtest/internal/gtest-port.h" + +namespace testing { +namespace internal { + +// String - an abstract class holding static string utilities. +class GTEST_API_ String { + public: + // Static utility methods + + // Clones a 0-terminated C string, allocating memory using new. The + // caller is responsible for deleting the return value using + // delete[]. Returns the cloned string, or NULL if the input is + // NULL. + // + // This is different from strdup() in string.h, which allocates + // memory using malloc(). + static const char* CloneCString(const char* c_str); + +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be + // able to pass strings to Win32 APIs on CE we need to convert them + // to 'Unicode', UTF-16. + + // Creates a UTF-16 wide string from the given ANSI string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the wide string, or NULL if the + // input is NULL. + // + // The wide string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static LPCWSTR AnsiToUtf16(const char* c_str); + + // Creates an ANSI string from the given wide string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the ANSI string, or NULL if the + // input is NULL. + // + // The returned string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static const char* Utf16ToAnsi(LPCWSTR utf16_str); +#endif + + // Compares two C strings. Returns true if and only if they have the same + // content. + // + // Unlike strcmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CStringEquals(const char* lhs, const char* rhs); + + // Converts a wide C string to a String using the UTF-8 encoding. + // NULL will be converted to "(null)". If an error occurred during + // the conversion, "(failed to convert from wide string)" is + // returned. + static std::string ShowWideCString(const wchar_t* wide_c_str); + + // Compares two wide C strings. 
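A standalone sketch of the NULL-safe comparison contract documented for CStringEquals() above: two NULL pointers compare equal, a NULL pointer never equals any non-NULL string (including ""), and otherwise plain strcmp() decides. The helper name is illustrative:

#include <cstring>

bool NullSafeCStringEquals(const char* lhs, const char* rhs) {
  if (lhs == nullptr) return rhs == nullptr;  // both NULL -> equal
  if (rhs == nullptr) return false;           // NULL vs. non-NULL -> different
  return std::strcmp(lhs, rhs) == 0;
}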
Returns true if and only if they have the + // same content. + // + // Unlike wcscmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs); + + // Compares two C strings, ignoring case. Returns true if and only if + // they have the same content. + // + // Unlike strcasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CaseInsensitiveCStringEquals(const char* lhs, + const char* rhs); + + // Compares two wide C strings, ignoring case. Returns true if and only if + // they have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. + static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs); + + // Returns true if and only if the given string ends with the given suffix, + // ignoring case. Any string is considered to end with an empty suffix. + static bool EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix); + + // Formats an int value as "%02d". + static std::string FormatIntWidth2(int value); // "%02d" for width == 2 + + // Formats an int value as "%X". + static std::string FormatHexInt(int value); + + // Formats an int value as "%X". + static std::string FormatHexUInt32(uint32_t value); + + // Formats a byte as "%02X". + static std::string FormatByte(unsigned char value); + + private: + String(); // Not meant to be instantiated. +}; // class String + +// Gets the content of the stringstream's buffer as an std::string. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ std::string StringStreamToString(::std::stringstream* stream); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ diff --git a/source/3rdparty/gtest/include/gtest/internal/gtest-type-util.h b/source/3rdparty/gtest/include/gtest/internal/gtest-type-util.h new file mode 100644 index 0000000..082fdad --- /dev/null +++ b/source/3rdparty/gtest/include/gtest/internal/gtest-type-util.h @@ -0,0 +1,183 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Type utilities needed for implementing typed and type-parameterized +// tests. + +// GOOGLETEST_CM0001 DO NOT DELETE + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + +#include "gtest/internal/gtest-port.h" + +// #ifdef __GNUC__ is too general here. It is possible to use gcc without using +// libstdc++ (which is where cxxabi.h comes from). +# if GTEST_HAS_CXXABI_H_ +# include +# elif defined(__HP_aCC) +# include +# endif // GTEST_HASH_CXXABI_H_ + +namespace testing { +namespace internal { + +// Canonicalizes a given name with respect to the Standard C++ Library. +// This handles removing the inline namespace within `std` that is +// used by various standard libraries (e.g., `std::__1`). Names outside +// of namespace std are returned unmodified. +inline std::string CanonicalizeForStdLibVersioning(std::string s) { + static const char prefix[] = "std::__"; + if (s.compare(0, strlen(prefix), prefix) == 0) { + std::string::size_type end = s.find("::", strlen(prefix)); + if (end != s.npos) { + // Erase everything between the initial `std` and the second `::`. + s.erase(strlen("std"), end - strlen("std")); + } + } + return s; +} + +// GetTypeName() returns a human-readable name of type T. +// NB: This function is also used in Google Mock, so don't move it inside of +// the typed-test-only section below. +template +std::string GetTypeName() { +# if GTEST_HAS_RTTI + + const char* const name = typeid(T).name(); +# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC) + int status = 0; + // gcc's implementation of typeid(T).name() mangles the type name, + // so we have to demangle it. +# if GTEST_HAS_CXXABI_H_ + using abi::__cxa_demangle; +# endif // GTEST_HAS_CXXABI_H_ + char* const readable_name = __cxa_demangle(name, nullptr, nullptr, &status); + const std::string name_str(status == 0 ? readable_name : name); + free(readable_name); + return CanonicalizeForStdLibVersioning(name_str); +# else + return name; +# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC + +# else + + return ""; + +# endif // GTEST_HAS_RTTI +} + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// A unique type indicating an empty node +struct None {}; + +# define GTEST_TEMPLATE_ template class + +// The template "selector" struct TemplateSel is used to +// represent Tmpl, which must be a class template with one type +// parameter, as a type. TemplateSel::Bind::type is defined +// as the type Tmpl. This allows us to actually instantiate the +// template "selected" by TemplateSel. +// +// This trick is necessary for simulating typedef for class templates, +// which C++ doesn't support directly. 
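For readers unfamiliar with the demangling path in GetTypeName() above, a small self-contained sketch of the same abi::__cxa_demangle idiom (libstdc++/libc++ with RTTI assumed; MSVC's typeid names are already human-readable, so no demangling is needed there):

#include <cxxabi.h>
#include <cstdlib>
#include <string>
#include <typeinfo>

template <typename T>
std::string DemangledName() {
  int status = 0;
  char* readable = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
  std::string result(status == 0 && readable != nullptr ? readable : typeid(T).name());
  std::free(readable);  // free(nullptr) is harmless when demangling failed
  return result;
}

On libc++, the demangled name of a standard container contains the versioned inline namespace (e.g. std::__1::vector<...>), which is exactly the kind of spelling CanonicalizeForStdLibVersioning() normalizes away.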
+template +struct TemplateSel { + template + struct Bind { + typedef Tmpl type; + }; +}; + +# define GTEST_BIND_(TmplSel, T) \ + TmplSel::template Bind::type + +template +struct Templates { + using Head = TemplateSel; + using Tail = Templates; +}; + +template +struct Templates { + using Head = TemplateSel; + using Tail = None; +}; + +// Tuple-like type lists +template +struct Types { + using Head = Head_; + using Tail = Types; +}; + +template +struct Types { + using Head = Head_; + using Tail = None; +}; + +// Helper metafunctions to tell apart a single type from types +// generated by ::testing::Types +template +struct ProxyTypeList { + using type = Types; +}; + +template +struct is_proxy_type_list : std::false_type {}; + +template +struct is_proxy_type_list> : std::true_type {}; + +// Generator which conditionally creates type lists. +// It recognizes if a requested type list should be created +// and prevents creating a new type list nested within another one. +template +struct GenerateTypeList { + private: + using proxy = typename std::conditional::value, T, + ProxyTypeList>::type; + + public: + using type = typename proxy::type; +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +} // namespace internal + +template +using Types = internal::ProxyTypeList; + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ diff --git a/source/3rdparty/gtest/src/gtest-all.cc b/source/3rdparty/gtest/src/gtest-all.cc new file mode 100644 index 0000000..ad29290 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-all.cc @@ -0,0 +1,48 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// Google C++ Testing and Mocking Framework (Google Test) +// +// Sometimes it's desirable to build Google Test by compiling a single file. +// This file serves this purpose. + +// This line ensures that gtest.h can be compiled on its own, even +// when it's fused. +#include "gtest/gtest.h" + +// The following lines pull in the real gtest *.cc files. 
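A hypothetical usage example of the type-list machinery above: ::testing::Types packs the element types into a compile-time list, and the typed-test macros instantiate each test body once per type (the fixture, alias, and test names here are made up):

#include "gtest/gtest.h"

template <typename T>
class WidthTest : public ::testing::Test {};

using MyTypes = ::testing::Types<char, int, long long>;
TYPED_TEST_SUITE(WidthTest, MyTypes);

TYPED_TEST(WidthTest, IsAtLeastOneByte) {
  EXPECT_GE(sizeof(TypeParam), 1u);
}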
+#include "src/gtest.cc" +#include "src/gtest-death-test.cc" +#include "src/gtest-filepath.cc" +#include "src/gtest-matchers.cc" +#include "src/gtest-port.cc" +#include "src/gtest-printers.cc" +#include "src/gtest-test-part.cc" +#include "src/gtest-typed-test.cc" diff --git a/source/3rdparty/gtest/src/gtest-death-test.cc b/source/3rdparty/gtest/src/gtest-death-test.cc new file mode 100644 index 0000000..5d1031b --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-death-test.cc @@ -0,0 +1,1653 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// This file implements death tests. + +#include "gtest/gtest-death-test.h" + +#include + +#include "gtest/internal/gtest-port.h" +#include "gtest/internal/custom/gtest.h" + +#if GTEST_HAS_DEATH_TEST + +# if GTEST_OS_MAC +# include +# endif // GTEST_OS_MAC + +# include +# include +# include + +# if GTEST_OS_LINUX +# include +# endif // GTEST_OS_LINUX + +# include + +# if GTEST_OS_WINDOWS +# include +# else +# include +# include +# endif // GTEST_OS_WINDOWS + +# if GTEST_OS_QNX +# include +# endif // GTEST_OS_QNX + +# if GTEST_OS_FUCHSIA +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# endif // GTEST_OS_FUCHSIA + +#endif // GTEST_HAS_DEATH_TEST + +#include "gtest/gtest-message.h" +#include "gtest/internal/gtest-string.h" +#include "src/gtest-internal-inl.h" + +namespace testing { + +// Constants. + +// The default death test style. +// +// This is defined in internal/gtest-port.h as "fast", but can be overridden by +// a definition in internal/custom/gtest-port.h. The recommended value, which is +// used internally at Google, is "threadsafe". 
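The flag definitions that follow pull their defaults from environment variables through the *FromGTestEnv helpers declared in the port header. Roughly, a flag named "foo_bar" maps to the variable GTEST_FOO_BAR; a minimal sketch of that convention under those assumptions (helper names are illustrative):

#include <cctype>
#include <cstdlib>
#include <cstring>
#include <string>

std::string FlagToEnvVar(const char* flag) {
  std::string env_var = "GTEST_";
  for (const char* p = flag; *p != '\0'; ++p)
    env_var += static_cast<char>(std::toupper(static_cast<unsigned char>(*p)));
  return env_var;
}

bool BoolFromEnv(const char* flag, bool default_value) {
  const char* const value = std::getenv(FlagToEnvVar(flag).c_str());
  return value == nullptr ? default_value : std::strcmp(value, "0") != 0;
}

Under that convention, setting GTEST_DEATH_TEST_STYLE=threadsafe in the environment overrides the compiled-in default used below.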
+static const char kDefaultDeathTestStyle[] = GTEST_DEFAULT_DEATH_TEST_STYLE; + +GTEST_DEFINE_string_( + death_test_style, + internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle), + "Indicates how to run a death test in a forked child process: " + "\"threadsafe\" (child process re-executes the test binary " + "from the beginning, running only the specific death test) or " + "\"fast\" (child process runs the death test immediately " + "after forking)."); + +GTEST_DEFINE_bool_( + death_test_use_fork, + internal::BoolFromGTestEnv("death_test_use_fork", false), + "Instructs to use fork()/_exit() instead of clone() in death tests. " + "Ignored and always uses fork() on POSIX systems where clone() is not " + "implemented. Useful when running under valgrind or similar tools if " + "those do not support clone(). Valgrind 3.3.1 will just fail if " + "it sees an unsupported combination of clone() flags. " + "It is not recommended to use this flag w/o valgrind though it will " + "work in 99% of the cases. Once valgrind is fixed, this flag will " + "most likely be removed."); + +namespace internal { +GTEST_DEFINE_string_( + internal_run_death_test, "", + "Indicates the file, line number, temporal index of " + "the single death test to run, and a file descriptor to " + "which a success code may be sent, all separated by " + "the '|' characters. This flag is specified if and only if the " + "current process is a sub-process launched for running a thread-safe " + "death test. FOR INTERNAL USE ONLY."); +} // namespace internal + +#if GTEST_HAS_DEATH_TEST + +namespace internal { + +// Valid only for fast death tests. Indicates the code is running in the +// child process of a fast style death test. +# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +static bool g_in_fast_death_test_child = false; +# endif + +// Returns a Boolean value indicating whether the caller is currently +// executing in the context of the death test child process. Tools such as +// Valgrind heap checkers may need this to modify their behavior in death +// tests. IMPORTANT: This is an internal utility. Using it may break the +// implementation of death tests. User code MUST NOT use it. +bool InDeathTestChild() { +# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA + + // On Windows and Fuchsia, death tests are thread-safe regardless of the value + // of the death_test_style flag. + return !GTEST_FLAG(internal_run_death_test).empty(); + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") + return !GTEST_FLAG(internal_run_death_test).empty(); + else + return g_in_fast_death_test_child; +#endif +} + +} // namespace internal + +// ExitedWithCode constructor. +ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { +} + +// ExitedWithCode function-call operator. +bool ExitedWithCode::operator()(int exit_status) const { +# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA + + return exit_status == exit_code_; + +# else + + return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_; + +# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +} + +# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +// KilledBySignal constructor. +KilledBySignal::KilledBySignal(int signum) : signum_(signum) { +} + +// KilledBySignal function-call operator. 
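A hypothetical usage example of the two exit-status predicates defined here, ExitedWithCode and (on POSIX) KilledBySignal, inside an ordinary death test; the empty string is the regular-expression matcher for the child's stderr and matches anything:

#include "gtest/gtest.h"
#include <csignal>
#include <cstdlib>

TEST(MyDeathTest, ChecksHowTheChildTerminated) {
  EXPECT_EXIT(std::exit(1), ::testing::ExitedWithCode(1), "");
#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
  // SIGKILL cannot be caught, so the child is reliably killed by the signal.
  EXPECT_EXIT(std::raise(SIGKILL), ::testing::KilledBySignal(SIGKILL), "");
#endif
}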
+bool KilledBySignal::operator()(int exit_status) const { +# if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_) + { + bool result; + if (GTEST_KILLED_BY_SIGNAL_OVERRIDE_(signum_, exit_status, &result)) { + return result; + } + } +# endif // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_) + return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_; +} +# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA + +namespace internal { + +// Utilities needed for death tests. + +// Generates a textual description of a given exit code, in the format +// specified by wait(2). +static std::string ExitSummary(int exit_code) { + Message m; + +# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA + + m << "Exited with exit status " << exit_code; + +# else + + if (WIFEXITED(exit_code)) { + m << "Exited with exit status " << WEXITSTATUS(exit_code); + } else if (WIFSIGNALED(exit_code)) { + m << "Terminated by signal " << WTERMSIG(exit_code); + } +# ifdef WCOREDUMP + if (WCOREDUMP(exit_code)) { + m << " (core dumped)"; + } +# endif +# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA + + return m.GetString(); +} + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +bool ExitedUnsuccessfully(int exit_status) { + return !ExitedWithCode(0)(exit_status); +} + +# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +// Generates a textual failure message when a death test finds more than +// one thread running, or cannot determine the number of threads, prior +// to executing the given statement. It is the responsibility of the +// caller not to pass a thread_count of 1. +static std::string DeathTestThreadWarning(size_t thread_count) { + Message msg; + msg << "Death tests use fork(), which is unsafe particularly" + << " in a threaded context. For this test, " << GTEST_NAME_ << " "; + if (thread_count == 0) { + msg << "couldn't detect the number of threads."; + } else { + msg << "detected " << thread_count << " threads."; + } + msg << " See " + "https://github.com/google/googletest/blob/master/googletest/docs/" + "advanced.md#death-tests-and-threads" + << " for more explanation and suggested solutions, especially if" + << " this is the last message you see before your test times out."; + return msg.GetString(); +} +# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA + +// Flag characters for reporting a death test that did not die. +static const char kDeathTestLived = 'L'; +static const char kDeathTestReturned = 'R'; +static const char kDeathTestThrew = 'T'; +static const char kDeathTestInternalError = 'I'; + +#if GTEST_OS_FUCHSIA + +// File descriptor used for the pipe in the child process. +static const int kFuchsiaReadPipeFd = 3; + +#endif + +// An enumeration describing all of the possible ways that a death test can +// conclude. DIED means that the process died while executing the test +// code; LIVED means that process lived beyond the end of the test code; +// RETURNED means that the test statement attempted to execute a return +// statement, which is not allowed; THREW means that the test statement +// returned control by throwing an exception. IN_PROGRESS means the test +// has not yet concluded. +enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }; + +// Routine for aborting the program which is safe to call from an +// exec-style death test child process, in which case the error +// message is propagated back to the parent process. Otherwise, the +// message is simply printed to stderr. 
In either case, the program +// then exits with status 1. +static void DeathTestAbort(const std::string& message) { + // On a POSIX system, this function may be called from a threadsafe-style + // death test child process, which operates on a very small stack. Use + // the heap for any additional non-minuscule memory requirements. + const InternalRunDeathTestFlag* const flag = + GetUnitTestImpl()->internal_run_death_test_flag(); + if (flag != nullptr) { + FILE* parent = posix::FDOpen(flag->write_fd(), "w"); + fputc(kDeathTestInternalError, parent); + fprintf(parent, "%s", message.c_str()); + fflush(parent); + _exit(1); + } else { + fprintf(stderr, "%s", message.c_str()); + fflush(stderr); + posix::Abort(); + } +} + +// A replacement for CHECK that calls DeathTestAbort if the assertion +// fails. +# define GTEST_DEATH_TEST_CHECK_(expression) \ + do { \ + if (!::testing::internal::IsTrue(expression)) { \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for +// evaluating any system call that fulfills two conditions: it must return +// -1 on failure, and set errno to EINTR when it is interrupted and +// should be tried again. The macro expands to a loop that repeatedly +// evaluates the expression as long as it evaluates to -1 and sets +// errno to EINTR. If the expression evaluates to -1 but errno is +// something other than EINTR, DeathTestAbort is called. +# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ + do { \ + int gtest_retval; \ + do { \ + gtest_retval = (expression); \ + } while (gtest_retval == -1 && errno == EINTR); \ + if (gtest_retval == -1) { \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression + " != -1"); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// Returns the message describing the last system error in errno. +std::string GetLastErrnoDescription() { + return errno == 0 ? "" : posix::StrError(errno); +} + +// This is called from a death test parent process to read a failure +// message from the death test child process and log it with the FATAL +// severity. On Windows, the message is read from a pipe handle. On other +// platforms, it is read from a file descriptor. +static void FailFromInternalError(int fd) { + Message error; + char buffer[256]; + int num_read; + + do { + while ((num_read = posix::Read(fd, buffer, 255)) > 0) { + buffer[num_read] = '\0'; + error << buffer; + } + } while (num_read == -1 && errno == EINTR); + + if (num_read == 0) { + GTEST_LOG_(FATAL) << error.GetString(); + } else { + const int last_error = errno; + GTEST_LOG_(FATAL) << "Error while reading death test internal: " + << GetLastErrnoDescription() << " [" << last_error << "]"; + } +} + +// Death test constructor. Increments the running death test count +// for the current test. +DeathTest::DeathTest() { + TestInfo* const info = GetUnitTestImpl()->current_test_info(); + if (info == nullptr) { + DeathTestAbort("Cannot run a death test outside of a TEST or " + "TEST_F construct"); + } +} + +// Creates and returns a death test by dispatching to the current +// death test factory. 
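A standalone sketch of the EINTR-retry idiom that GTEST_DEATH_TEST_CHECK_SYSCALL_ above wraps in macro form: retry while the call was merely interrupted by a signal, and only treat a persistent -1 as a real failure (the helper name is illustrative):

#include <cerrno>
#include <unistd.h>

ssize_t WriteRetryingOnEintr(int fd, const void* buf, size_t count) {
  ssize_t result;
  do {
    result = write(fd, buf, count);
  } while (result == -1 && errno == EINTR);  // interrupted: try again
  return result;  // -1 here is a genuine error, not an interruption
}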
+bool DeathTest::Create(const char* statement, + Matcher matcher, const char* file, + int line, DeathTest** test) { + return GetUnitTestImpl()->death_test_factory()->Create( + statement, std::move(matcher), file, line, test); +} + +const char* DeathTest::LastMessage() { + return last_death_test_message_.c_str(); +} + +void DeathTest::set_last_death_test_message(const std::string& message) { + last_death_test_message_ = message; +} + +std::string DeathTest::last_death_test_message_; + +// Provides cross platform implementation for some death functionality. +class DeathTestImpl : public DeathTest { + protected: + DeathTestImpl(const char* a_statement, Matcher matcher) + : statement_(a_statement), + matcher_(std::move(matcher)), + spawned_(false), + status_(-1), + outcome_(IN_PROGRESS), + read_fd_(-1), + write_fd_(-1) {} + + // read_fd_ is expected to be closed and cleared by a derived class. + ~DeathTestImpl() override { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); } + + void Abort(AbortReason reason) override; + bool Passed(bool status_ok) override; + + const char* statement() const { return statement_; } + bool spawned() const { return spawned_; } + void set_spawned(bool is_spawned) { spawned_ = is_spawned; } + int status() const { return status_; } + void set_status(int a_status) { status_ = a_status; } + DeathTestOutcome outcome() const { return outcome_; } + void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; } + int read_fd() const { return read_fd_; } + void set_read_fd(int fd) { read_fd_ = fd; } + int write_fd() const { return write_fd_; } + void set_write_fd(int fd) { write_fd_ = fd; } + + // Called in the parent process only. Reads the result code of the death + // test child process via a pipe, interprets it to set the outcome_ + // member, and closes read_fd_. Outputs diagnostics and terminates in + // case of unexpected codes. + void ReadAndInterpretStatusByte(); + + // Returns stderr output from the child process. + virtual std::string GetErrorLogs(); + + private: + // The textual content of the code this object is testing. This class + // doesn't own this string and should not attempt to delete it. + const char* const statement_; + // A matcher that's expected to match the stderr output by the child process. + Matcher matcher_; + // True if the death test child process has been successfully spawned. + bool spawned_; + // The exit status of the child process. + int status_; + // How the death test concluded. + DeathTestOutcome outcome_; + // Descriptor to the read end of the pipe to the child process. It is + // always -1 in the child process. The child keeps its write end of the + // pipe in write_fd_. + int read_fd_; + // Descriptor to the child's write end of the pipe to the parent process. + // It is always -1 in the parent process. The parent keeps its end of the + // pipe in read_fd_. + int write_fd_; +}; + +// Called in the parent process only. Reads the result code of the death +// test child process via a pipe, interprets it to set the outcome_ +// member, and closes read_fd_. Outputs diagnostics and terminates in +// case of unexpected codes. +void DeathTestImpl::ReadAndInterpretStatusByte() { + char flag; + int bytes_read; + + // The read() here blocks until data is available (signifying the + // failure of the death test) or until the pipe is closed (signifying + // its success), so it's okay to call this in the parent before + // the child process has exited. 
+ do { + bytes_read = posix::Read(read_fd(), &flag, 1); + } while (bytes_read == -1 && errno == EINTR); + + if (bytes_read == 0) { + set_outcome(DIED); + } else if (bytes_read == 1) { + switch (flag) { + case kDeathTestReturned: + set_outcome(RETURNED); + break; + case kDeathTestThrew: + set_outcome(THREW); + break; + case kDeathTestLived: + set_outcome(LIVED); + break; + case kDeathTestInternalError: + FailFromInternalError(read_fd()); // Does not return. + break; + default: + GTEST_LOG_(FATAL) << "Death test child process reported " + << "unexpected status byte (" + << static_cast(flag) << ")"; + } + } else { + GTEST_LOG_(FATAL) << "Read from death test child process failed: " + << GetLastErrnoDescription(); + } + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd())); + set_read_fd(-1); +} + +std::string DeathTestImpl::GetErrorLogs() { + return GetCapturedStderr(); +} + +// Signals that the death test code which should have exited, didn't. +// Should be called only in a death test child process. +// Writes a status byte to the child's status file descriptor, then +// calls _exit(1). +void DeathTestImpl::Abort(AbortReason reason) { + // The parent process considers the death test to be a failure if + // it finds any data in our pipe. So, here we write a single flag byte + // to the pipe, then exit. + const char status_ch = + reason == TEST_DID_NOT_DIE ? kDeathTestLived : + reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned; + + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1)); + // We are leaking the descriptor here because on some platforms (i.e., + // when built as Windows DLL), destructors of global objects will still + // run after calling _exit(). On such systems, write_fd_ will be + // indirectly closed from the destructor of UnitTestImpl, causing double + // close if it is also closed here. On debug configurations, double close + // may assert. As there are no in-process buffers to flush here, we are + // relying on the OS to close the descriptor after the process terminates + // when the destructors are not run. + _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash) +} + +// Returns an indented copy of stderr output for a death test. +// This makes distinguishing death test output lines from regular log lines +// much easier. +static ::std::string FormatDeathTestOutput(const ::std::string& output) { + ::std::string ret; + for (size_t at = 0; ; ) { + const size_t line_end = output.find('\n', at); + ret += "[ DEATH ] "; + if (line_end == ::std::string::npos) { + ret += output.substr(at); + break; + } + ret += output.substr(at, line_end + 1 - at); + at = line_end + 1; + } + return ret; +} + +// Assesses the success or failure of a death test, using both private +// members which have previously been set, and one argument: +// +// Private data members: +// outcome: An enumeration describing how the death test +// concluded: DIED, LIVED, THREW, or RETURNED. The death test +// fails in the latter three cases. +// status: The exit status of the child process. On *nix, it is in the +// in the format specified by wait(2). On Windows, this is the +// value supplied to the ExitProcess() API or a numeric code +// of the exception that terminated the program. +// matcher_: A matcher that's expected to match the stderr output by the child +// process. 
+// +// Argument: +// status_ok: true if exit_status is acceptable in the context of +// this particular death test, which fails if it is false +// +// Returns true if and only if all of the above conditions are met. Otherwise, +// the first failing condition, in the order given above, is the one that is +// reported. Also sets the last death test message string. +bool DeathTestImpl::Passed(bool status_ok) { + if (!spawned()) + return false; + + const std::string error_message = GetErrorLogs(); + + bool success = false; + Message buffer; + + buffer << "Death test: " << statement() << "\n"; + switch (outcome()) { + case LIVED: + buffer << " Result: failed to die.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case THREW: + buffer << " Result: threw an exception.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case RETURNED: + buffer << " Result: illegal return in test statement.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case DIED: + if (status_ok) { + if (matcher_.Matches(error_message)) { + success = true; + } else { + std::ostringstream stream; + matcher_.DescribeTo(&stream); + buffer << " Result: died but not with expected error.\n" + << " Expected: " << stream.str() << "\n" + << "Actual msg:\n" + << FormatDeathTestOutput(error_message); + } + } else { + buffer << " Result: died but not with expected exit code:\n" + << " " << ExitSummary(status()) << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + break; + case IN_PROGRESS: + default: + GTEST_LOG_(FATAL) + << "DeathTest::Passed somehow called before conclusion of test"; + } + + DeathTest::set_last_death_test_message(buffer.GetString()); + return success; +} + +# if GTEST_OS_WINDOWS +// WindowsDeathTest implements death tests on Windows. Due to the +// specifics of starting new processes on Windows, death tests there are +// always threadsafe, and Google Test considers the +// --gtest_death_test_style=fast setting to be equivalent to +// --gtest_death_test_style=threadsafe there. +// +// A few implementation notes: Like the Linux version, the Windows +// implementation uses pipes for child-to-parent communication. But due to +// the specifics of pipes on Windows, some extra steps are required: +// +// 1. The parent creates a communication pipe and stores handles to both +// ends of it. +// 2. The parent starts the child and provides it with the information +// necessary to acquire the handle to the write end of the pipe. +// 3. The child acquires the write end of the pipe and signals the parent +// using a Windows event. +// 4. Now the parent can release the write end of the pipe on its side. If +// this is done before step 3, the object's reference count goes down to +// 0 and it is destroyed, preventing the child from acquiring it. The +// parent now has to release it, or read operations on the read end of +// the pipe will not return when the child terminates. +// 5. The parent reads child's output through the pipe (outcome code and +// any possible error messages) from the pipe, and its stderr and then +// determines whether to fail the test. +// +// Note: to distinguish Win32 API calls from the local method and function +// calls, the former are explicitly resolved in the global namespace. 
+// +class WindowsDeathTest : public DeathTestImpl { + public: + WindowsDeathTest(const char* a_statement, Matcher matcher, + const char* file, int line) + : DeathTestImpl(a_statement, std::move(matcher)), + file_(file), + line_(line) {} + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + virtual TestRole AssumeRole(); + + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; + // Handle to the write end of the pipe to the child process. + AutoHandle write_handle_; + // Child process handle. + AutoHandle child_handle_; + // Event the child process uses to signal the parent that it has + // acquired the handle to the write end of the pipe. After seeing this + // event the parent can release its own handles to make sure its + // ReadFile() calls return when the child terminates. + AutoHandle event_handle_; +}; + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int WindowsDeathTest::Wait() { + if (!spawned()) + return 0; + + // Wait until the child either signals that it has acquired the write end + // of the pipe or it dies. + const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() }; + switch (::WaitForMultipleObjects(2, + wait_handles, + FALSE, // Waits for any of the handles. + INFINITE)) { + case WAIT_OBJECT_0: + case WAIT_OBJECT_0 + 1: + break; + default: + GTEST_DEATH_TEST_CHECK_(false); // Should not get here. + } + + // The child has acquired the write end of the pipe or exited. + // We release the handle on our side and continue. + write_handle_.Reset(); + event_handle_.Reset(); + + ReadAndInterpretStatusByte(); + + // Waits for the child process to exit if it haven't already. This + // returns immediately if the child has already exited, regardless of + // whether previous calls to WaitForMultipleObjects synchronized on this + // handle or not. + GTEST_DEATH_TEST_CHECK_( + WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(), + INFINITE)); + DWORD status_code; + GTEST_DEATH_TEST_CHECK_( + ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE); + child_handle_.Reset(); + set_status(static_cast(status_code)); + return status(); +} + +// The AssumeRole process for a Windows death test. It creates a child +// process with the same executable as the current process to run the +// death test. The child process is given the --gtest_filter and +// --gtest_internal_run_death_test flags such that it knows to run the +// current death test only. +DeathTest::TestRole WindowsDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != nullptr) { + // ParseInternalRunDeathTestFlag() has performed all the necessary + // processing. + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + // WindowsDeathTest uses an anonymous pipe to communicate results of + // a death test. + SECURITY_ATTRIBUTES handles_are_inheritable = {sizeof(SECURITY_ATTRIBUTES), + nullptr, TRUE}; + HANDLE read_handle, write_handle; + GTEST_DEATH_TEST_CHECK_( + ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable, + 0) // Default buffer size. 
+ != FALSE); + set_read_fd(::_open_osfhandle(reinterpret_cast(read_handle), + O_RDONLY)); + write_handle_.Reset(write_handle); + event_handle_.Reset(::CreateEvent( + &handles_are_inheritable, + TRUE, // The event will automatically reset to non-signaled state. + FALSE, // The initial state is non-signalled. + nullptr)); // The even is unnamed. + GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != nullptr); + const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ + + kFilterFlag + "=" + info->test_suite_name() + + "." + info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + + "=" + file_ + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index) + "|" + + StreamableToString(static_cast(::GetCurrentProcessId())) + + // size_t has the same width as pointers on both 32-bit and 64-bit + // Windows platforms. + // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx. + "|" + StreamableToString(reinterpret_cast(write_handle)) + + "|" + StreamableToString(reinterpret_cast(event_handle_.Get())); + + char executable_path[_MAX_PATH + 1]; // NOLINT + GTEST_DEATH_TEST_CHECK_(_MAX_PATH + 1 != ::GetModuleFileNameA(nullptr, + executable_path, + _MAX_PATH)); + + std::string command_line = + std::string(::GetCommandLineA()) + " " + filter_flag + " \"" + + internal_flag + "\""; + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // Flush the log buffers since the log streams are shared with the child. + FlushInfoLog(); + + // The child process will share the standard handles with the parent. + STARTUPINFOA startup_info; + memset(&startup_info, 0, sizeof(STARTUPINFO)); + startup_info.dwFlags = STARTF_USESTDHANDLES; + startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE); + startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE); + startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE); + + PROCESS_INFORMATION process_info; + GTEST_DEATH_TEST_CHECK_( + ::CreateProcessA( + executable_path, const_cast(command_line.c_str()), + nullptr, // Retuned process handle is not inheritable. + nullptr, // Retuned thread handle is not inheritable. + TRUE, // Child inherits all inheritable handles (for write_handle_). + 0x0, // Default creation flags. + nullptr, // Inherit the parent's environment. + UnitTest::GetInstance()->original_working_dir(), &startup_info, + &process_info) != FALSE); + child_handle_.Reset(process_info.hProcess); + ::CloseHandle(process_info.hThread); + set_spawned(true); + return OVERSEE_TEST; +} + +# elif GTEST_OS_FUCHSIA + +class FuchsiaDeathTest : public DeathTestImpl { + public: + FuchsiaDeathTest(const char* a_statement, Matcher matcher, + const char* file, int line) + : DeathTestImpl(a_statement, std::move(matcher)), + file_(file), + line_(line) {} + + // All of these virtual functions are inherited from DeathTest. + int Wait() override; + TestRole AssumeRole() override; + std::string GetErrorLogs() override; + + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; + // The stderr data captured by the child process. + std::string captured_stderr_; + + zx::process child_process_; + zx::channel exception_channel_; + zx::socket stderr_socket_; +}; + +// Utility class for accumulating command-line arguments. 
+class Arguments { + public: + Arguments() { args_.push_back(nullptr); } + + ~Arguments() { + for (std::vector::iterator i = args_.begin(); i != args_.end(); + ++i) { + free(*i); + } + } + void AddArgument(const char* argument) { + args_.insert(args_.end() - 1, posix::StrDup(argument)); + } + + template + void AddArguments(const ::std::vector& arguments) { + for (typename ::std::vector::const_iterator i = arguments.begin(); + i != arguments.end(); + ++i) { + args_.insert(args_.end() - 1, posix::StrDup(i->c_str())); + } + } + char* const* Argv() { + return &args_[0]; + } + + int size() { + return args_.size() - 1; + } + + private: + std::vector args_; +}; + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int FuchsiaDeathTest::Wait() { + const int kProcessKey = 0; + const int kSocketKey = 1; + const int kExceptionKey = 2; + + if (!spawned()) + return 0; + + // Create a port to wait for socket/task/exception events. + zx_status_t status_zx; + zx::port port; + status_zx = zx::port::create(0, &port); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + + // Register to wait for the child process to terminate. + status_zx = child_process_.wait_async( + port, kProcessKey, ZX_PROCESS_TERMINATED, ZX_WAIT_ASYNC_ONCE); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + + // Register to wait for the socket to be readable or closed. + status_zx = stderr_socket_.wait_async( + port, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED, + ZX_WAIT_ASYNC_ONCE); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + + // Register to wait for an exception. + status_zx = exception_channel_.wait_async( + port, kExceptionKey, ZX_CHANNEL_READABLE, ZX_WAIT_ASYNC_ONCE); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + + bool process_terminated = false; + bool socket_closed = false; + do { + zx_port_packet_t packet = {}; + status_zx = port.wait(zx::time::infinite(), &packet); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + + if (packet.key == kExceptionKey) { + // Process encountered an exception. Kill it directly rather than + // letting other handlers process the event. We will get a kProcessKey + // event when the process actually terminates. + status_zx = child_process_.kill(); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + } else if (packet.key == kProcessKey) { + // Process terminated. + GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type)); + GTEST_DEATH_TEST_CHECK_(packet.signal.observed & ZX_PROCESS_TERMINATED); + process_terminated = true; + } else if (packet.key == kSocketKey) { + GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type)); + if (packet.signal.observed & ZX_SOCKET_READABLE) { + // Read data from the socket. 
+ constexpr size_t kBufferSize = 1024; + do { + size_t old_length = captured_stderr_.length(); + size_t bytes_read = 0; + captured_stderr_.resize(old_length + kBufferSize); + status_zx = stderr_socket_.read( + 0, &captured_stderr_.front() + old_length, kBufferSize, + &bytes_read); + captured_stderr_.resize(old_length + bytes_read); + } while (status_zx == ZX_OK); + if (status_zx == ZX_ERR_PEER_CLOSED) { + socket_closed = true; + } else { + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_ERR_SHOULD_WAIT); + status_zx = stderr_socket_.wait_async( + port, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED, + ZX_WAIT_ASYNC_ONCE); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + } + } else { + GTEST_DEATH_TEST_CHECK_(packet.signal.observed & ZX_SOCKET_PEER_CLOSED); + socket_closed = true; + } + } + } while (!process_terminated && !socket_closed); + + ReadAndInterpretStatusByte(); + + zx_info_process_t buffer; + status_zx = child_process_.get_info( + ZX_INFO_PROCESS, &buffer, sizeof(buffer), nullptr, nullptr); + GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); + + GTEST_DEATH_TEST_CHECK_(buffer.exited); + set_status(buffer.return_code); + return status(); +} + +// The AssumeRole process for a Fuchsia death test. It creates a child +// process with the same executable as the current process to run the +// death test. The child process is given the --gtest_filter and +// --gtest_internal_run_death_test flags such that it knows to run the +// current death test only. +DeathTest::TestRole FuchsiaDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != nullptr) { + // ParseInternalRunDeathTestFlag() has performed all the necessary + // processing. + set_write_fd(kFuchsiaReadPipeFd); + return EXECUTE_TEST; + } + + // Flush the log buffers since the log streams are shared with the child. + FlushInfoLog(); + + // Build the child process command line. + const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ + + kFilterFlag + "=" + info->test_suite_name() + + "." + info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "=" + + file_ + "|" + + StreamableToString(line_) + "|" + + StreamableToString(death_test_index); + Arguments args; + args.AddArguments(GetInjectableArgvs()); + args.AddArgument(filter_flag.c_str()); + args.AddArgument(internal_flag.c_str()); + + // Build the pipe for communication with the child. + zx_status_t status; + zx_handle_t child_pipe_handle; + int child_pipe_fd; + status = fdio_pipe_half(&child_pipe_fd, &child_pipe_handle); + GTEST_DEATH_TEST_CHECK_(status == ZX_OK); + set_read_fd(child_pipe_fd); + + // Set the pipe handle for the child. + fdio_spawn_action_t spawn_actions[2] = {}; + fdio_spawn_action_t* add_handle_action = &spawn_actions[0]; + add_handle_action->action = FDIO_SPAWN_ACTION_ADD_HANDLE; + add_handle_action->h.id = PA_HND(PA_FD, kFuchsiaReadPipeFd); + add_handle_action->h.handle = child_pipe_handle; + + // Create a socket pair will be used to receive the child process' stderr. 
+ zx::socket stderr_producer_socket; + status = + zx::socket::create(0, &stderr_producer_socket, &stderr_socket_); + GTEST_DEATH_TEST_CHECK_(status >= 0); + int stderr_producer_fd = -1; + status = + fdio_fd_create(stderr_producer_socket.release(), &stderr_producer_fd); + GTEST_DEATH_TEST_CHECK_(status >= 0); + + // Make the stderr socket nonblocking. + GTEST_DEATH_TEST_CHECK_(fcntl(stderr_producer_fd, F_SETFL, 0) == 0); + + fdio_spawn_action_t* add_stderr_action = &spawn_actions[1]; + add_stderr_action->action = FDIO_SPAWN_ACTION_CLONE_FD; + add_stderr_action->fd.local_fd = stderr_producer_fd; + add_stderr_action->fd.target_fd = STDERR_FILENO; + + // Create a child job. + zx_handle_t child_job = ZX_HANDLE_INVALID; + status = zx_job_create(zx_job_default(), 0, & child_job); + GTEST_DEATH_TEST_CHECK_(status == ZX_OK); + zx_policy_basic_t policy; + policy.condition = ZX_POL_NEW_ANY; + policy.policy = ZX_POL_ACTION_ALLOW; + status = zx_job_set_policy( + child_job, ZX_JOB_POL_RELATIVE, ZX_JOB_POL_BASIC, &policy, 1); + GTEST_DEATH_TEST_CHECK_(status == ZX_OK); + + // Create an exception channel attached to the |child_job|, to allow + // us to suppress the system default exception handler from firing. + status = + zx_task_create_exception_channel( + child_job, 0, exception_channel_.reset_and_get_address()); + GTEST_DEATH_TEST_CHECK_(status == ZX_OK); + + // Spawn the child process. + status = fdio_spawn_etc( + child_job, FDIO_SPAWN_CLONE_ALL, args.Argv()[0], args.Argv(), nullptr, + 2, spawn_actions, child_process_.reset_and_get_address(), nullptr); + GTEST_DEATH_TEST_CHECK_(status == ZX_OK); + + set_spawned(true); + return OVERSEE_TEST; +} + +std::string FuchsiaDeathTest::GetErrorLogs() { + return captured_stderr_; +} + +#else // We are neither on Windows, nor on Fuchsia. + +// ForkingDeathTest provides implementations for most of the abstract +// methods of the DeathTest interface. Only the AssumeRole method is +// left undefined. +class ForkingDeathTest : public DeathTestImpl { + public: + ForkingDeathTest(const char* statement, Matcher matcher); + + // All of these virtual functions are inherited from DeathTest. + int Wait() override; + + protected: + void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; } + + private: + // PID of child process during death test; 0 in the child process itself. + pid_t child_pid_; +}; + +// Constructs a ForkingDeathTest. +ForkingDeathTest::ForkingDeathTest(const char* a_statement, + Matcher matcher) + : DeathTestImpl(a_statement, std::move(matcher)), child_pid_(-1) {} + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int ForkingDeathTest::Wait() { + if (!spawned()) + return 0; + + ReadAndInterpretStatusByte(); + + int status_value; + GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0)); + set_status(status_value); + return status_value; +} + +// A concrete death test class that forks, then immediately runs the test +// in the child process. +class NoExecDeathTest : public ForkingDeathTest { + public: + NoExecDeathTest(const char* a_statement, Matcher matcher) + : ForkingDeathTest(a_statement, std::move(matcher)) {} + TestRole AssumeRole() override; +}; + +// The AssumeRole process for a fork-and-run death test. It implements a +// straightforward fork, with a simple pipe to transmit the status byte. 
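Before the actual implementation below, a minimal standalone sketch of the fork-and-pipe protocol that the comment above describes: the child writes a status byte only if it outlives the statement, so EOF on the parent's end of the pipe means the child really died (names are illustrative and error handling is trimmed):

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

template <typename Statement>
bool StatementDied(Statement statement) {
  int fd[2];
  if (pipe(fd) == -1) return false;
  const pid_t pid = fork();
  if (pid == 0) {            // child: run the statement under test
    close(fd[0]);
    statement();             // expected to terminate the process
    const char lived = 'L';  // reached only if the statement did not die
    (void)write(fd[1], &lived, 1);
    _exit(1);
  }
  close(fd[1]);              // parent: EOF on the pipe <=> the child died
  char flag = 0;
  const ssize_t n = read(fd[0], &flag, 1);
  close(fd[0]);
  int status = 0;
  waitpid(pid, &status, 0);
  return n == 0;             // no status byte arrived, so the child really died
}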
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+  const size_t thread_count = GetThreadCount();
+  if (thread_count != 1) {
+    GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+  }
+
+  int pipe_fd[2];
+  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+  DeathTest::set_last_death_test_message("");
+  CaptureStderr();
+  // When we fork the process below, the log file buffers are copied, but the
+  // file descriptors are shared.  We flush all log files here so that closing
+  // the file descriptors in the child process doesn't throw off the
+  // synchronization between descriptors and buffers in the parent process.
+  // This is as close to the fork as possible to avoid a race condition in case
+  // there are multiple threads running before the death test, and another
+  // thread writes to the log file.
+  FlushInfoLog();
+
+  const pid_t child_pid = fork();
+  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+  set_child_pid(child_pid);
+  if (child_pid == 0) {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+    set_write_fd(pipe_fd[1]);
+    // Redirects all logging to stderr in the child process to prevent
+    // concurrent writes to the log files.  We capture stderr in the parent
+    // process and append the child process' output to a log.
+    LogToStderr();
+    // Event forwarding to the listeners of the event listener API must be shut
+    // down in death test subprocesses.
+    GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+    g_in_fast_death_test_child = true;
+    return EXECUTE_TEST;
+  } else {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+    set_read_fd(pipe_fd[0]);
+    set_spawned(true);
+    return OVERSEE_TEST;
+  }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+  ExecDeathTest(const char* a_statement, Matcher<const std::string&> matcher,
+                const char* file, int line)
+      : ForkingDeathTest(a_statement, std::move(matcher)),
+        file_(file),
+        line_(line) {}
+  TestRole AssumeRole() override;
+
+ private:
+  static ::std::vector<std::string> GetArgvsForDeathTestChildProcess() {
+    ::std::vector<std::string> args = GetInjectableArgvs();
+# if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+    ::std::vector<std::string> extra_args =
+        GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_();
+    args.insert(args.end(), extra_args.begin(), extra_args.end());
+# endif  // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+    return args;
+  }
+  // The name of the file in which the death test is located.
+  const char* const file_;
+  // The line number on which the death test is located.
+  const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+  Arguments() { args_.push_back(nullptr); }
+
+  ~Arguments() {
+    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+         ++i) {
+      free(*i);
+    }
+  }
+  void AddArgument(const char* argument) {
+    args_.insert(args_.end() - 1, posix::StrDup(argument));
+  }
+
+  template <typename Str>
+  void AddArguments(const ::std::vector<Str>& arguments) {
+    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+         i != arguments.end();
+         ++i) {
+      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+    }
+  }
+  char* const* Argv() {
+    return &args_[0];
+  }
+
+ private:
+  std::vector<char*> args_;
+};
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs { + char* const* argv; // Command-line arguments for the child's call to exec + int close_fd; // File descriptor to close; the read end of a pipe +}; + +# if GTEST_OS_MAC +inline char** GetEnviron() { + // When Google Test is built as a framework on MacOS X, the environ variable + // is unavailable. Apple's documentation (man environ) recommends using + // _NSGetEnviron() instead. + return *_NSGetEnviron(); +} +# else +// Some POSIX platforms expect you to declare environ. extern "C" makes +// it reside in the global namespace. +extern "C" char** environ; +inline char** GetEnviron() { return environ; } +# endif // GTEST_OS_MAC + +# if !GTEST_OS_QNX +// The main function for a threadsafe-style death test child process. +// This function is called in a clone()-ed process and thus must avoid +// any potentially unsafe operations like malloc or libc functions. +static int ExecDeathTestChildMain(void* child_arg) { + ExecDeathTestArgs* const args = static_cast(child_arg); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd)); + + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; + } + + // We can safely call execve() as it's a direct system call. We + // cannot use execvp() as it's a libc function and thus potentially + // unsafe. Since execve() doesn't search the PATH, the user must + // invoke the test program via a valid path that contains at least + // one path separator. + execve(args->argv[0], args->argv, GetEnviron()); + DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " + + original_dir + " failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; +} +# endif // !GTEST_OS_QNX + +# if GTEST_HAS_CLONE +// Two utility routines that together determine the direction the stack +// grows. +// This could be accomplished more elegantly by a single recursive +// function, but we want to guard against the unlikely possibility of +// a smart compiler optimizing the recursion away. +// +// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining +// StackLowerThanAddress into StackGrowsDown, which then doesn't give +// correct answer. +static void StackLowerThanAddress(const void* ptr, + bool* result) GTEST_NO_INLINE_; +// HWAddressSanitizer add a random tag to the MSB of the local variable address, +// making comparison result unpredictable. +GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +static void StackLowerThanAddress(const void* ptr, bool* result) { + int dummy; + *result = (&dummy < ptr); +} + +// Make sure AddressSanitizer does not tamper with the stack here. +GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +static bool StackGrowsDown() { + int dummy; + bool result; + StackLowerThanAddress(&dummy, &result); + return result; +} +# endif // GTEST_HAS_CLONE + +// Spawns a child process with the same executable as the current process in +// a thread-safe manner and instructs it to run the death test. The +// implementation uses fork(2) + exec. On systems where clone(2) is +// available, it is used instead, being slightly more thread-safe. 
On QNX, +// fork supports only single-threaded environments, so this function uses +// spawn(2) there instead. The function dies with an error message if +// anything goes wrong. +static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { + ExecDeathTestArgs args = { argv, close_fd }; + pid_t child_pid = -1; + +# if GTEST_OS_QNX + // Obtains the current directory and sets it to be closed in the child + // process. + const int cwd_fd = open(".", O_RDONLY); + GTEST_DEATH_TEST_CHECK_(cwd_fd != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC)); + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; + } + + int fd_flags; + // Set close_fd to be closed after spawn. + GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD)); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD, + fd_flags | FD_CLOEXEC)); + struct inheritance inherit = {0}; + // spawn is a system call. + child_pid = + spawn(args.argv[0], 0, nullptr, &inherit, args.argv, GetEnviron()); + // Restores the current working directory. + GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd)); + +# else // GTEST_OS_QNX +# if GTEST_OS_LINUX + // When a SIGPROF signal is received while fork() or clone() are executing, + // the process may hang. To avoid this, we ignore SIGPROF here and re-enable + // it after the call to fork()/clone() is complete. + struct sigaction saved_sigprof_action; + struct sigaction ignore_sigprof_action; + memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action)); + sigemptyset(&ignore_sigprof_action.sa_mask); + ignore_sigprof_action.sa_handler = SIG_IGN; + GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction( + SIGPROF, &ignore_sigprof_action, &saved_sigprof_action)); +# endif // GTEST_OS_LINUX + +# if GTEST_HAS_CLONE + const bool use_fork = GTEST_FLAG(death_test_use_fork); + + if (!use_fork) { + static const bool stack_grows_down = StackGrowsDown(); + const auto stack_size = static_cast(getpagesize() * 2); + // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead. + void* const stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED); + + // Maximum stack alignment in bytes: For a downward-growing stack, this + // amount is subtracted from size of the stack space to get an address + // that is within the stack space and is aligned on all systems we care + // about. As far as I know there is no ABI with stack alignment greater + // than 64. We assume stack and stack_size already have alignment of + // kMaxStackAlignment. + const size_t kMaxStackAlignment = 64; + void* const stack_top = + static_cast(stack) + + (stack_grows_down ? 
stack_size - kMaxStackAlignment : 0); + GTEST_DEATH_TEST_CHECK_( + static_cast(stack_size) > kMaxStackAlignment && + reinterpret_cast(stack_top) % kMaxStackAlignment == 0); + + child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args); + + GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1); + } +# else + const bool use_fork = true; +# endif // GTEST_HAS_CLONE + + if (use_fork && (child_pid = fork()) == 0) { + ExecDeathTestChildMain(&args); + _exit(0); + } +# endif // GTEST_OS_QNX +# if GTEST_OS_LINUX + GTEST_DEATH_TEST_CHECK_SYSCALL_( + sigaction(SIGPROF, &saved_sigprof_action, nullptr)); +# endif // GTEST_OS_LINUX + + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + return child_pid; +} + +// The AssumeRole process for a fork-and-exec death test. It re-executes the +// main program from the beginning, setting the --gtest_filter +// and --gtest_internal_run_death_test flags to cause only the current +// death test to be re-run. +DeathTest::TestRole ExecDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != nullptr) { + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + // Clear the close-on-exec flag on the write end of the pipe, lest + // it be closed when the child process does an exec: + GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1); + + const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ + + kFilterFlag + "=" + info->test_suite_name() + + "." + info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "=" + + file_ + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index) + "|" + + StreamableToString(pipe_fd[1]); + Arguments args; + args.AddArguments(GetArgvsForDeathTestChildProcess()); + args.AddArgument(filter_flag.c_str()); + args.AddArgument(internal_flag.c_str()); + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // See the comment in NoExecDeathTest::AssumeRole for why the next line + // is necessary. + FlushInfoLog(); + + const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_child_pid(child_pid); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; +} + +# endif // !GTEST_OS_WINDOWS + +// Creates a concrete DeathTest-derived class that depends on the +// --gtest_death_test_style flag, and sets the pointer pointed to +// by the "test" argument to its address. If the test should be +// skipped, sets that pointer to NULL. Returns true, unless the +// flag is set to an invalid value. 
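+// For example (illustrative): on a POSIX system, --gtest_death_test_style=
+// "threadsafe" selects ExecDeathTest (fork plus exec of the whole test binary),
+// while "fast" selects NoExecDeathTest (plain fork); on Windows and Fuchsia
+// both styles map to the single platform-specific implementation above.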
+bool DefaultDeathTestFactory::Create(const char* statement, + Matcher matcher, + const char* file, int line, + DeathTest** test) { + UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const int death_test_index = impl->current_test_info() + ->increment_death_test_count(); + + if (flag != nullptr) { + if (death_test_index > flag->index()) { + DeathTest::set_last_death_test_message( + "Death test count (" + StreamableToString(death_test_index) + + ") somehow exceeded expected maximum (" + + StreamableToString(flag->index()) + ")"); + return false; + } + + if (!(flag->file() == file && flag->line() == line && + flag->index() == death_test_index)) { + *test = nullptr; + return true; + } + } + +# if GTEST_OS_WINDOWS + + if (GTEST_FLAG(death_test_style) == "threadsafe" || + GTEST_FLAG(death_test_style) == "fast") { + *test = new WindowsDeathTest(statement, std::move(matcher), file, line); + } + +# elif GTEST_OS_FUCHSIA + + if (GTEST_FLAG(death_test_style) == "threadsafe" || + GTEST_FLAG(death_test_style) == "fast") { + *test = new FuchsiaDeathTest(statement, std::move(matcher), file, line); + } + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") { + *test = new ExecDeathTest(statement, std::move(matcher), file, line); + } else if (GTEST_FLAG(death_test_style) == "fast") { + *test = new NoExecDeathTest(statement, std::move(matcher)); + } + +# endif // GTEST_OS_WINDOWS + + else { // NOLINT - this is more readable than unbalanced brackets inside #if. + DeathTest::set_last_death_test_message( + "Unknown death test style \"" + GTEST_FLAG(death_test_style) + + "\" encountered"); + return false; + } + + return true; +} + +# if GTEST_OS_WINDOWS +// Recreates the pipe and event handles from the provided parameters, +// signals the event, and returns a file descriptor wrapped around the pipe +// handle. This function is called in the child process only. +static int GetStatusFileDescriptor(unsigned int parent_process_id, + size_t write_handle_as_size_t, + size_t event_handle_as_size_t) { + AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE, + FALSE, // Non-inheritable. + parent_process_id)); + if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) { + DeathTestAbort("Unable to open parent process " + + StreamableToString(parent_process_id)); + } + + GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t)); + + const HANDLE write_handle = + reinterpret_cast(write_handle_as_size_t); + HANDLE dup_write_handle; + + // The newly initialized handle is accessible only in the parent + // process. To obtain one accessible within the child, we need to use + // DuplicateHandle. + if (!::DuplicateHandle(parent_process_handle.Get(), write_handle, + ::GetCurrentProcess(), &dup_write_handle, + 0x0, // Requested privileges ignored since + // DUPLICATE_SAME_ACCESS is used. + FALSE, // Request non-inheritable handler. 
+ DUPLICATE_SAME_ACCESS)) { + DeathTestAbort("Unable to duplicate the pipe handle " + + StreamableToString(write_handle_as_size_t) + + " from the parent process " + + StreamableToString(parent_process_id)); + } + + const HANDLE event_handle = reinterpret_cast(event_handle_as_size_t); + HANDLE dup_event_handle; + + if (!::DuplicateHandle(parent_process_handle.Get(), event_handle, + ::GetCurrentProcess(), &dup_event_handle, + 0x0, + FALSE, + DUPLICATE_SAME_ACCESS)) { + DeathTestAbort("Unable to duplicate the event handle " + + StreamableToString(event_handle_as_size_t) + + " from the parent process " + + StreamableToString(parent_process_id)); + } + + const int write_fd = + ::_open_osfhandle(reinterpret_cast(dup_write_handle), O_APPEND); + if (write_fd == -1) { + DeathTestAbort("Unable to convert pipe handle " + + StreamableToString(write_handle_as_size_t) + + " to a file descriptor"); + } + + // Signals the parent that the write end of the pipe has been acquired + // so the parent can release its own write end. + ::SetEvent(dup_event_handle); + + return write_fd; +} +# endif // GTEST_OS_WINDOWS + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { + if (GTEST_FLAG(internal_run_death_test) == "") return nullptr; + + // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we + // can use it here. + int line = -1; + int index = -1; + ::std::vector< ::std::string> fields; + SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields); + int write_fd = -1; + +# if GTEST_OS_WINDOWS + + unsigned int parent_process_id = 0; + size_t write_handle_as_size_t = 0; + size_t event_handle_as_size_t = 0; + + if (fields.size() != 6 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &parent_process_id) + || !ParseNaturalNumber(fields[4], &write_handle_as_size_t) + || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); + } + write_fd = GetStatusFileDescriptor(parent_process_id, + write_handle_as_size_t, + event_handle_as_size_t); + +# elif GTEST_OS_FUCHSIA + + if (fields.size() != 3 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index)) { + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); + } + +# else + + if (fields.size() != 4 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &write_fd)) { + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); + } + +# endif // GTEST_OS_WINDOWS + + return new InternalRunDeathTestFlag(fields[0], line, index, write_fd); +} + +} // namespace internal + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest-filepath.cc b/source/3rdparty/gtest/src/gtest-filepath.cc new file mode 100644 index 0000000..9aad12f --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-filepath.cc @@ -0,0 +1,382 @@ +// Copyright 2008, Google Inc. +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "gtest/internal/gtest-filepath.h"
+
+#include <stdlib.h>
+#include "gtest/internal/gtest-port.h"
+#include "gtest/gtest-message.h"
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#else
+# include <limits.h>
+# include <climits>  // Some Linux distributions define PATH_MAX here.
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+#include "gtest/internal/gtest-string.h"
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif  // GTEST_OS_WINDOWS
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif  // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kCurrentDirectoryString[] = "./";
+#endif  // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+  return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+  return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() { +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \ + GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 || GTEST_OS_ESP32 + // These platforms do not have a current directory, so we just return + // something reasonable. + return FilePath(kCurrentDirectoryString); +#elif GTEST_OS_WINDOWS + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(_getcwd(cwd, sizeof(cwd)) == nullptr ? "" : cwd); +#else + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + char* result = getcwd(cwd, sizeof(cwd)); +# if GTEST_OS_NACL + // getcwd will likely fail in NaCl due to the sandbox, so return something + // reasonable. The user may have provided a shim implementation for getcwd, + // however, so fallback only when failure is detected. + return FilePath(result == nullptr ? kCurrentDirectoryString : cwd); +# endif // GTEST_OS_NACL + return FilePath(result == nullptr ? "" : cwd); +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns a copy of the FilePath with the case-insensitive extension removed. +// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns +// FilePath("dir/file"). If a case-insensitive extension is not +// found, returns a copy of the original FilePath. +FilePath FilePath::RemoveExtension(const char* extension) const { + const std::string dot_extension = std::string(".") + extension; + if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) { + return FilePath(pathname_.substr( + 0, pathname_.length() - dot_extension.length())); + } + return *this; +} + +// Returns a pointer to the last occurrence of a valid path separator in +// the FilePath. On Windows, for example, both '/' and '\' are valid path +// separators. Returns NULL if no path separator was found. +const char* FilePath::FindLastPathSeparator() const { + const char* const last_sep = strrchr(c_str(), kPathSeparator); +#if GTEST_HAS_ALT_PATH_SEP_ + const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator); + // Comparing two pointers of which only one is NULL is undefined. + if (last_alt_sep != nullptr && + (last_sep == nullptr || last_alt_sep > last_sep)) { + return last_alt_sep; + } +#endif + return last_sep; +} + +// Returns a copy of the FilePath with the directory part removed. +// Example: FilePath("path/to/file").RemoveDirectoryName() returns +// FilePath("file"). If there is no directory part ("just_a_file"), it returns +// the FilePath unmodified. If there is no file part ("just_a_dir/") it +// returns an empty FilePath (""). +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveDirectoryName() const { + const char* const last_sep = FindLastPathSeparator(); + return last_sep ? FilePath(last_sep + 1) : *this; +} + +// RemoveFileName returns the directory path with the filename removed. +// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". +// If the FilePath is "a_file" or "/a_file", RemoveFileName returns +// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does +// not have a file, like "just/a/dir/", it returns the FilePath unmodified. +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveFileName() const { + const char* const last_sep = FindLastPathSeparator(); + std::string dir; + if (last_sep) { + dir = std::string(c_str(), static_cast(last_sep + 1 - c_str())); + } else { + dir = kCurrentDirectoryString; + } + return FilePath(dir); +} + +// Helper functions for naming files in a directory for xml output. 
+ +// Given directory = "dir", base_name = "test", number = 0, +// extension = "xml", returns "dir/test.xml". If number is greater +// than zero (e.g., 12), returns "dir/test_12.xml". +// On Windows platform, uses \ as the separator rather than /. +FilePath FilePath::MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension) { + std::string file; + if (number == 0) { + file = base_name.string() + "." + extension; + } else { + file = base_name.string() + "_" + StreamableToString(number) + + "." + extension; + } + return ConcatPaths(directory, FilePath(file)); +} + +// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml". +// On Windows, uses \ as the separator rather than /. +FilePath FilePath::ConcatPaths(const FilePath& directory, + const FilePath& relative_path) { + if (directory.IsEmpty()) + return relative_path; + const FilePath dir(directory.RemoveTrailingPathSeparator()); + return FilePath(dir.string() + kPathSeparator + relative_path.string()); +} + +// Returns true if pathname describes something findable in the file-system, +// either a file, directory, or whatever. +bool FilePath::FileOrDirectoryExists() const { +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + return attributes != kInvalidFileAttributes; +#else + posix::StatStruct file_stat; + return posix::Stat(pathname_.c_str(), &file_stat) == 0; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns true if pathname describes a directory in the file-system +// that exists. +bool FilePath::DirectoryExists() const { + bool result = false; +#if GTEST_OS_WINDOWS + // Don't strip off trailing separator if path is a root directory on + // Windows (like "C:\\"). + const FilePath& path(IsRootDirectory() ? *this : + RemoveTrailingPathSeparator()); +#else + const FilePath& path(*this); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + if ((attributes != kInvalidFileAttributes) && + (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + result = true; + } +#else + posix::StatStruct file_stat; + result = posix::Stat(path.c_str(), &file_stat) == 0 && + posix::IsDir(file_stat); +#endif // GTEST_OS_WINDOWS_MOBILE + + return result; +} + +// Returns true if pathname describes a root directory. (Windows has one +// root directory per disk drive.) +bool FilePath::IsRootDirectory() const { +#if GTEST_OS_WINDOWS + return pathname_.length() == 3 && IsAbsolutePath(); +#else + return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]); +#endif +} + +// Returns true if pathname describes an absolute path. +bool FilePath::IsAbsolutePath() const { + const char* const name = pathname_.c_str(); +#if GTEST_OS_WINDOWS + return pathname_.length() >= 3 && + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && + IsPathSeparator(name[2]); +#else + return IsPathSeparator(name[0]); +#endif +} + +// Returns a pathname for a file that does not currently exist. The pathname +// will be directory/base_name.extension or +// directory/base_name_.extension if directory/base_name.extension +// already exists. The number will be incremented until a pathname is found +// that does not already exist. +// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. 
+// There could be a race condition if two or more processes are calling this +// function at the same time -- they could both pick the same filename. +FilePath FilePath::GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension) { + FilePath full_pathname; + int number = 0; + do { + full_pathname.Set(MakeFileName(directory, base_name, number++, extension)); + } while (full_pathname.FileOrDirectoryExists()); + return full_pathname; +} + +// Returns true if FilePath ends with a path separator, which indicates that +// it is intended to represent a directory. Returns false otherwise. +// This does NOT check that a directory (or file) actually exists. +bool FilePath::IsDirectory() const { + return !pathname_.empty() && + IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]); +} + +// Create directories so that path exists. Returns true if successful or if +// the directories already exist; returns false if unable to create directories +// for any reason. +bool FilePath::CreateDirectoriesRecursively() const { + if (!this->IsDirectory()) { + return false; + } + + if (pathname_.length() == 0 || this->DirectoryExists()) { + return true; + } + + const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName()); + return parent.CreateDirectoriesRecursively() && this->CreateFolder(); +} + +// Create the directory so that path exists. Returns true if successful or +// if the directory already exists; returns false if unable to create the +// directory for any reason, including if the parent directory does not +// exist. Not named "CreateDirectory" because that's a macro on Windows. +bool FilePath::CreateFolder() const { +#if GTEST_OS_WINDOWS_MOBILE + FilePath removed_sep(this->RemoveTrailingPathSeparator()); + LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); + int result = CreateDirectory(unicode, nullptr) ? 0 : -1; + delete [] unicode; +#elif GTEST_OS_WINDOWS + int result = _mkdir(pathname_.c_str()); +#elif GTEST_OS_ESP8266 + // do nothing + int result = 0; +#else + int result = mkdir(pathname_.c_str(), 0777); +#endif // GTEST_OS_WINDOWS_MOBILE + + if (result == -1) { + return this->DirectoryExists(); // An error is OK if the directory exists. + } + return true; // No error. +} + +// If input name has a trailing separator character, remove it and return the +// name, otherwise return the name string unmodified. +// On Windows platform, uses \ as the separator, other platforms use /. +FilePath FilePath::RemoveTrailingPathSeparator() const { + return IsDirectory() + ? FilePath(pathname_.substr(0, pathname_.length() - 1)) + : *this; +} + +// Removes any redundant separators that might be in the pathname. +// For example, "bar///foo" becomes "bar/foo". Does not eliminate other +// redundancies that might be in a pathname involving "." or "..". 
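+// On Windows (illustrative note, derived from the code below): runs that mix
+// both separator kinds are collapsed the same way; the first separator of each
+// run is kept and an alternate '/' is rewritten to '\', so "dir//sub\file"
+// normalizes to "dir\sub\file".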
+void FilePath::Normalize() { + if (pathname_.c_str() == nullptr) { + pathname_ = ""; + return; + } + const char* src = pathname_.c_str(); + char* const dest = new char[pathname_.length() + 1]; + char* dest_ptr = dest; + memset(dest_ptr, 0, pathname_.length() + 1); + + while (*src != '\0') { + *dest_ptr = *src; + if (!IsPathSeparator(*src)) { + src++; + } else { +#if GTEST_HAS_ALT_PATH_SEP_ + if (*dest_ptr == kAlternatePathSeparator) { + *dest_ptr = kPathSeparator; + } +#endif + while (IsPathSeparator(*src)) + src++; + } + dest_ptr++; + } + *dest_ptr = '\0'; + pathname_ = dest; + delete[] dest; +} + +} // namespace internal +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest-internal-inl.h b/source/3rdparty/gtest/src/gtest-internal-inl.h new file mode 100644 index 0000000..e42ff47 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-internal-inl.h @@ -0,0 +1,1218 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Utility functions and classes used by the Google C++ testing framework.// +// This file contains purely Google Test's internal implementation. Please +// DO NOT #INCLUDE IT IN A USER PROGRAM. + +#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_ +#define GTEST_SRC_GTEST_INTERNAL_INL_H_ + +#ifndef _WIN32_WCE +# include +#endif // !_WIN32_WCE +#include +#include // For strtoll/_strtoul64/malloc/free. +#include // For memmove. + +#include +#include +#include +#include +#include + +#include "gtest/internal/gtest-port.h" + +#if GTEST_CAN_STREAM_RESULTS_ +# include // NOLINT +# include // NOLINT +#endif + +#if GTEST_OS_WINDOWS +# include // NOLINT +#endif // GTEST_OS_WINDOWS + +#include "gtest/gtest.h" +#include "gtest/gtest-spi.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +namespace testing { + +// Declares the flags. +// +// We don't want the users to modify this flag in the code, but want +// Google Test's own unit tests to be able to access it. 
Therefore we +// declare it here as opposed to in gtest.h. +GTEST_DECLARE_bool_(death_test_use_fork); + +namespace internal { + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest; + +// Names of the flags (needed for parsing Google Test flags). +const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests"; +const char kBreakOnFailureFlag[] = "break_on_failure"; +const char kCatchExceptionsFlag[] = "catch_exceptions"; +const char kColorFlag[] = "color"; +const char kFilterFlag[] = "filter"; +const char kListTestsFlag[] = "list_tests"; +const char kOutputFlag[] = "output"; +const char kPrintTimeFlag[] = "print_time"; +const char kPrintUTF8Flag[] = "print_utf8"; +const char kRandomSeedFlag[] = "random_seed"; +const char kRepeatFlag[] = "repeat"; +const char kShuffleFlag[] = "shuffle"; +const char kStackTraceDepthFlag[] = "stack_trace_depth"; +const char kStreamResultToFlag[] = "stream_result_to"; +const char kThrowOnFailureFlag[] = "throw_on_failure"; +const char kFlagfileFlag[] = "flagfile"; + +// A valid random seed must be in [1, kMaxRandomSeed]. +const int kMaxRandomSeed = 99999; + +// g_help_flag is true if and only if the --help flag or an equivalent form +// is specified on the command line. +GTEST_API_ extern bool g_help_flag; + +// Returns the current time in milliseconds. +GTEST_API_ TimeInMillis GetTimeInMillis(); + +// Returns true if and only if Google Test should use colors in the output. +GTEST_API_ bool ShouldUseColor(bool stdout_is_tty); + +// Formats the given time in milliseconds as seconds. +GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms); + +// Converts the given time in milliseconds to a date string in the ISO 8601 +// format, without the timezone information. N.B.: due to the use the +// non-reentrant localtime() function, this function is not thread safe. Do +// not use it in any code that can be called from multiple threads. +GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms); + +// Parses a string for an Int32 flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +GTEST_API_ bool ParseInt32Flag( + const char* str, const char* flag, int32_t* value); + +// Returns a random seed in range [1, kMaxRandomSeed] based on the +// given --gtest_random_seed flag value. +inline int GetRandomSeedFromFlag(int32_t random_seed_flag) { + const unsigned int raw_seed = (random_seed_flag == 0) ? + static_cast(GetTimeInMillis()) : + static_cast(random_seed_flag); + + // Normalizes the actual seed to range [1, kMaxRandomSeed] such that + // it's easy to type. + const int normalized_seed = + static_cast((raw_seed - 1U) % + static_cast(kMaxRandomSeed)) + 1; + return normalized_seed; +} + +// Returns the first valid random seed after 'seed'. The behavior is +// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is +// considered to be 1. +inline int GetNextRandomSeed(int seed) { + GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed) + << "Invalid random seed " << seed << " - must be in [1, " + << kMaxRandomSeed << "]."; + const int next_seed = seed + 1; + return (next_seed > kMaxRandomSeed) ? 1 : next_seed; +} + +// This class saves the values of all Google Test flags in its c'tor, and +// restores them in its d'tor. +class GTestFlagSaver { + public: + // The c'tor. 
+ GTestFlagSaver() { + also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests); + break_on_failure_ = GTEST_FLAG(break_on_failure); + catch_exceptions_ = GTEST_FLAG(catch_exceptions); + color_ = GTEST_FLAG(color); + death_test_style_ = GTEST_FLAG(death_test_style); + death_test_use_fork_ = GTEST_FLAG(death_test_use_fork); + filter_ = GTEST_FLAG(filter); + internal_run_death_test_ = GTEST_FLAG(internal_run_death_test); + list_tests_ = GTEST_FLAG(list_tests); + output_ = GTEST_FLAG(output); + print_time_ = GTEST_FLAG(print_time); + print_utf8_ = GTEST_FLAG(print_utf8); + random_seed_ = GTEST_FLAG(random_seed); + repeat_ = GTEST_FLAG(repeat); + shuffle_ = GTEST_FLAG(shuffle); + stack_trace_depth_ = GTEST_FLAG(stack_trace_depth); + stream_result_to_ = GTEST_FLAG(stream_result_to); + throw_on_failure_ = GTEST_FLAG(throw_on_failure); + } + + // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS. + ~GTestFlagSaver() { + GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_; + GTEST_FLAG(break_on_failure) = break_on_failure_; + GTEST_FLAG(catch_exceptions) = catch_exceptions_; + GTEST_FLAG(color) = color_; + GTEST_FLAG(death_test_style) = death_test_style_; + GTEST_FLAG(death_test_use_fork) = death_test_use_fork_; + GTEST_FLAG(filter) = filter_; + GTEST_FLAG(internal_run_death_test) = internal_run_death_test_; + GTEST_FLAG(list_tests) = list_tests_; + GTEST_FLAG(output) = output_; + GTEST_FLAG(print_time) = print_time_; + GTEST_FLAG(print_utf8) = print_utf8_; + GTEST_FLAG(random_seed) = random_seed_; + GTEST_FLAG(repeat) = repeat_; + GTEST_FLAG(shuffle) = shuffle_; + GTEST_FLAG(stack_trace_depth) = stack_trace_depth_; + GTEST_FLAG(stream_result_to) = stream_result_to_; + GTEST_FLAG(throw_on_failure) = throw_on_failure_; + } + + private: + // Fields for saving the original values of flags. + bool also_run_disabled_tests_; + bool break_on_failure_; + bool catch_exceptions_; + std::string color_; + std::string death_test_style_; + bool death_test_use_fork_; + std::string filter_; + std::string internal_run_death_test_; + bool list_tests_; + std::string output_; + bool print_time_; + bool print_utf8_; + int32_t random_seed_; + int32_t repeat_; + bool shuffle_; + int32_t stack_trace_depth_; + std::string stream_result_to_; + bool throw_on_failure_; +} GTEST_ATTRIBUTE_UNUSED_; + +// Converts a Unicode code point to a narrow string in UTF-8 encoding. +// code_point parameter is of type UInt32 because wchar_t may not be +// wide enough to contain a code point. +// If the code_point is not a valid Unicode code point +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted +// to "(Invalid Unicode 0xXXXXXXXX)". +GTEST_API_ std::string CodePointToUtf8(uint32_t code_point); + +// Converts a wide string to a narrow string in UTF-8 encoding. +// The wide string is assumed to have the following encoding: +// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin) +// UTF-32 if sizeof(wchar_t) == 4 (on Linux) +// Parameter str points to a null-terminated wide string. +// Parameter num_chars may additionally limit the number +// of wchar_t characters processed. -1 is used when the entire string +// should be processed. +// If the string contains code points that are not valid Unicode code points +// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. 
If the string is in UTF16 encoding +// and contains invalid UTF-16 surrogate pairs, values in those pairs +// will be encoded as individual Unicode characters from Basic Normal Plane. +GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars); + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded(); + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (e.g., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. +GTEST_API_ bool ShouldShard(const char* total_shards_str, + const char* shard_index_str, + bool in_subprocess_for_death_test); + +// Parses the environment variable var as a 32-bit integer. If it is unset, +// returns default_val. If it is not a 32-bit integer, prints an error and +// and aborts. +GTEST_API_ int32_t Int32FromEnvOrDie(const char* env_var, int32_t default_val); + +// Given the total number of shards, the shard index, and the test id, +// returns true if and only if the test should be run on this shard. The test id +// is some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +GTEST_API_ bool ShouldRunTestOnShard( + int total_shards, int shard_index, int test_id); + +// STL container utilities. + +// Returns the number of elements in the given container that satisfy +// the given predicate. +template +inline int CountIf(const Container& c, Predicate predicate) { + // Implemented as an explicit loop since std::count_if() in libCstd on + // Solaris has a non-standard signature. + int count = 0; + for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { + if (predicate(*it)) + ++count; + } + return count; +} + +// Applies a function/functor to each element in the container. +template +void ForEach(const Container& c, Functor functor) { + std::for_each(c.begin(), c.end(), functor); +} + +// Returns the i-th element of the vector, or default_value if i is not +// in range [0, v.size()). +template +inline E GetElementOr(const std::vector& v, int i, E default_value) { + return (i < 0 || i >= static_cast(v.size())) ? default_value + : v[static_cast(i)]; +} + +// Performs an in-place shuffle of a range of the vector's elements. +// 'begin' and 'end' are element indices as an STL-style range; +// i.e. [begin, end) are shuffled, where 'end' == size() means to +// shuffle to the end of the vector. 
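+// For example (illustrative), ShuffleRange(&rng, 2, 5, &v) permutes only
+// v[2], v[3] and v[4] and leaves every other element where it was.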
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+                  std::vector<E>* v) {
+  const int size = static_cast<int>(v->size());
+  GTEST_CHECK_(0 <= begin && begin <= size)
+      << "Invalid shuffle range start " << begin << ": must be in range [0, "
+      << size << "].";
+  GTEST_CHECK_(begin <= end && end <= size)
+      << "Invalid shuffle range finish " << end << ": must be in range ["
+      << begin << ", " << size << "].";
+
+  // Fisher-Yates shuffle, from
+  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+  for (int range_width = end - begin; range_width >= 2; range_width--) {
+    const int last_in_range = begin + range_width - 1;
+    const int selected =
+        begin +
+        static_cast<int>(random->Generate(static_cast<uint32_t>(range_width)));
+    std::swap((*v)[static_cast<size_t>(selected)],
+              (*v)[static_cast<size_t>(last_in_range)]);
+  }
+}
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+  ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object.  Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+  delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+  // Constructor.
+  //
+  // TestPropertyKeyIs has NO default constructor.
+  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
+
+  // Returns true if and only if the key of the test property matches key_.
+  bool operator()(const TestProperty& test_property) const {
+    return test_property.key() == key_;
+  }
+
+ private:
+  std::string key_;
+};
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests.  It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag.  E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter.  If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+  // Functions for processing the gtest_output flag.
+
+  // Returns the output format, or "" for normal printed output.
+  static std::string GetOutputFormat();
+
+  // Returns the absolute path of the requested output file, or the
+  // default (test_detail.xml in the original working directory) if
+  // none was explicitly specified.
+  static std::string GetAbsolutePathToOutputFile();
+
+  // Functions for processing the gtest_filter flag.
+
+  // Returns true if and only if the wildcard pattern matches the string.
+  // The first ':' or '\0' character in pattern marks the end of it.
+  //
+  // This recursive algorithm isn't very efficient, but is clear and
+  // works well enough for matching test names, which are short.
+  static bool PatternMatchesString(const char *pattern, const char *str);
+
+  // Returns true if and only if the user-specified filter matches the test
+  // suite name and the test name.
+  static bool FilterMatchesTest(const std::string& test_suite_name,
+                                const std::string& test_name);
+
+#if GTEST_OS_WINDOWS
+  // Function for supporting the gtest_catch_exceptions flag.
+
+  // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+  // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+  // This function is useful as an __except condition.
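+  // Typically used as (illustrative sketch; RunTheTest and ReportSehFailure
+  // stand in for the caller's own code):
+  //   __try { RunTheTest(); } __except(UnitTestOptions::GTestShouldProcessSEH(
+  //       GetExceptionCode())) { ReportSehFailure(); }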
+ static int GTestShouldProcessSEH(DWORD exception_code); +#endif // GTEST_OS_WINDOWS + + // Returns true if "name" matches the ':' separated list of glob-style + // filters in "filter". + static bool MatchesFilter(const std::string& name, const char* filter); +}; + +// Returns the current application's name, removing directory path if that +// is present. Used by UnitTestOptions::GetOutputFile. +GTEST_API_ FilePath GetCurrentExecutableName(); + +// The role interface for getting the OS stack trace as a string. +class OsStackTraceGetterInterface { + public: + OsStackTraceGetterInterface() {} + virtual ~OsStackTraceGetterInterface() {} + + // Returns the current OS stack trace as an std::string. Parameters: + // + // max_depth - the maximum number of stack frames to be included + // in the trace. + // skip_count - the number of top frames to be skipped; doesn't count + // against max_depth. + virtual std::string CurrentStackTrace(int max_depth, int skip_count) = 0; + + // UponLeavingGTest() should be called immediately before Google Test calls + // user code. It saves some information about the current stack that + // CurrentStackTrace() will use to find and hide Google Test stack frames. + virtual void UponLeavingGTest() = 0; + + // This string is inserted in place of stack frames that are part of + // Google Test's implementation. + static const char* const kElidedFramesMarker; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); +}; + +// A working implementation of the OsStackTraceGetterInterface interface. +class OsStackTraceGetter : public OsStackTraceGetterInterface { + public: + OsStackTraceGetter() {} + + std::string CurrentStackTrace(int max_depth, int skip_count) override; + void UponLeavingGTest() override; + + private: +#if GTEST_HAS_ABSL + Mutex mutex_; // Protects all internal state. + + // We save the stack frame below the frame that calls user code. + // We do this because the address of the frame immediately below + // the user code changes between the call to UponLeavingGTest() + // and any calls to the stack trace code from within the user code. + void* caller_frame_ = nullptr; +#endif // GTEST_HAS_ABSL + + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); +}; + +// Information about a Google Test trace point. +struct TraceInfo { + const char* file; + int line; + std::string message; +}; + +// This is the default global test part result reporter used in UnitTestImpl. +// This class should only be used by UnitTestImpl. +class DefaultGlobalTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. Reports the test part + // result in the current test. + void ReportTestPartResult(const TestPartResult& result) override; + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); +}; + +// This is the default per thread test part result reporter used in +// UnitTestImpl. This class should only be used by UnitTestImpl. +class DefaultPerThreadTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. The implementation just + // delegates to the current global test part result reporter of *unit_test_. 
+ void ReportTestPartResult(const TestPartResult& result) override; + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); +}; + +// The private implementation of the UnitTest class. We don't protect +// the methods under a mutex, as this class is not accessible by a +// user and the UnitTest class that delegates work to this class does +// proper locking. +class GTEST_API_ UnitTestImpl { + public: + explicit UnitTestImpl(UnitTest* parent); + virtual ~UnitTestImpl(); + + // There are two different ways to register your own TestPartResultReporter. + // You can register your own repoter to listen either only for test results + // from the current thread or for results from all threads. + // By default, each per-thread test result repoter just passes a new + // TestPartResult to the global test result reporter, which registers the + // test part result for the currently running test. + + // Returns the global test part result reporter. + TestPartResultReporterInterface* GetGlobalTestPartResultReporter(); + + // Sets the global test part result reporter. + void SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter); + + // Returns the test part result reporter for the current thread. + TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread(); + + // Sets the test part result reporter for the current thread. + void SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter); + + // Gets the number of successful test suites. + int successful_test_suite_count() const; + + // Gets the number of failed test suites. + int failed_test_suite_count() const; + + // Gets the number of all test suites. + int total_test_suite_count() const; + + // Gets the number of all test suites that contain at least one test + // that should run. + int test_suite_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of skipped tests. + int skipped_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const { return start_timestamp_; } + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns true if and only if the unit test passed (i.e. all test suites + // passed). + bool Passed() const { return !Failed(); } + + // Returns true if and only if the unit test failed (i.e. some test suite + // failed or something outside of all tests failed). + bool Failed() const { + return failed_test_suite_count() > 0 || ad_hoc_test_result()->Failed(); + } + + // Gets the i-th test suite among all the test suites. i can range from 0 to + // total_test_suite_count() - 1. If i is not in that range, returns NULL. + const TestSuite* GetTestSuite(int i) const { + const int index = GetElementOr(test_suite_indices_, i, -1); + return index < 0 ? 
nullptr : test_suites_[static_cast(i)]; + } + + // Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + const TestCase* GetTestCase(int i) const { return GetTestSuite(i); } +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Gets the i-th test suite among all the test suites. i can range from 0 to + // total_test_suite_count() - 1. If i is not in that range, returns NULL. + TestSuite* GetMutableSuiteCase(int i) { + const int index = GetElementOr(test_suite_indices_, i, -1); + return index < 0 ? nullptr : test_suites_[static_cast(index)]; + } + + // Provides access to the event listener list. + TestEventListeners* listeners() { return &listeners_; } + + // Returns the TestResult for the test that's currently running, or + // the TestResult for the ad hoc test if no test is running. + TestResult* current_test_result(); + + // Returns the TestResult for the ad hoc test. + const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; } + + // Sets the OS stack trace getter. + // + // Does nothing if the input and the current OS stack trace getter + // are the same; otherwise, deletes the old getter and makes the + // input the current getter. + void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter); + + // Returns the current OS stack trace getter if it is not NULL; + // otherwise, creates an OsStackTraceGetter, makes it the current + // getter, and returns it. + OsStackTraceGetterInterface* os_stack_trace_getter(); + + // Returns the current OS stack trace as an std::string. + // + // The maximum number of stack frames to be included is specified by + // the gtest_stack_trace_depth flag. The skip_count parameter + // specifies the number of top frames to be skipped, which doesn't + // count against the number of frames to be included. + // + // For example, if Foo() calls Bar(), which in turn calls + // CurrentOsStackTraceExceptTop(1), Foo() will be included in the + // trace but Bar() and CurrentOsStackTraceExceptTop() won't. + std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_; + + // Finds and returns a TestSuite with the given name. If one doesn't + // exist, creates one and returns it. + // + // Arguments: + // + // test_suite_name: name of the test suite + // type_param: the name of the test's type parameter, or NULL if + // this is not a typed or a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test suite + // tear_down_tc: pointer to the function that tears down the test suite + TestSuite* GetTestSuite(const char* test_suite_name, const char* type_param, + internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc); + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + TestCase* GetTestCase(const char* test_case_name, const char* type_param, + internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc) { + return GetTestSuite(test_case_name, type_param, set_up_tc, tear_down_tc); + } +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + // Adds a TestInfo to the unit test. 
+ // + // Arguments: + // + // set_up_tc: pointer to the function that sets up the test suite + // tear_down_tc: pointer to the function that tears down the test suite + // test_info: the TestInfo object + void AddTestInfo(internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc, + TestInfo* test_info) { + // In order to support thread-safe death tests, we need to + // remember the original working directory when the test program + // was first invoked. We cannot do this in RUN_ALL_TESTS(), as + // the user may have changed the current directory before calling + // RUN_ALL_TESTS(). Therefore we capture the current directory in + // AddTestInfo(), which is called to register a TEST or TEST_F + // before main() is reached. + if (original_working_dir_.IsEmpty()) { + original_working_dir_.Set(FilePath::GetCurrentDir()); + GTEST_CHECK_(!original_working_dir_.IsEmpty()) + << "Failed to get the current working directory."; + } + + GetTestSuite(test_info->test_suite_name(), test_info->type_param(), + set_up_tc, tear_down_tc) + ->AddTestInfo(test_info); + } + + // Returns ParameterizedTestSuiteRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + internal::ParameterizedTestSuiteRegistry& parameterized_test_registry() { + return parameterized_test_registry_; + } + + std::set* ignored_parameterized_test_suites() { + return &ignored_parameterized_test_suites_; + } + + // Returns TypeParameterizedTestSuiteRegistry object used to keep track of + // type-parameterized tests and instantiations of them. + internal::TypeParameterizedTestSuiteRegistry& + type_parameterized_test_registry() { + return type_parameterized_test_registry_; + } + + // Sets the TestSuite object for the test that's currently running. + void set_current_test_suite(TestSuite* a_current_test_suite) { + current_test_suite_ = a_current_test_suite; + } + + // Sets the TestInfo object for the test that's currently running. If + // current_test_info is NULL, the assertion results will be stored in + // ad_hoc_test_result_. + void set_current_test_info(TestInfo* a_current_test_info) { + current_test_info_ = a_current_test_info; + } + + // Registers all parameterized tests defined using TEST_P and + // INSTANTIATE_TEST_SUITE_P, creating regular tests for each test/parameter + // combination. This method can be called more then once; it has guards + // protecting from registering the tests more then once. If + // value-parameterized tests are disabled, RegisterParameterizedTests is + // present but does nothing. + void RegisterParameterizedTests(); + + // Runs all tests in this UnitTest object, prints the result, and + // returns true if all tests are successful. If any exception is + // thrown during a test, this test is considered to be failed, but + // the rest of the tests will still be run. + bool RunAllTests(); + + // Clears the results of all tests, except the ad hoc tests. + void ClearNonAdHocTestResult() { + ForEach(test_suites_, TestSuite::ClearTestSuiteResult); + } + + // Clears the results of ad-hoc test assertions. + void ClearAdHocTestResult() { + ad_hoc_test_result_.Clear(); + } + + // Adds a TestProperty to the current TestResult object when invoked in a + // context of a test or a test suite, or to the global property set. If the + // result already contains a property with the same key, the value will be + // updated. 
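+  // For example, a call such as RecordProperty("cpu", "x86_64") made from a
+  // test body through Test::RecordProperty() ends up here and is later
+  // emitted as an attribute of the corresponding element in the XML report.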
+ void RecordProperty(const TestProperty& test_property); + + enum ReactionToSharding { + HONOR_SHARDING_PROTOCOL, + IGNORE_SHARDING_PROTOCOL + }; + + // Matches the full name of each test against the user-specified + // filter to decide whether the test should run, then records the + // result in each TestSuite and TestInfo object. + // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests + // based on sharding variables in the environment. + // Returns the number of tests that should run. + int FilterTests(ReactionToSharding shard_tests); + + // Prints the names of the tests matching the user-specified filter flag. + void ListTestsMatchingFilter(); + + const TestSuite* current_test_suite() const { return current_test_suite_; } + TestInfo* current_test_info() { return current_test_info_; } + const TestInfo* current_test_info() const { return current_test_info_; } + + // Returns the vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector& environments() { return environments_; } + + // Getters for the per-thread Google Test trace stack. + std::vector& gtest_trace_stack() { + return *(gtest_trace_stack_.pointer()); + } + const std::vector& gtest_trace_stack() const { + return gtest_trace_stack_.get(); + } + +#if GTEST_HAS_DEATH_TEST + void InitDeathTestSubprocessControlInfo() { + internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); + } + // Returns a pointer to the parsed --gtest_internal_run_death_test + // flag, or NULL if that flag was not specified. + // This information is useful only in a death test child process. + // Must not be called before a call to InitGoogleTest. + const InternalRunDeathTestFlag* internal_run_death_test_flag() const { + return internal_run_death_test_flag_.get(); + } + + // Returns a pointer to the current death test factory. + internal::DeathTestFactory* death_test_factory() { + return death_test_factory_.get(); + } + + void SuppressTestEventsIfInSubprocess(); + + friend class ReplaceDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + + // Initializes the event listener performing XML output as specified by + // UnitTestOptions. Must not be called before InitGoogleTest. + void ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Initializes the event listener for streaming test results to a socket. + // Must not be called before InitGoogleTest. + void ConfigureStreamingOutput(); +#endif + + // Performs initialization dependent upon flag values obtained in + // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to + // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest + // this function is also called from RunAllTests. Since this function can be + // called more than once, it has to be idempotent. + void PostFlagParsingInit(); + + // Gets the random seed used at the start of the current test iteration. + int random_seed() const { return random_seed_; } + + // Gets the random number generator. + internal::Random* random() { return &random_; } + + // Shuffles all test suites, and the tests within each test suite, + // making sure that death tests are still run first. + void ShuffleTests(); + + // Restores the test suites and tests to their order before the first shuffle. + void UnshuffleTests(); + + // Returns the value of GTEST_FLAG(catch_exceptions) at the moment + // UnitTest::Run() starts. 
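+  // When the flag is true, Google Test catches exceptions (and, on Windows,
+  // SEH exceptions) thrown from test bodies and reports them as failures;
+  // when false, they propagate so a debugger can break at the throw site.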
+ bool catch_exceptions() const { return catch_exceptions_; } + + private: + friend class ::testing::UnitTest; + + // Used by UnitTest::Run() to capture the state of + // GTEST_FLAG(catch_exceptions) at the moment it starts. + void set_catch_exceptions(bool value) { catch_exceptions_ = value; } + + // The UnitTest object that owns this implementation object. + UnitTest* const parent_; + + // The working directory when the first TEST() or TEST_F() was + // executed. + internal::FilePath original_working_dir_; + + // The default test part result reporters. + DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_; + DefaultPerThreadTestPartResultReporter + default_per_thread_test_part_result_reporter_; + + // Points to (but doesn't own) the global test part result reporter. + TestPartResultReporterInterface* global_test_part_result_repoter_; + + // Protects read and write access to global_test_part_result_reporter_. + internal::Mutex global_test_part_result_reporter_mutex_; + + // Points to (but doesn't own) the per-thread test part result reporter. + internal::ThreadLocal + per_thread_test_part_result_reporter_; + + // The vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector environments_; + + // The vector of TestSuites in their original order. It owns the + // elements in the vector. + std::vector test_suites_; + + // Provides a level of indirection for the test suite list to allow + // easy shuffling and restoring the test suite order. The i-th + // element of this vector is the index of the i-th test suite in the + // shuffled order. + std::vector test_suite_indices_; + + // ParameterizedTestRegistry object used to register value-parameterized + // tests. + internal::ParameterizedTestSuiteRegistry parameterized_test_registry_; + internal::TypeParameterizedTestSuiteRegistry + type_parameterized_test_registry_; + + // The set holding the name of parameterized + // test suites that may go uninstantiated. + std::set ignored_parameterized_test_suites_; + + // Indicates whether RegisterParameterizedTests() has been called already. + bool parameterized_tests_registered_; + + // Index of the last death test suite registered. Initially -1. + int last_death_test_suite_; + + // This points to the TestSuite for the currently running test. It + // changes as Google Test goes through one test suite after another. + // When no test is running, this is set to NULL and Google Test + // stores assertion results in ad_hoc_test_result_. Initially NULL. + TestSuite* current_test_suite_; + + // This points to the TestInfo for the currently running test. It + // changes as Google Test goes through one test after another. When + // no test is running, this is set to NULL and Google Test stores + // assertion results in ad_hoc_test_result_. Initially NULL. + TestInfo* current_test_info_; + + // Normally, a user only writes assertions inside a TEST or TEST_F, + // or inside a function called by a TEST or TEST_F. Since Google + // Test keeps track of which test is current running, it can + // associate such an assertion with the test it belongs to. + // + // If an assertion is encountered when no TEST or TEST_F is running, + // Google Test attributes the assertion result to an imaginary "ad hoc" + // test, and records the result in ad_hoc_test_result_. + TestResult ad_hoc_test_result_; + + // The list of event listeners that can be used to track events inside + // Google Test. 
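+  // User code typically reaches this list through
+  // UnitTest::GetInstance()->listeners(), e.g. to Append() a custom
+  // TestEventListener or to Release() the default result printer.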
+ TestEventListeners listeners_; + + // The OS stack trace getter. Will be deleted when the UnitTest + // object is destructed. By default, an OsStackTraceGetter is used, + // but the user can set this field to use a custom getter if that is + // desired. + OsStackTraceGetterInterface* os_stack_trace_getter_; + + // True if and only if PostFlagParsingInit() has been called. + bool post_flag_parse_init_performed_; + + // The random number seed used at the beginning of the test run. + int random_seed_; + + // Our random number generator. + internal::Random random_; + + // The time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp_; + + // How long the test took to run, in milliseconds. + TimeInMillis elapsed_time_; + +#if GTEST_HAS_DEATH_TEST + // The decomposed components of the gtest_internal_run_death_test flag, + // parsed when RUN_ALL_TESTS is called. + std::unique_ptr internal_run_death_test_flag_; + std::unique_ptr death_test_factory_; +#endif // GTEST_HAS_DEATH_TEST + + // A per-thread stack of traces created by the SCOPED_TRACE() macro. + internal::ThreadLocal > gtest_trace_stack_; + + // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests() + // starts. + bool catch_exceptions_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); +}; // class UnitTestImpl + +// Convenience function for accessing the global UnitTest +// implementation object. +inline UnitTestImpl* GetUnitTestImpl() { + return UnitTest::GetInstance()->impl(); +} + +#if GTEST_USES_SIMPLE_RE + +// Internal helper functions for implementing the simple regular +// expression matcher. +GTEST_API_ bool IsInSet(char ch, const char* str); +GTEST_API_ bool IsAsciiDigit(char ch); +GTEST_API_ bool IsAsciiPunct(char ch); +GTEST_API_ bool IsRepeat(char ch); +GTEST_API_ bool IsAsciiWhiteSpace(char ch); +GTEST_API_ bool IsAsciiWordChar(char ch); +GTEST_API_ bool IsValidEscape(char ch); +GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); +GTEST_API_ bool ValidateRegex(const char* regex); +GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); +GTEST_API_ bool MatchRepetitionAndRegexAtHead( + bool escaped, char ch, char repeat, const char* regex, const char* str); +GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); + +#endif // GTEST_USES_SIMPLE_RE + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv); +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv); + +#if GTEST_HAS_DEATH_TEST + +// Returns the message describing the last system error, regardless of the +// platform. +GTEST_API_ std::string GetLastErrnoDescription(); + +// Attempts to parse a string into a positive integer pointed to by the +// number parameter. Returns true if that is possible. +// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use +// it here. +template +bool ParseNaturalNumber(const ::std::string& str, Integer* number) { + // Fail fast if the given string does not begin with a digit; + // this bypasses strtoXXX's "optional leading whitespace and plus + // or minus sign" semantics, which are undesirable here. + if (str.empty() || !IsDigit(str[0])) { + return false; + } + errno = 0; + + char* end; + // BiggestConvertible is the largest integer type that system-provided + // string-to-number conversion routines can return. 
+ using BiggestConvertible = unsigned long long; // NOLINT + + const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10); // NOLINT + const bool parse_success = *end == '\0' && errno == 0; + + GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed)); + + const Integer result = static_cast(parsed); + if (parse_success && static_cast(result) == parsed) { + *number = result; + return true; + } + return false; +} +#endif // GTEST_HAS_DEATH_TEST + +// TestResult contains some private methods that should be hidden from +// Google Test user but are required for testing. This class allow our tests +// to access them. +// +// This class is supplied only for the purpose of testing Google Test's own +// constructs. Do not use it in user tests, either directly or indirectly. +class TestResultAccessor { + public: + static void RecordProperty(TestResult* test_result, + const std::string& xml_element, + const TestProperty& property) { + test_result->RecordProperty(xml_element, property); + } + + static void ClearTestPartResults(TestResult* test_result) { + test_result->ClearTestPartResults(); + } + + static const std::vector& test_part_results( + const TestResult& test_result) { + return test_result.test_part_results(); + } +}; + +#if GTEST_CAN_STREAM_RESULTS_ + +// Streams test results to the given port on the given host machine. +class StreamingListener : public EmptyTestEventListener { + public: + // Abstract base class for writing strings to a socket. + class AbstractSocketWriter { + public: + virtual ~AbstractSocketWriter() {} + + // Sends a string to the socket. + virtual void Send(const std::string& message) = 0; + + // Closes the socket. + virtual void CloseConnection() {} + + // Sends a string and a newline to the socket. + void SendLn(const std::string& message) { Send(message + "\n"); } + }; + + // Concrete class for actually writing strings to a socket. + class SocketWriter : public AbstractSocketWriter { + public: + SocketWriter(const std::string& host, const std::string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + } + + ~SocketWriter() override { + if (sockfd_ != -1) + CloseConnection(); + } + + // Sends a string to the socket. + void Send(const std::string& message) override { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const auto len = static_cast(message.length()); + if (write(sockfd_, message.c_str(), len) != static_cast(len)) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. + void CloseConnection() override { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + int sockfd_; // socket file descriptor + const std::string host_name_; + const std::string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); + }; // class SocketWriter + + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". 
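+  // For example, "name=a&b" becomes "name%3Da%26b".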
+ static std::string UrlEncode(const char* str); + + StreamingListener(const std::string& host, const std::string& port) + : socket_writer_(new SocketWriter(host, port)) { + Start(); + } + + explicit StreamingListener(AbstractSocketWriter* socket_writer) + : socket_writer_(socket_writer) { Start(); } + + void OnTestProgramStart(const UnitTest& /* unit_test */) override { + SendLn("event=TestProgramStart"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) override { + // Note that Google Test current only report elapsed time for each + // test iteration, not for the entire test program. + SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed())); + + // Notify the streaming server to stop. + socket_writer_->CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, + int iteration) override { + SendLn("event=TestIterationStart&iteration=" + + StreamableToString(iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, + int /* iteration */) override { + SendLn("event=TestIterationEnd&passed=" + + FormatBool(unit_test.Passed()) + "&elapsed_time=" + + StreamableToString(unit_test.elapsed_time()) + "ms"); + } + + // Note that "event=TestCaseStart" is a wire format and has to remain + // "case" for compatibilty + void OnTestCaseStart(const TestCase& test_case) override { + SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); + } + + // Note that "event=TestCaseEnd" is a wire format and has to remain + // "case" for compatibilty + void OnTestCaseEnd(const TestCase& test_case) override { + SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + + "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + + "ms"); + } + + void OnTestStart(const TestInfo& test_info) override { + SendLn(std::string("event=TestStart&name=") + test_info.name()); + } + + void OnTestEnd(const TestInfo& test_info) override { + SendLn("event=TestEnd&passed=" + + FormatBool((test_info.result())->Passed()) + + "&elapsed_time=" + + StreamableToString((test_info.result())->elapsed_time()) + "ms"); + } + + void OnTestPartResult(const TestPartResult& test_part_result) override { + const char* file_name = test_part_result.file_name(); + if (file_name == nullptr) file_name = ""; + SendLn("event=TestPartResult&file=" + UrlEncode(file_name) + + "&line=" + StreamableToString(test_part_result.line_number()) + + "&message=" + UrlEncode(test_part_result.message())); + } + + private: + // Sends the given message and a newline to the socket. + void SendLn(const std::string& message) { socket_writer_->SendLn(message); } + + // Called at the start of streaming to notify the receiver what + // protocol we are using. + void Start() { SendLn("gtest_streaming_protocol_version=1.0"); } + + std::string FormatBool(bool value) { return value ? "1" : "0"; } + + const std::unique_ptr socket_writer_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +#endif // GTEST_CAN_STREAM_RESULTS_ + +} // namespace internal +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_ diff --git a/source/3rdparty/gtest/src/gtest-matchers.cc b/source/3rdparty/gtest/src/gtest-matchers.cc new file mode 100644 index 0000000..7d2fb68 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-matchers.cc @@ -0,0 +1,97 @@ +// Copyright 2007, Google Inc. +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This file implements just enough of the matcher interface to allow
+// EXPECT_DEATH and friends to accept a matcher argument.
+
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-port.h"
+#include "gtest/gtest-matchers.h"
+
+#include <string>
+
+namespace testing {
+
+// Constructs a matcher that matches a const std::string& whose value is
+// equal to s.
+Matcher<const std::string&>::Matcher(const std::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a const std::string& whose value is
+// equal to s.
+Matcher<const std::string&>::Matcher(const char* s) {
+  *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a std::string whose value is equal to
+// s.
+Matcher<std::string>::Matcher(const std::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a std::string whose value is equal to
+// s.
+Matcher<std::string>::Matcher(const char* s) { *this = Eq(std::string(s)); }
+
+#if GTEST_HAS_ABSL
+// Constructs a matcher that matches a const absl::string_view& whose value is
+// equal to s.
+Matcher<const absl::string_view&>::Matcher(const std::string& s) {
+  *this = Eq(s);
+}
+
+// Constructs a matcher that matches a const absl::string_view& whose value is
+// equal to s.
+Matcher<const absl::string_view&>::Matcher(const char* s) {
+  *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a const absl::string_view& whose value is
+// equal to s.
+Matcher<const absl::string_view&>::Matcher(absl::string_view s) {
+  *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a absl::string_view whose value is equal to
+// s.
+Matcher<absl::string_view>::Matcher(const std::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a absl::string_view whose value is equal to
+// s.
+Matcher<absl::string_view>::Matcher(const char* s) {
+  *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a absl::string_view whose value is equal to
+// s.
+Matcher::Matcher(absl::string_view s) { + *this = Eq(std::string(s)); +} +#endif // GTEST_HAS_ABSL + +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest-port.cc b/source/3rdparty/gtest/src/gtest-port.cc new file mode 100644 index 0000000..a05c50a --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-port.cc @@ -0,0 +1,1403 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#include "gtest/internal/gtest-port.h" + +#include +#include +#include +#include +#include +#include +#include + +#if GTEST_OS_WINDOWS +# include +# include +# include +# include // Used in ThreadLocal. +# ifdef _MSC_VER +# include +# endif // _MSC_VER +#else +# include +#endif // GTEST_OS_WINDOWS + +#if GTEST_OS_MAC +# include +# include +# include +#endif // GTEST_OS_MAC + +#if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \ + GTEST_OS_NETBSD || GTEST_OS_OPENBSD +# include +# if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD +# include +# endif +#endif + +#if GTEST_OS_QNX +# include +# include +# include +#endif // GTEST_OS_QNX + +#if GTEST_OS_AIX +# include +# include +#endif // GTEST_OS_AIX + +#if GTEST_OS_FUCHSIA +# include +# include +#endif // GTEST_OS_FUCHSIA + +#include "gtest/gtest-spi.h" +#include "gtest/gtest-message.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-string.h" +#include "src/gtest-internal-inl.h" + +namespace testing { +namespace internal { + +#if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC and C++Builder do not provide a definition of STDERR_FILENO. 
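+// Fall back to the conventional POSIX descriptor numbers (1 for stdout and
+// 2 for stderr) on those compilers.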
+const int kStdOutFileno = 1; +const int kStdErrFileno = 2; +#else +const int kStdOutFileno = STDOUT_FILENO; +const int kStdErrFileno = STDERR_FILENO; +#endif // _MSC_VER + +#if GTEST_OS_LINUX + +namespace { +template +T ReadProcFileField(const std::string& filename, int field) { + std::string dummy; + std::ifstream file(filename.c_str()); + while (field-- > 0) { + file >> dummy; + } + T output = 0; + file >> output; + return output; +} +} // namespace + +// Returns the number of active threads, or 0 when there is an error. +size_t GetThreadCount() { + const std::string filename = + (Message() << "/proc/" << getpid() << "/stat").GetString(); + return ReadProcFileField(filename, 19); +} + +#elif GTEST_OS_MAC + +size_t GetThreadCount() { + const task_t task = mach_task_self(); + mach_msg_type_number_t thread_count; + thread_act_array_t thread_list; + const kern_return_t status = task_threads(task, &thread_list, &thread_count); + if (status == KERN_SUCCESS) { + // task_threads allocates resources in thread_list and we need to free them + // to avoid leaks. + vm_deallocate(task, + reinterpret_cast(thread_list), + sizeof(thread_t) * thread_count); + return static_cast(thread_count); + } else { + return 0; + } +} + +#elif GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \ + GTEST_OS_NETBSD + +#if GTEST_OS_NETBSD +#undef KERN_PROC +#define KERN_PROC KERN_PROC2 +#define kinfo_proc kinfo_proc2 +#endif + +#if GTEST_OS_DRAGONFLY +#define KP_NLWP(kp) (kp.kp_nthreads) +#elif GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD +#define KP_NLWP(kp) (kp.ki_numthreads) +#elif GTEST_OS_NETBSD +#define KP_NLWP(kp) (kp.p_nlwps) +#endif + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +size_t GetThreadCount() { + int mib[] = { + CTL_KERN, + KERN_PROC, + KERN_PROC_PID, + getpid(), +#if GTEST_OS_NETBSD + sizeof(struct kinfo_proc), + 1, +#endif + }; + u_int miblen = sizeof(mib) / sizeof(mib[0]); + struct kinfo_proc info; + size_t size = sizeof(info); + if (sysctl(mib, miblen, &info, &size, NULL, 0)) { + return 0; + } + return static_cast(KP_NLWP(info)); +} +#elif GTEST_OS_OPENBSD + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +size_t GetThreadCount() { + int mib[] = { + CTL_KERN, + KERN_PROC, + KERN_PROC_PID | KERN_PROC_SHOW_THREADS, + getpid(), + sizeof(struct kinfo_proc), + 0, + }; + u_int miblen = sizeof(mib) / sizeof(mib[0]); + + // get number of structs + size_t size; + if (sysctl(mib, miblen, NULL, &size, NULL, 0)) { + return 0; + } + mib[5] = size / mib[4]; + + // populate array of structs + struct kinfo_proc info[mib[5]]; + if (sysctl(mib, miblen, &info, &size, NULL, 0)) { + return 0; + } + + // exclude empty members + int nthreads = 0; + for (int i = 0; i < size / mib[4]; i++) { + if (info[i].p_tid != -1) + nthreads++; + } + return nthreads; +} + +#elif GTEST_OS_QNX + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. 
+size_t GetThreadCount() { + const int fd = open("/proc/self/as", O_RDONLY); + if (fd < 0) { + return 0; + } + procfs_info process_info; + const int status = + devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), nullptr); + close(fd); + if (status == EOK) { + return static_cast(process_info.num_threads); + } else { + return 0; + } +} + +#elif GTEST_OS_AIX + +size_t GetThreadCount() { + struct procentry64 entry; + pid_t pid = getpid(); + int status = getprocs64(&entry, sizeof(entry), nullptr, 0, &pid, 1); + if (status == 1) { + return entry.pi_thcount; + } else { + return 0; + } +} + +#elif GTEST_OS_FUCHSIA + +size_t GetThreadCount() { + int dummy_buffer; + size_t avail; + zx_status_t status = zx_object_get_info( + zx_process_self(), + ZX_INFO_PROCESS_THREADS, + &dummy_buffer, + 0, + nullptr, + &avail); + if (status == ZX_OK) { + return avail; + } else { + return 0; + } +} + +#else + +size_t GetThreadCount() { + // There's no portable way to detect the number of threads, so we just + // return 0 to indicate that we cannot detect it. + return 0; +} + +#endif // GTEST_OS_LINUX + +#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS + +void SleepMilliseconds(int n) { + ::Sleep(static_cast(n)); +} + +AutoHandle::AutoHandle() + : handle_(INVALID_HANDLE_VALUE) {} + +AutoHandle::AutoHandle(Handle handle) + : handle_(handle) {} + +AutoHandle::~AutoHandle() { + Reset(); +} + +AutoHandle::Handle AutoHandle::Get() const { + return handle_; +} + +void AutoHandle::Reset() { + Reset(INVALID_HANDLE_VALUE); +} + +void AutoHandle::Reset(HANDLE handle) { + // Resetting with the same handle we already own is invalid. + if (handle_ != handle) { + if (IsCloseable()) { + ::CloseHandle(handle_); + } + handle_ = handle; + } else { + GTEST_CHECK_(!IsCloseable()) + << "Resetting a valid handle to itself is likely a programmer error " + "and thus not allowed."; + } +} + +bool AutoHandle::IsCloseable() const { + // Different Windows APIs may use either of these values to represent an + // invalid handle. + return handle_ != nullptr && handle_ != INVALID_HANDLE_VALUE; +} + +Notification::Notification() + : event_(::CreateEvent(nullptr, // Default security attributes. + TRUE, // Do not reset automatically. + FALSE, // Initially unset. + nullptr)) { // Anonymous event. + GTEST_CHECK_(event_.Get() != nullptr); +} + +void Notification::Notify() { + GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE); +} + +void Notification::WaitForNotification() { + GTEST_CHECK_( + ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0); +} + +Mutex::Mutex() + : owner_thread_id_(0), + type_(kDynamic), + critical_section_init_phase_(0), + critical_section_(new CRITICAL_SECTION) { + ::InitializeCriticalSection(critical_section_); +} + +Mutex::~Mutex() { + // Static mutexes are leaked intentionally. It is not thread-safe to try + // to clean them up. + if (type_ == kDynamic) { + ::DeleteCriticalSection(critical_section_); + delete critical_section_; + critical_section_ = nullptr; + } +} + +void Mutex::Lock() { + ThreadSafeLazyInit(); + ::EnterCriticalSection(critical_section_); + owner_thread_id_ = ::GetCurrentThreadId(); +} + +void Mutex::Unlock() { + ThreadSafeLazyInit(); + // We don't protect writing to owner_thread_id_ here, as it's the + // caller's responsibility to ensure that the current thread holds the + // mutex when this is called. + owner_thread_id_ = 0; + ::LeaveCriticalSection(critical_section_); +} + +// Does nothing if the current thread holds the mutex. Otherwise, crashes +// with high probability. 
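+// Functions whose contract requires the caller to already hold the mutex
+// (e.g. GetThreadLocalsMapLocked() below) call this to enforce that
+// precondition.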
+void Mutex::AssertHeld() { + ThreadSafeLazyInit(); + GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId()) + << "The current thread is not holding the mutex @" << this; +} + +namespace { + +#ifdef _MSC_VER +// Use the RAII idiom to flag mem allocs that are intentionally never +// deallocated. The motivation is to silence the false positive mem leaks +// that are reported by the debug version of MS's CRT which can only detect +// if an alloc is missing a matching deallocation. +// Example: +// MemoryIsNotDeallocated memory_is_not_deallocated; +// critical_section_ = new CRITICAL_SECTION; +// +class MemoryIsNotDeallocated +{ + public: + MemoryIsNotDeallocated() : old_crtdbg_flag_(0) { + old_crtdbg_flag_ = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG); + // Set heap allocation block type to _IGNORE_BLOCK so that MS debug CRT + // doesn't report mem leak if there's no matching deallocation. + _CrtSetDbgFlag(old_crtdbg_flag_ & ~_CRTDBG_ALLOC_MEM_DF); + } + + ~MemoryIsNotDeallocated() { + // Restore the original _CRTDBG_ALLOC_MEM_DF flag + _CrtSetDbgFlag(old_crtdbg_flag_); + } + + private: + int old_crtdbg_flag_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(MemoryIsNotDeallocated); +}; +#endif // _MSC_VER + +} // namespace + +// Initializes owner_thread_id_ and critical_section_ in static mutexes. +void Mutex::ThreadSafeLazyInit() { + // Dynamic mutexes are initialized in the constructor. + if (type_ == kStatic) { + switch ( + ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) { + case 0: + // If critical_section_init_phase_ was 0 before the exchange, we + // are the first to test it and need to perform the initialization. + owner_thread_id_ = 0; + { + // Use RAII to flag that following mem alloc is never deallocated. +#ifdef _MSC_VER + MemoryIsNotDeallocated memory_is_not_deallocated; +#endif // _MSC_VER + critical_section_ = new CRITICAL_SECTION; + } + ::InitializeCriticalSection(critical_section_); + // Updates the critical_section_init_phase_ to 2 to signal + // initialization complete. + GTEST_CHECK_(::InterlockedCompareExchange( + &critical_section_init_phase_, 2L, 1L) == + 1L); + break; + case 1: + // Somebody else is already initializing the mutex; spin until they + // are done. + while (::InterlockedCompareExchange(&critical_section_init_phase_, + 2L, + 2L) != 2L) { + // Possibly yields the rest of the thread's time slice to other + // threads. + ::Sleep(0); + } + break; + + case 2: + break; // The mutex is already initialized and ready for use. + + default: + GTEST_CHECK_(false) + << "Unexpected value of critical_section_init_phase_ " + << "while initializing a static mutex."; + } + } +} + +namespace { + +class ThreadWithParamSupport : public ThreadWithParamBase { + public: + static HANDLE CreateThread(Runnable* runnable, + Notification* thread_can_start) { + ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start); + DWORD thread_id; + HANDLE thread_handle = ::CreateThread( + nullptr, // Default security. + 0, // Default stack size. + &ThreadWithParamSupport::ThreadMain, + param, // Parameter to ThreadMainStatic + 0x0, // Default creation flags. + &thread_id); // Need a valid pointer for the call to work under Win98. 
+ GTEST_CHECK_(thread_handle != nullptr) + << "CreateThread failed with error " << ::GetLastError() << "."; + if (thread_handle == nullptr) { + delete param; + } + return thread_handle; + } + + private: + struct ThreadMainParam { + ThreadMainParam(Runnable* runnable, Notification* thread_can_start) + : runnable_(runnable), + thread_can_start_(thread_can_start) { + } + std::unique_ptr runnable_; + // Does not own. + Notification* thread_can_start_; + }; + + static DWORD WINAPI ThreadMain(void* ptr) { + // Transfers ownership. + std::unique_ptr param(static_cast(ptr)); + if (param->thread_can_start_ != nullptr) + param->thread_can_start_->WaitForNotification(); + param->runnable_->Run(); + return 0; + } + + // Prohibit instantiation. + ThreadWithParamSupport(); + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport); +}; + +} // namespace + +ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable, + Notification* thread_can_start) + : thread_(ThreadWithParamSupport::CreateThread(runnable, + thread_can_start)) { +} + +ThreadWithParamBase::~ThreadWithParamBase() { + Join(); +} + +void ThreadWithParamBase::Join() { + GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0) + << "Failed to join the thread with error " << ::GetLastError() << "."; +} + +// Maps a thread to a set of ThreadIdToThreadLocals that have values +// instantiated on that thread and notifies them when the thread exits. A +// ThreadLocal instance is expected to persist until all threads it has +// values on have terminated. +class ThreadLocalRegistryImpl { + public: + // Registers thread_local_instance as having value on the current thread. + // Returns a value that can be used to identify the thread from other threads. + static ThreadLocalValueHolderBase* GetValueOnCurrentThread( + const ThreadLocalBase* thread_local_instance) { +#ifdef _MSC_VER + MemoryIsNotDeallocated memory_is_not_deallocated; +#endif // _MSC_VER + DWORD current_thread = ::GetCurrentThreadId(); + MutexLock lock(&mutex_); + ThreadIdToThreadLocals* const thread_to_thread_locals = + GetThreadLocalsMapLocked(); + ThreadIdToThreadLocals::iterator thread_local_pos = + thread_to_thread_locals->find(current_thread); + if (thread_local_pos == thread_to_thread_locals->end()) { + thread_local_pos = thread_to_thread_locals->insert( + std::make_pair(current_thread, ThreadLocalValues())).first; + StartWatcherThreadFor(current_thread); + } + ThreadLocalValues& thread_local_values = thread_local_pos->second; + ThreadLocalValues::iterator value_pos = + thread_local_values.find(thread_local_instance); + if (value_pos == thread_local_values.end()) { + value_pos = + thread_local_values + .insert(std::make_pair( + thread_local_instance, + std::shared_ptr( + thread_local_instance->NewValueForCurrentThread()))) + .first; + } + return value_pos->second.get(); + } + + static void OnThreadLocalDestroyed( + const ThreadLocalBase* thread_local_instance) { + std::vector > value_holders; + // Clean up the ThreadLocalValues data structure while holding the lock, but + // defer the destruction of the ThreadLocalValueHolderBases. 
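+  // Destroying the holders outside the lock keeps any user-defined
+  // destructors they run from executing under mutex_, where re-entering
+  // the registry could deadlock.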
+ { + MutexLock lock(&mutex_); + ThreadIdToThreadLocals* const thread_to_thread_locals = + GetThreadLocalsMapLocked(); + for (ThreadIdToThreadLocals::iterator it = + thread_to_thread_locals->begin(); + it != thread_to_thread_locals->end(); + ++it) { + ThreadLocalValues& thread_local_values = it->second; + ThreadLocalValues::iterator value_pos = + thread_local_values.find(thread_local_instance); + if (value_pos != thread_local_values.end()) { + value_holders.push_back(value_pos->second); + thread_local_values.erase(value_pos); + // This 'if' can only be successful at most once, so theoretically we + // could break out of the loop here, but we don't bother doing so. + } + } + } + // Outside the lock, let the destructor for 'value_holders' deallocate the + // ThreadLocalValueHolderBases. + } + + static void OnThreadExit(DWORD thread_id) { + GTEST_CHECK_(thread_id != 0) << ::GetLastError(); + std::vector > value_holders; + // Clean up the ThreadIdToThreadLocals data structure while holding the + // lock, but defer the destruction of the ThreadLocalValueHolderBases. + { + MutexLock lock(&mutex_); + ThreadIdToThreadLocals* const thread_to_thread_locals = + GetThreadLocalsMapLocked(); + ThreadIdToThreadLocals::iterator thread_local_pos = + thread_to_thread_locals->find(thread_id); + if (thread_local_pos != thread_to_thread_locals->end()) { + ThreadLocalValues& thread_local_values = thread_local_pos->second; + for (ThreadLocalValues::iterator value_pos = + thread_local_values.begin(); + value_pos != thread_local_values.end(); + ++value_pos) { + value_holders.push_back(value_pos->second); + } + thread_to_thread_locals->erase(thread_local_pos); + } + } + // Outside the lock, let the destructor for 'value_holders' deallocate the + // ThreadLocalValueHolderBases. + } + + private: + // In a particular thread, maps a ThreadLocal object to its value. + typedef std::map > + ThreadLocalValues; + // Stores all ThreadIdToThreadLocals having values in a thread, indexed by + // thread's ID. + typedef std::map ThreadIdToThreadLocals; + + // Holds the thread id and thread handle that we pass from + // StartWatcherThreadFor to WatcherThreadFunc. + typedef std::pair ThreadIdAndHandle; + + static void StartWatcherThreadFor(DWORD thread_id) { + // The returned handle will be kept in thread_map and closed by + // watcher_thread in WatcherThreadFunc. + HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION, + FALSE, + thread_id); + GTEST_CHECK_(thread != nullptr); + // We need to pass a valid thread ID pointer into CreateThread for it + // to work correctly under Win98. + DWORD watcher_thread_id; + HANDLE watcher_thread = ::CreateThread( + nullptr, // Default security. + 0, // Default stack size + &ThreadLocalRegistryImpl::WatcherThreadFunc, + reinterpret_cast(new ThreadIdAndHandle(thread_id, thread)), + CREATE_SUSPENDED, &watcher_thread_id); + GTEST_CHECK_(watcher_thread != nullptr); + // Give the watcher thread the same priority as ours to avoid being + // blocked by it. + ::SetThreadPriority(watcher_thread, + ::GetThreadPriority(::GetCurrentThread())); + ::ResumeThread(watcher_thread); + ::CloseHandle(watcher_thread); + } + + // Monitors exit from a given thread and notifies those + // ThreadIdToThreadLocals about thread termination. 
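+  // The watcher blocks in WaitForSingleObject() on the watched thread's
+  // handle; the wait completes when that thread terminates, at which point
+  // OnThreadExit() releases the thread's ThreadLocal values.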
+ static DWORD WINAPI WatcherThreadFunc(LPVOID param) { + const ThreadIdAndHandle* tah = + reinterpret_cast(param); + GTEST_CHECK_( + ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0); + OnThreadExit(tah->first); + ::CloseHandle(tah->second); + delete tah; + return 0; + } + + // Returns map of thread local instances. + static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() { + mutex_.AssertHeld(); +#ifdef _MSC_VER + MemoryIsNotDeallocated memory_is_not_deallocated; +#endif // _MSC_VER + static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals(); + return map; + } + + // Protects access to GetThreadLocalsMapLocked() and its return value. + static Mutex mutex_; + // Protects access to GetThreadMapLocked() and its return value. + static Mutex thread_map_mutex_; +}; + +Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex); +Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex); + +ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread( + const ThreadLocalBase* thread_local_instance) { + return ThreadLocalRegistryImpl::GetValueOnCurrentThread( + thread_local_instance); +} + +void ThreadLocalRegistry::OnThreadLocalDestroyed( + const ThreadLocalBase* thread_local_instance) { + ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance); +} + +#endif // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS + +#if GTEST_USES_POSIX_RE + +// Implements RE. Currently only needed for death tests. + +RE::~RE() { + if (is_valid_) { + // regfree'ing an invalid regex might crash because the content + // of the regex is undefined. Since the regex's are essentially + // the same, one cannot be valid (or invalid) without the other + // being so too. + regfree(&partial_regex_); + regfree(&full_regex_); + } + free(const_cast(pattern_)); +} + +// Returns true if and only if regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + if (!re.is_valid_) return false; + + regmatch_t match; + return regexec(&re.full_regex_, str, 1, &match, 0) == 0; +} + +// Returns true if and only if regular expression re matches a substring of +// str (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + if (!re.is_valid_) return false; + + regmatch_t match; + return regexec(&re.partial_regex_, str, 1, &match, 0) == 0; +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = posix::StrDup(regex); + + // Reserves enough bytes to hold the regular expression used for a + // full match. + const size_t full_regex_len = strlen(regex) + 10; + char* const full_pattern = new char[full_regex_len]; + + snprintf(full_pattern, full_regex_len, "^(%s)$", regex); + is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0; + // We want to call regcomp(&partial_regex_, ...) even if the + // previous expression returns false. Otherwise partial_regex_ may + // not be properly initialized can may cause trouble when it's + // freed. + // + // Some implementation of POSIX regex (e.g. on at least some + // versions of Cygwin) doesn't accept the empty string as a valid + // regex. We change it to an equivalent form "()" to be safe. + if (is_valid_) { + const char* const partial_regex = (*regex == '\0') ? 
"()" : regex; + is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0; + } + EXPECT_TRUE(is_valid_) + << "Regular expression \"" << regex + << "\" is not a valid POSIX Extended regular expression."; + + delete[] full_pattern; +} + +#elif GTEST_USES_SIMPLE_RE + +// Returns true if and only if ch appears anywhere in str (excluding the +// terminating '\0' character). +bool IsInSet(char ch, const char* str) { + return ch != '\0' && strchr(str, ch) != nullptr; +} + +// Returns true if and only if ch belongs to the given classification. +// Unlike similar functions in , these aren't affected by the +// current locale. +bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; } +bool IsAsciiPunct(char ch) { + return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~"); +} +bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); } +bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); } +bool IsAsciiWordChar(char ch) { + return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || + ('0' <= ch && ch <= '9') || ch == '_'; +} + +// Returns true if and only if "\\c" is a supported escape sequence. +bool IsValidEscape(char c) { + return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW")); +} + +// Returns true if and only if the given atom (specified by escaped and +// pattern) matches ch. The result is undefined if the atom is invalid. +bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { + if (escaped) { // "\\p" where p is pattern_char. + switch (pattern_char) { + case 'd': return IsAsciiDigit(ch); + case 'D': return !IsAsciiDigit(ch); + case 'f': return ch == '\f'; + case 'n': return ch == '\n'; + case 'r': return ch == '\r'; + case 's': return IsAsciiWhiteSpace(ch); + case 'S': return !IsAsciiWhiteSpace(ch); + case 't': return ch == '\t'; + case 'v': return ch == '\v'; + case 'w': return IsAsciiWordChar(ch); + case 'W': return !IsAsciiWordChar(ch); + } + return IsAsciiPunct(pattern_char) && pattern_char == ch; + } + + return (pattern_char == '.' && ch != '\n') || pattern_char == ch; +} + +// Helper function used by ValidateRegex() to format error messages. +static std::string FormatRegexSyntaxError(const char* regex, int index) { + return (Message() << "Syntax error at index " << index + << " in simple regular expression \"" << regex << "\": ").GetString(); +} + +// Generates non-fatal failures and returns false if regex is invalid; +// otherwise returns true. +bool ValidateRegex(const char* regex) { + if (regex == nullptr) { + ADD_FAILURE() << "NULL is not a valid simple regular expression."; + return false; + } + + bool is_valid = true; + + // True if and only if ?, *, or + can follow the previous atom. + bool prev_repeatable = false; + for (int i = 0; regex[i]; i++) { + if (regex[i] == '\\') { // An escape sequence + i++; + if (regex[i] == '\0') { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1) + << "'\\' cannot appear at the end."; + return false; + } + + if (!IsValidEscape(regex[i])) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1) + << "invalid escape sequence \"\\" << regex[i] << "\"."; + is_valid = false; + } + prev_repeatable = true; + } else { // Not an escape sequence. 
+ const char ch = regex[i]; + + if (ch == '^' && i > 0) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'^' can only appear at the beginning."; + is_valid = false; + } else if (ch == '$' && regex[i + 1] != '\0') { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'$' can only appear at the end."; + is_valid = false; + } else if (IsInSet(ch, "()[]{}|")) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'" << ch << "' is unsupported."; + is_valid = false; + } else if (IsRepeat(ch) && !prev_repeatable) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'" << ch << "' can only follow a repeatable token."; + is_valid = false; + } + + prev_repeatable = !IsInSet(ch, "^$?*+"); + } + } + + return is_valid; +} + +// Matches a repeated regex atom followed by a valid simple regular +// expression. The regex atom is defined as c if escaped is false, +// or \c otherwise. repeat is the repetition meta character (?, *, +// or +). The behavior is undefined if str contains too many +// characters to be indexable by size_t, in which case the test will +// probably time out anyway. We are fine with this limitation as +// std::string has it too. +bool MatchRepetitionAndRegexAtHead( + bool escaped, char c, char repeat, const char* regex, + const char* str) { + const size_t min_count = (repeat == '+') ? 1 : 0; + const size_t max_count = (repeat == '?') ? 1 : + static_cast(-1) - 1; + // We cannot call numeric_limits::max() as it conflicts with the + // max() macro on Windows. + + for (size_t i = 0; i <= max_count; ++i) { + // We know that the atom matches each of the first i characters in str. + if (i >= min_count && MatchRegexAtHead(regex, str + i)) { + // We have enough matches at the head, and the tail matches too. + // Since we only care about *whether* the pattern matches str + // (as opposed to *how* it matches), there is no need to find a + // greedy match. + return true; + } + if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i])) + return false; + } + return false; +} + +// Returns true if and only if regex matches a prefix of str. regex must +// be a valid simple regular expression and not start with "^", or the +// result is undefined. +bool MatchRegexAtHead(const char* regex, const char* str) { + if (*regex == '\0') // An empty regex matches a prefix of anything. + return true; + + // "$" only matches the end of a string. Note that regex being + // valid guarantees that there's nothing after "$" in it. + if (*regex == '$') + return *str == '\0'; + + // Is the first thing in regex an escape sequence? + const bool escaped = *regex == '\\'; + if (escaped) + ++regex; + if (IsRepeat(regex[1])) { + // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so + // here's an indirect recursion. It terminates as the regex gets + // shorter in each recursion. + return MatchRepetitionAndRegexAtHead( + escaped, regex[0], regex[1], regex + 2, str); + } else { + // regex isn't empty, isn't "$", and doesn't start with a + // repetition. We match the first atom of regex with the first + // character of str and recurse. + return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) && + MatchRegexAtHead(regex + 1, str + 1); + } +} + +// Returns true if and only if regex matches any substring of str. regex must +// be a valid simple regular expression, or the result is undefined. +// +// The algorithm is recursive, but the recursion depth doesn't exceed +// the regex length, so we won't need to worry about running out of +// stack space normally. 
In rare cases the time complexity can be +// exponential with respect to the regex length + the string length, +// but usually it's must faster (often close to linear). +bool MatchRegexAnywhere(const char* regex, const char* str) { + if (regex == nullptr || str == nullptr) return false; + + if (*regex == '^') + return MatchRegexAtHead(regex + 1, str); + + // A successful match can be anywhere in str. + do { + if (MatchRegexAtHead(regex, str)) + return true; + } while (*str++ != '\0'); + return false; +} + +// Implements the RE class. + +RE::~RE() { + free(const_cast(pattern_)); + free(const_cast(full_pattern_)); +} + +// Returns true if and only if regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str); +} + +// Returns true if and only if regular expression re matches a substring of +// str (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str); +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = full_pattern_ = nullptr; + if (regex != nullptr) { + pattern_ = posix::StrDup(regex); + } + + is_valid_ = ValidateRegex(regex); + if (!is_valid_) { + // No need to calculate the full pattern when the regex is invalid. + return; + } + + const size_t len = strlen(regex); + // Reserves enough bytes to hold the regular expression used for a + // full match: we need space to prepend a '^', append a '$', and + // terminate the string with '\0'. + char* buffer = static_cast(malloc(len + 3)); + full_pattern_ = buffer; + + if (*regex != '^') + *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'. + + // We don't use snprintf or strncpy, as they trigger a warning when + // compiled with VC++ 8.0. + memcpy(buffer, regex, len); + buffer += len; + + if (len == 0 || regex[len - 1] != '$') + *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'. + + *buffer = '\0'; +} + +#endif // GTEST_USES_POSIX_RE + +const char kUnknownFile[] = "unknown file"; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { + const std::string file_name(file == nullptr ? kUnknownFile : file); + + if (line < 0) { + return file_name + ":"; + } +#ifdef _MSC_VER + return file_name + "(" + StreamableToString(line) + "):"; +#else + return file_name + ":" + StreamableToString(line) + ":"; +#endif // _MSC_VER +} + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +// Note that FormatCompilerIndependentFileLocation() does NOT append colon +// to the file location it produces, unlike FormatFileLocation(). +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( + const char* file, int line) { + const std::string file_name(file == nullptr ? kUnknownFile : file); + + if (line < 0) + return file_name; + else + return file_name + ":" + StreamableToString(line); +} + +GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line) + : severity_(severity) { + const char* const marker = + severity == GTEST_INFO ? "[ INFO ]" : + severity == GTEST_WARNING ? "[WARNING]" : + severity == GTEST_ERROR ? 
"[ ERROR ]" : "[ FATAL ]"; + GetStream() << ::std::endl << marker << " " + << FormatFileLocation(file, line).c_str() << ": "; +} + +// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. +GTestLog::~GTestLog() { + GetStream() << ::std::endl; + if (severity_ == GTEST_FATAL) { + fflush(stderr); + posix::Abort(); + } +} + +// Disable Microsoft deprecation warnings for POSIX functions called from +// this class (creat, dup, dup2, and close) +GTEST_DISABLE_MSC_DEPRECATED_PUSH_() + +#if GTEST_HAS_STREAM_REDIRECTION + +// Object that captures an output stream (stdout/stderr). +class CapturedStream { + public: + // The ctor redirects the stream to a temporary file. + explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { +# if GTEST_OS_WINDOWS + char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT + char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT + + ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path); + const UINT success = ::GetTempFileNameA(temp_dir_path, + "gtest_redir", + 0, // Generate unique file name. + temp_file_path); + GTEST_CHECK_(success != 0) + << "Unable to create a temporary file in " << temp_dir_path; + const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE); + GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file " + << temp_file_path; + filename_ = temp_file_path; +# else + // There's no guarantee that a test has write access to the current + // directory, so we create the temporary file in the /tmp directory + // instead. We use /tmp on most systems, and /sdcard on Android. + // That's because Android doesn't have /tmp. +# if GTEST_OS_LINUX_ANDROID + // Note: Android applications are expected to call the framework's + // Context.getExternalStorageDirectory() method through JNI to get + // the location of the world-writable SD Card directory. However, + // this requires a Context handle, which cannot be retrieved + // globally from native code. Doing so also precludes running the + // code as part of a regular standalone executable, which doesn't + // run in a Dalvik process (e.g. when running it through 'adb shell'). + // + // The location /data/local/tmp is directly accessible from native code. + // '/sdcard' and other variants cannot be relied on, as they are not + // guaranteed to be mounted, or may have a delay in mounting. + char name_template[] = "/data/local/tmp/gtest_captured_stream.XXXXXX"; +# else + char name_template[] = "/tmp/captured_stream.XXXXXX"; +# endif // GTEST_OS_LINUX_ANDROID + const int captured_fd = mkstemp(name_template); + if (captured_fd == -1) { + GTEST_LOG_(WARNING) + << "Failed to create tmp file " << name_template + << " for test; does the test have access to the /tmp directory?"; + } + filename_ = name_template; +# endif // GTEST_OS_WINDOWS + fflush(nullptr); + dup2(captured_fd, fd_); + close(captured_fd); + } + + ~CapturedStream() { + remove(filename_.c_str()); + } + + std::string GetCapturedString() { + if (uncaptured_fd_ != -1) { + // Restores the original stream. + fflush(nullptr); + dup2(uncaptured_fd_, fd_); + close(uncaptured_fd_); + uncaptured_fd_ = -1; + } + + FILE* const file = posix::FOpen(filename_.c_str(), "r"); + if (file == nullptr) { + GTEST_LOG_(FATAL) << "Failed to open tmp file " << filename_ + << " for capturing stream."; + } + const std::string content = ReadEntireFile(file); + posix::FClose(file); + return content; + } + + private: + const int fd_; // A stream to capture. + int uncaptured_fd_; + // Name of the temporary file holding the stderr output. 
+ ::std::string filename_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); +}; + +GTEST_DISABLE_MSC_DEPRECATED_POP_() + +static CapturedStream* g_captured_stderr = nullptr; +static CapturedStream* g_captured_stdout = nullptr; + +// Starts capturing an output stream (stdout/stderr). +static void CaptureStream(int fd, const char* stream_name, + CapturedStream** stream) { + if (*stream != nullptr) { + GTEST_LOG_(FATAL) << "Only one " << stream_name + << " capturer can exist at a time."; + } + *stream = new CapturedStream(fd); +} + +// Stops capturing the output stream and returns the captured string. +static std::string GetCapturedStream(CapturedStream** captured_stream) { + const std::string content = (*captured_stream)->GetCapturedString(); + + delete *captured_stream; + *captured_stream = nullptr; + + return content; +} + +// Starts capturing stdout. +void CaptureStdout() { + CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout); +} + +// Starts capturing stderr. +void CaptureStderr() { + CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr); +} + +// Stops capturing stdout and returns the captured string. +std::string GetCapturedStdout() { + return GetCapturedStream(&g_captured_stdout); +} + +// Stops capturing stderr and returns the captured string. +std::string GetCapturedStderr() { + return GetCapturedStream(&g_captured_stderr); +} + +#endif // GTEST_HAS_STREAM_REDIRECTION + + + + + +size_t GetFileSize(FILE* file) { + fseek(file, 0, SEEK_END); + return static_cast(ftell(file)); +} + +std::string ReadEntireFile(FILE* file) { + const size_t file_size = GetFileSize(file); + char* const buffer = new char[file_size]; + + size_t bytes_last_read = 0; // # of bytes read in the last fread() + size_t bytes_read = 0; // # of bytes read so far + + fseek(file, 0, SEEK_SET); + + // Keeps reading the file until we cannot read further or the + // pre-determined file size is reached. + do { + bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_read += bytes_last_read; + } while (bytes_last_read > 0 && bytes_read < file_size); + + const std::string content(buffer, bytes_read); + delete[] buffer; + + return content; +} + +#if GTEST_HAS_DEATH_TEST +static const std::vector* g_injected_test_argvs = + nullptr; // Owned. + +std::vector GetInjectableArgvs() { + if (g_injected_test_argvs != nullptr) { + return *g_injected_test_argvs; + } + return GetArgvs(); +} + +void SetInjectableArgvs(const std::vector* new_argvs) { + if (g_injected_test_argvs != new_argvs) delete g_injected_test_argvs; + g_injected_test_argvs = new_argvs; +} + +void SetInjectableArgvs(const std::vector& new_argvs) { + SetInjectableArgvs( + new std::vector(new_argvs.begin(), new_argvs.end())); +} + +void ClearInjectableArgvs() { + delete g_injected_test_argvs; + g_injected_test_argvs = nullptr; +} +#endif // GTEST_HAS_DEATH_TEST + +#if GTEST_OS_WINDOWS_MOBILE +namespace posix { +void Abort() { + DebugBreak(); + TerminateProcess(GetCurrentProcess(), 1); +} +} // namespace posix +#endif // GTEST_OS_WINDOWS_MOBILE + +// Returns the name of the environment variable corresponding to the +// given flag. For example, FlagToEnvVar("foo") will return +// "GTEST_FOO" in the open-source version. 
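// A minimal usage sketch of the capture API defined above (the TEST body and
// the printed text are illustrative only, assuming <gtest/gtest.h> and
// <cstdio> are available): redirect stdout, run the code under test, then
// assert on the captured string.
TEST(CaptureStreamSketch, SeesPrintedOutput) {
  testing::internal::CaptureStdout();
  std::printf("hello from the code under test\n");
  const std::string output = testing::internal::GetCapturedStdout();
  EXPECT_NE(output.find("hello"), std::string::npos);
}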
+static std::string FlagToEnvVar(const char* flag) { + const std::string full_flag = + (Message() << GTEST_FLAG_PREFIX_ << flag).GetString(); + + Message env_var; + for (size_t i = 0; i != full_flag.length(); i++) { + env_var << ToUpper(full_flag.c_str()[i]); + } + + return env_var.GetString(); +} + +// Parses 'str' for a 32-bit signed integer. If successful, writes +// the result to *value and returns true; otherwise leaves *value +// unchanged and returns false. +bool ParseInt32(const Message& src_text, const char* str, int32_t* value) { + // Parses the environment variable as a decimal integer. + char* end = nullptr; + const long long_value = strtol(str, &end, 10); // NOLINT + + // Has strtol() consumed all characters in the string? + if (*end != '\0') { + // No - an invalid character was encountered. + Message msg; + msg << "WARNING: " << src_text + << " is expected to be a 32-bit integer, but actually" + << " has value \"" << str << "\".\n"; + printf("%s", msg.GetString().c_str()); + fflush(stdout); + return false; + } + + // Is the parsed value in the range of an int32_t? + const auto result = static_cast(long_value); + if (long_value == LONG_MAX || long_value == LONG_MIN || + // The parsed value overflows as a long. (strtol() returns + // LONG_MAX or LONG_MIN when the input overflows.) + result != long_value + // The parsed value overflows as an int32_t. + ) { + Message msg; + msg << "WARNING: " << src_text + << " is expected to be a 32-bit integer, but actually" + << " has value " << str << ", which overflows.\n"; + printf("%s", msg.GetString().c_str()); + fflush(stdout); + return false; + } + + *value = result; + return true; +} + +// Reads and returns the Boolean environment variable corresponding to +// the given flag; if it's not set, returns default_value. +// +// The value is considered true if and only if it's not "0". +bool BoolFromGTestEnv(const char* flag, bool default_value) { +#if defined(GTEST_GET_BOOL_FROM_ENV_) + return GTEST_GET_BOOL_FROM_ENV_(flag, default_value); +#else + const std::string env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + return string_value == nullptr ? default_value + : strcmp(string_value, "0") != 0; +#endif // defined(GTEST_GET_BOOL_FROM_ENV_) +} + +// Reads and returns a 32-bit integer stored in the environment +// variable corresponding to the given flag; if it isn't set or +// doesn't represent a valid 32-bit integer, returns default_value. +int32_t Int32FromGTestEnv(const char* flag, int32_t default_value) { +#if defined(GTEST_GET_INT32_FROM_ENV_) + return GTEST_GET_INT32_FROM_ENV_(flag, default_value); +#else + const std::string env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + if (string_value == nullptr) { + // The environment variable is not set. + return default_value; + } + + int32_t result = default_value; + if (!ParseInt32(Message() << "Environment variable " << env_var, + string_value, &result)) { + printf("The default value %s is used.\n", + (Message() << default_value).GetString().c_str()); + fflush(stdout); + return default_value; + } + + return result; +#endif // defined(GTEST_GET_INT32_FROM_ENV_) +} + +// As a special case for the 'output' flag, if GTEST_OUTPUT is not +// set, we look for XML_OUTPUT_FILE, which is set by the Bazel build +// system. The value of XML_OUTPUT_FILE is a filename without the +// "xml:" prefix of GTEST_OUTPUT. 
+// Note that this is meant to be called at the call site so it does +// not check that the flag is 'output' +// In essence this checks an env variable called XML_OUTPUT_FILE +// and if it is set we prepend "xml:" to its value, if it not set we return "" +std::string OutputFlagAlsoCheckEnvVar(){ + std::string default_value_for_output_flag = ""; + const char* xml_output_file_env = posix::GetEnv("XML_OUTPUT_FILE"); + if (nullptr != xml_output_file_env) { + default_value_for_output_flag = std::string("xml:") + xml_output_file_env; + } + return default_value_for_output_flag; +} + +// Reads and returns the string environment variable corresponding to +// the given flag; if it's not set, returns default_value. +const char* StringFromGTestEnv(const char* flag, const char* default_value) { +#if defined(GTEST_GET_STRING_FROM_ENV_) + return GTEST_GET_STRING_FROM_ENV_(flag, default_value); +#else + const std::string env_var = FlagToEnvVar(flag); + const char* const value = posix::GetEnv(env_var.c_str()); + return value == nullptr ? default_value : value; +#endif // defined(GTEST_GET_STRING_FROM_ENV_) +} + +} // namespace internal +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest-printers.cc b/source/3rdparty/gtest/src/gtest-printers.cc new file mode 100644 index 0000000..3337be3 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-printers.cc @@ -0,0 +1,442 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Google Test - The Google C++ Testing and Mocking Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// It uses the << operator when possible, and prints the bytes in the +// object otherwise. A user can override its behavior for a class +// type Foo by defining either operator<<(::std::ostream&, const Foo&) +// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that +// defines Foo. 
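// A minimal sketch of the customization point described above (the namespace,
// the Point type, and the values are hypothetical): defining PrintTo() next to
// the type lets assertion failures print readable values instead of the
// byte-dump fallback.
#include <ostream>
namespace myapp {
struct Point {
  int x;
  int y;
};
// Found via argument-dependent lookup because it lives in Point's namespace.
void PrintTo(const Point& p, ::std::ostream* os) {
  *os << "(" << p.x << ", " << p.y << ")";
}
}  // namespace myapp
// With this in place, a failing assertion involving myapp::Point values
// reports them as "(1, 2)" rather than as an "8-byte object <...>" dump.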
+ +#include "gtest/gtest-printers.h" +#include +#include +#include +#include // NOLINT +#include +#include "gtest/internal/gtest-port.h" +#include "src/gtest-internal-inl.h" + +namespace testing { + +namespace { + +using ::std::ostream; + +// Prints a segment of bytes in the given object. +GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start, + size_t count, ostream* os) { + char text[5] = ""; + for (size_t i = 0; i != count; i++) { + const size_t j = start + i; + if (i != 0) { + // Organizes the bytes into groups of 2 for easy parsing by + // human. + if ((j % 2) == 0) + *os << ' '; + else + *os << '-'; + } + GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]); + *os << text; + } +} + +// Prints the bytes in the given value to the given ostream. +void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count, + ostream* os) { + // Tells the user how big the object is. + *os << count << "-byte object <"; + + const size_t kThreshold = 132; + const size_t kChunkSize = 64; + // If the object size is bigger than kThreshold, we'll have to omit + // some details by printing only the first and the last kChunkSize + // bytes. + if (count < kThreshold) { + PrintByteSegmentInObjectTo(obj_bytes, 0, count, os); + } else { + PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os); + *os << " ... "; + // Rounds up to 2-byte boundary. + const size_t resume_pos = (count - kChunkSize + 1)/2*2; + PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os); + } + *os << ">"; +} + +} // namespace + +namespace internal2 { + +// Delegates to PrintBytesInObjectToImpl() to print the bytes in the +// given object. The delegation simplifies the implementation, which +// uses the << operator and thus is easier done outside of the +// ::testing::internal namespace, which contains a << operator that +// sometimes conflicts with the one in STL. +void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count, + ostream* os) { + PrintBytesInObjectToImpl(obj_bytes, count, os); +} + +} // namespace internal2 + +namespace internal { + +// Depending on the value of a char (or wchar_t), we print it in one +// of three formats: +// - as is if it's a printable ASCII (e.g. 'a', '2', ' '), +// - as a hexadecimal escape sequence (e.g. '\x7F'), or +// - as a special escape sequence (e.g. '\r', '\n'). +enum CharFormat { + kAsIs, + kHexEscape, + kSpecialEscape +}; + +// Returns true if c is a printable ASCII character. We test the +// value of c directly instead of calling isprint(), which is buggy on +// Windows Mobile. +inline bool IsPrintableAscii(wchar_t c) { + return 0x20 <= c && c <= 0x7E; +} + +// Prints a wide or narrow char c as a character literal without the +// quotes, escaping it when necessary; returns how c was formatted. +// The template argument UnsignedChar is the unsigned version of Char, +// which is the type of c. 
+template +static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) { + wchar_t w_c = static_cast(c); + switch (w_c) { + case L'\0': + *os << "\\0"; + break; + case L'\'': + *os << "\\'"; + break; + case L'\\': + *os << "\\\\"; + break; + case L'\a': + *os << "\\a"; + break; + case L'\b': + *os << "\\b"; + break; + case L'\f': + *os << "\\f"; + break; + case L'\n': + *os << "\\n"; + break; + case L'\r': + *os << "\\r"; + break; + case L'\t': + *os << "\\t"; + break; + case L'\v': + *os << "\\v"; + break; + default: + if (IsPrintableAscii(w_c)) { + *os << static_cast(c); + return kAsIs; + } else { + ostream::fmtflags flags = os->flags(); + *os << "\\x" << std::hex << std::uppercase + << static_cast(static_cast(c)); + os->flags(flags); + return kHexEscape; + } + } + return kSpecialEscape; +} + +// Prints a wchar_t c as if it's part of a string literal, escaping it when +// necessary; returns how c was formatted. +static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) { + switch (c) { + case L'\'': + *os << "'"; + return kAsIs; + case L'"': + *os << "\\\""; + return kSpecialEscape; + default: + return PrintAsCharLiteralTo(c, os); + } +} + +// Prints a char c as if it's part of a string literal, escaping it when +// necessary; returns how c was formatted. +static CharFormat PrintAsStringLiteralTo(char c, ostream* os) { + return PrintAsStringLiteralTo( + static_cast(static_cast(c)), os); +} + +// Prints a wide or narrow character c and its code. '\0' is printed +// as "'\\0'", other unprintable characters are also properly escaped +// using the standard C++ escape sequence. The template argument +// UnsignedChar is the unsigned version of Char, which is the type of c. +template +void PrintCharAndCodeTo(Char c, ostream* os) { + // First, print c as a literal in the most readable form we can find. + *os << ((sizeof(c) > 1) ? "L'" : "'"); + const CharFormat format = PrintAsCharLiteralTo(c, os); + *os << "'"; + + // To aid user debugging, we also print c's code in decimal, unless + // it's 0 (in which case c was printed as '\\0', making the code + // obvious). + if (c == 0) + return; + *os << " (" << static_cast(c); + + // For more convenience, we print c's code again in hexadecimal, + // unless c was already printed in the form '\x##' or the code is in + // [1, 9]. + if (format == kHexEscape || (1 <= c && c <= 9)) { + // Do nothing. + } else { + *os << ", 0x" << String::FormatHexInt(static_cast(c)); + } + *os << ")"; +} + +void PrintTo(unsigned char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} +void PrintTo(signed char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} + +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its code. L'\0' is printed as "L'\\0'". +void PrintTo(wchar_t wc, ostream* os) { + PrintCharAndCodeTo(wc, os); +} + +// Prints the given array of characters to the ostream. CharType must be either +// char or wchar_t. +// The array starts at begin, the length is len, it may include '\0' characters +// and may not be NUL-terminated. +template +GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +static CharFormat PrintCharsAsStringTo( + const CharType* begin, size_t len, ostream* os) { + const char* const kQuoteBegin = sizeof(CharType) == 1 ? 
"\"" : "L\""; + *os << kQuoteBegin; + bool is_previous_hex = false; + CharFormat print_format = kAsIs; + for (size_t index = 0; index < len; ++index) { + const CharType cur = begin[index]; + if (is_previous_hex && IsXDigit(cur)) { + // Previous character is of '\x..' form and this character can be + // interpreted as another hexadecimal digit in its number. Break string to + // disambiguate. + *os << "\" " << kQuoteBegin; + } + is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape; + // Remember if any characters required hex escaping. + if (is_previous_hex) { + print_format = kHexEscape; + } + } + *os << "\""; + return print_format; +} + +// Prints a (const) char/wchar_t array of 'len' elements, starting at address +// 'begin'. CharType must be either char or wchar_t. +template +GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +static void UniversalPrintCharArray( + const CharType* begin, size_t len, ostream* os) { + // The code + // const char kFoo[] = "foo"; + // generates an array of 4, not 3, elements, with the last one being '\0'. + // + // Therefore when printing a char array, we don't print the last element if + // it's '\0', such that the output matches the string literal as it's + // written in the source code. + if (len > 0 && begin[len - 1] == '\0') { + PrintCharsAsStringTo(begin, len - 1, os); + return; + } + + // If, however, the last element in the array is not '\0', e.g. + // const char kFoo[] = { 'f', 'o', 'o' }; + // we must print the entire array. We also print a message to indicate + // that the array is not NUL-terminated. + PrintCharsAsStringTo(begin, len, os); + *os << " (no terminating NUL)"; +} + +// Prints a (const) char array of 'len' elements, starting at address 'begin'. +void UniversalPrintArray(const char* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints a (const) wchar_t array of 'len' elements, starting at address +// 'begin'. +void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints the given C string to the ostream. +void PrintTo(const char* s, ostream* os) { + if (s == nullptr) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, strlen(s), os); + } +} + +// MSVC compiler can be configured to define whar_t as a typedef +// of unsigned short. Defining an overload for const wchar_t* in that case +// would cause pointers to unsigned shorts be printed as wide strings, +// possibly accessing more memory than intended and causing invalid +// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when +// wchar_t is implemented as a native type. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Prints the given wide C string to the ostream. 
+void PrintTo(const wchar_t* s, ostream* os) { + if (s == nullptr) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, wcslen(s), os); + } +} +#endif // wchar_t is native + +namespace { + +bool ContainsUnprintableControlCodes(const char* str, size_t length) { + const unsigned char *s = reinterpret_cast(str); + + for (size_t i = 0; i < length; i++) { + unsigned char ch = *s++; + if (std::iscntrl(ch)) { + switch (ch) { + case '\t': + case '\n': + case '\r': + break; + default: + return true; + } + } + } + return false; +} + +bool IsUTF8TrailByte(unsigned char t) { return 0x80 <= t && t<= 0xbf; } + +bool IsValidUTF8(const char* str, size_t length) { + const unsigned char *s = reinterpret_cast(str); + + for (size_t i = 0; i < length;) { + unsigned char lead = s[i++]; + + if (lead <= 0x7f) { + continue; // single-byte character (ASCII) 0..7F + } + if (lead < 0xc2) { + return false; // trail byte or non-shortest form + } else if (lead <= 0xdf && (i + 1) <= length && IsUTF8TrailByte(s[i])) { + ++i; // 2-byte character + } else if (0xe0 <= lead && lead <= 0xef && (i + 2) <= length && + IsUTF8TrailByte(s[i]) && + IsUTF8TrailByte(s[i + 1]) && + // check for non-shortest form and surrogate + (lead != 0xe0 || s[i] >= 0xa0) && + (lead != 0xed || s[i] < 0xa0)) { + i += 2; // 3-byte character + } else if (0xf0 <= lead && lead <= 0xf4 && (i + 3) <= length && + IsUTF8TrailByte(s[i]) && + IsUTF8TrailByte(s[i + 1]) && + IsUTF8TrailByte(s[i + 2]) && + // check for non-shortest form + (lead != 0xf0 || s[i] >= 0x90) && + (lead != 0xf4 || s[i] < 0x90)) { + i += 3; // 4-byte character + } else { + return false; + } + } + return true; +} + +void ConditionalPrintAsText(const char* str, size_t length, ostream* os) { + if (!ContainsUnprintableControlCodes(str, length) && + IsValidUTF8(str, length)) { + *os << "\n As Text: \"" << str << "\""; + } +} + +} // anonymous namespace + +void PrintStringTo(const ::std::string& s, ostream* os) { + if (PrintCharsAsStringTo(s.data(), s.size(), os) == kHexEscape) { + if (GTEST_FLAG(print_utf8)) { + ConditionalPrintAsText(s.data(), s.size(), os); + } + } +} + +#if GTEST_HAS_STD_WSTRING +void PrintWideStringTo(const ::std::wstring& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_STD_WSTRING + +} // namespace internal + +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest-test-part.cc b/source/3rdparty/gtest/src/gtest-test-part.cc new file mode 100644 index 0000000..a938683 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-test-part.cc @@ -0,0 +1,108 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +// The Google C++ Testing and Mocking Framework (Google Test) + +#include "gtest/gtest-test-part.h" + +#include "gtest/internal/gtest-port.h" +#include "src/gtest-internal-inl.h" + +namespace testing { + +using internal::GetUnitTestImpl; + +// Gets the summary of the failure message by omitting the stack trace +// in it. +std::string TestPartResult::ExtractSummary(const char* message) { + const char* const stack_trace = strstr(message, internal::kStackTraceMarker); + return stack_trace == nullptr ? message : std::string(message, stack_trace); +} + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result) { + return os << internal::FormatFileLocation(result.file_name(), + result.line_number()) + << " " + << (result.type() == TestPartResult::kSuccess + ? "Success" + : result.type() == TestPartResult::kSkip + ? "Skipped" + : result.type() == TestPartResult::kFatalFailure + ? "Fatal failure" + : "Non-fatal failure") + << ":\n" + << result.message() << std::endl; +} + +// Appends a TestPartResult to the array. +void TestPartResultArray::Append(const TestPartResult& result) { + array_.push_back(result); +} + +// Returns the TestPartResult at the given index (0-based). +const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const { + if (index < 0 || index >= size()) { + printf("\nInvalid index (%d) into TestPartResultArray.\n", index); + internal::posix::Abort(); + } + + return array_[static_cast(index)]; +} + +// Returns the number of TestPartResult objects in the array. +int TestPartResultArray::size() const { + return static_cast(array_.size()); +} + +namespace internal { + +HasNewFatalFailureHelper::HasNewFatalFailureHelper() + : has_new_fatal_failure_(false), + original_reporter_(GetUnitTestImpl()-> + GetTestPartResultReporterForCurrentThread()) { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this); +} + +HasNewFatalFailureHelper::~HasNewFatalFailureHelper() { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread( + original_reporter_); +} + +void HasNewFatalFailureHelper::ReportTestPartResult( + const TestPartResult& result) { + if (result.fatally_failed()) + has_new_fatal_failure_ = true; + original_reporter_->ReportTestPartResult(result); +} + +} // namespace internal + +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest-typed-test.cc b/source/3rdparty/gtest/src/gtest-typed-test.cc new file mode 100644 index 0000000..1b1cfb0 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest-typed-test.cc @@ -0,0 +1,121 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#include "gtest/gtest-typed-test.h" + +#include "gtest/gtest.h" + +namespace testing { +namespace internal { + +#if GTEST_HAS_TYPED_TEST_P + +// Skips to the first non-space char in str. Returns an empty string if str +// contains only whitespace characters. +static const char* SkipSpaces(const char* str) { + while (IsSpace(*str)) + str++; + return str; +} + +static std::vector SplitIntoTestNames(const char* src) { + std::vector name_vec; + src = SkipSpaces(src); + for (; src != nullptr; src = SkipComma(src)) { + name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src))); + } + return name_vec; +} + +// Verifies that registered_tests match the test names in +// registered_tests_; returns registered_tests if successful, or +// aborts the program otherwise. 
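// For orientation, a sketch of the user-side flow that the verification just
// below enforces (suite, test, and type names here are hypothetical): the
// name list given to REGISTER_TYPED_TEST_SUITE_P must match the TYPED_TEST_P
// definitions exactly.
template <typename T>
class StackTest : public testing::Test {};
TYPED_TEST_SUITE_P(StackTest);
TYPED_TEST_P(StackTest, IsEmptyInitially) { /* ... */ }
TYPED_TEST_P(StackTest, PushThenPopRoundTrips) { /* ... */ }
REGISTER_TYPED_TEST_SUITE_P(StackTest, IsEmptyInitially, PushThenPopRoundTrips);
using MyTypes = testing::Types<int, double>;
INSTANTIATE_TYPED_TEST_SUITE_P(MySuite, StackTest, MyTypes);
// Listing a name twice, omitting one, or naming an unknown test hits the
// corresponding error branches in the check that follows.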
+const char* TypedTestSuitePState::VerifyRegisteredTestNames( + const char* test_suite_name, const char* file, int line, + const char* registered_tests) { + RegisterTypeParameterizedTestSuite(test_suite_name, CodeLocation(file, line)); + + typedef RegisteredTestsMap::const_iterator RegisteredTestIter; + registered_ = true; + + std::vector name_vec = SplitIntoTestNames(registered_tests); + + Message errors; + + std::set tests; + for (std::vector::const_iterator name_it = name_vec.begin(); + name_it != name_vec.end(); ++name_it) { + const std::string& name = *name_it; + if (tests.count(name) != 0) { + errors << "Test " << name << " is listed more than once.\n"; + continue; + } + + bool found = false; + for (RegisteredTestIter it = registered_tests_.begin(); + it != registered_tests_.end(); + ++it) { + if (name == it->first) { + found = true; + break; + } + } + + if (found) { + tests.insert(name); + } else { + errors << "No test named " << name + << " can be found in this test suite.\n"; + } + } + + for (RegisteredTestIter it = registered_tests_.begin(); + it != registered_tests_.end(); + ++it) { + if (tests.count(it->first) == 0) { + errors << "You forgot to list test " << it->first << ".\n"; + } + } + + const std::string& errors_str = errors.GetString(); + if (errors_str != "") { + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors_str.c_str()); + fflush(stderr); + posix::Abort(); + } + + return registered_tests; +} + +#endif // GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest.cc b/source/3rdparty/gtest/src/gtest.cc new file mode 100644 index 0000000..10dbee4 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest.cc @@ -0,0 +1,6359 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +// The Google C++ Testing and Mocking Framework (Google Test) + +#include "gtest/gtest.h" +#include "gtest/internal/custom/gtest.h" +#include "gtest/gtest-spi.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include // NOLINT +#include +#include + +#if GTEST_OS_LINUX + +# define GTEST_HAS_GETTIMEOFDAY_ 1 + +# include // NOLINT +# include // NOLINT +# include // NOLINT +// Declares vsnprintf(). This header is not available on Windows. +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include + +#elif GTEST_OS_ZOS +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include // NOLINT + +// On z/OS we additionally need strings.h for strcasecmp. +# include // NOLINT + +#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE. + +# include // NOLINT +# undef min + +#elif GTEST_OS_WINDOWS // We are on Windows proper. + +# include // NOLINT +# undef min + +#ifdef _MSC_VER +# include // NOLINT +# include // NOLINT +#endif + +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include // NOLINT + +# if GTEST_OS_WINDOWS_MINGW +// MinGW has gettimeofday() but not _ftime64(). +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include // NOLINT +# endif // GTEST_OS_WINDOWS_MINGW + +#else + +// Assume other platforms have gettimeofday(). +# define GTEST_HAS_GETTIMEOFDAY_ 1 + +// cpplint thinks that the header is already included, so we want to +// silence it. +# include // NOLINT +# include // NOLINT + +#endif // GTEST_OS_LINUX + +#if GTEST_HAS_EXCEPTIONS +# include +#endif + +#if GTEST_CAN_STREAM_RESULTS_ +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include // NOLINT +#endif + +#include "src/gtest-internal-inl.h" + +#if GTEST_OS_WINDOWS +# define vsnprintf _vsnprintf +#endif // GTEST_OS_WINDOWS + +#if GTEST_OS_MAC +#ifndef GTEST_OS_IOS +#include +#endif +#endif + +#if GTEST_HAS_ABSL +#include "absl/debugging/failure_signal_handler.h" +#include "absl/debugging/stacktrace.h" +#include "absl/debugging/symbolize.h" +#include "absl/strings/str_cat.h" +#endif // GTEST_HAS_ABSL + +namespace testing { + +using internal::CountIf; +using internal::ForEach; +using internal::GetElementOr; +using internal::Shuffle; + +// Constants. + +// A test whose test suite name or test name matches this filter is +// disabled and not run. +static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*"; + +// A test suite whose name matches this filter is considered a death +// test suite and will be run before test suites whose name doesn't +// match this filter. +static const char kDeathTestSuiteFilter[] = "*DeathTest:*DeathTest/*"; + +// A test filter that matches everything. +static const char kUniversalFilter[] = "*"; + +// The default output format. +static const char kDefaultOutputFormat[] = "xml"; +// The default output file. +static const char kDefaultOutputFile[] = "test_detail"; + +// The environment variable name for the test shard index. +static const char kTestShardIndex[] = "GTEST_SHARD_INDEX"; +// The environment variable name for the total number of test shards. +static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS"; +// The environment variable name for the test shard status file. +static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE"; + +namespace internal { + +// The text used in failure messages to indicate the start of the +// stack trace. 
+const char kStackTraceMarker[] = "\nStack trace:\n"; + +// g_help_flag is true if and only if the --help flag or an equivalent form +// is specified on the command line. +bool g_help_flag = false; + +// Utilty function to Open File for Writing +static FILE* OpenFileForWriting(const std::string& output_file) { + FILE* fileout = nullptr; + FilePath output_file_path(output_file); + FilePath output_dir(output_file_path.RemoveFileName()); + + if (output_dir.CreateDirectoriesRecursively()) { + fileout = posix::FOpen(output_file.c_str(), "w"); + } + if (fileout == nullptr) { + GTEST_LOG_(FATAL) << "Unable to open file \"" << output_file << "\""; + } + return fileout; +} + +} // namespace internal + +// Bazel passes in the argument to '--test_filter' via the TESTBRIDGE_TEST_ONLY +// environment variable. +static const char* GetDefaultFilter() { + const char* const testbridge_test_only = + internal::posix::GetEnv("TESTBRIDGE_TEST_ONLY"); + if (testbridge_test_only != nullptr) { + return testbridge_test_only; + } + return kUniversalFilter; +} + +GTEST_DEFINE_bool_( + also_run_disabled_tests, + internal::BoolFromGTestEnv("also_run_disabled_tests", false), + "Run disabled tests too, in addition to the tests normally being run."); + +GTEST_DEFINE_bool_( + break_on_failure, internal::BoolFromGTestEnv("break_on_failure", false), + "True if and only if a failed assertion should be a debugger " + "break-point."); + +GTEST_DEFINE_bool_(catch_exceptions, + internal::BoolFromGTestEnv("catch_exceptions", true), + "True if and only if " GTEST_NAME_ + " should catch exceptions and treat them as test failures."); + +GTEST_DEFINE_string_( + color, + internal::StringFromGTestEnv("color", "auto"), + "Whether to use colors in the output. Valid values: yes, no, " + "and auto. 'auto' means to use colors if the output is " + "being sent to a terminal and the TERM environment variable " + "is set to a terminal type that supports colors."); + +GTEST_DEFINE_string_( + filter, + internal::StringFromGTestEnv("filter", GetDefaultFilter()), + "A colon-separated list of glob (not regex) patterns " + "for filtering the tests to run, optionally followed by a " + "'-' and a : separated list of negative patterns (tests to " + "exclude). A test is run if it matches one of the positive " + "patterns and does not match any of the negative patterns."); + +GTEST_DEFINE_bool_( + install_failure_signal_handler, + internal::BoolFromGTestEnv("install_failure_signal_handler", false), + "If true and supported on the current platform, " GTEST_NAME_ " should " + "install a signal handler that dumps debugging information when fatal " + "signals are raised."); + +GTEST_DEFINE_bool_(list_tests, false, + "List all tests without running them."); + +// The net priority order after flag processing is thus: +// --gtest_output command line flag +// GTEST_OUTPUT environment variable +// XML_OUTPUT_FILE environment variable +// '' +GTEST_DEFINE_string_( + output, + internal::StringFromGTestEnv("output", + internal::OutputFlagAlsoCheckEnvVar().c_str()), + "A format (defaults to \"xml\" but can be specified to be \"json\"), " + "optionally followed by a colon and an output file name or directory. " + "A directory is indicated by a trailing pathname separator. " + "Examples: \"xml:filename.xml\", \"xml::directoryname/\". 
" + "If a directory is specified, output files will be created " + "within that directory, with file-names based on the test " + "executable's name and, if necessary, made unique by adding " + "digits."); + +GTEST_DEFINE_bool_(print_time, internal::BoolFromGTestEnv("print_time", true), + "True if and only if " GTEST_NAME_ + " should display elapsed time in text output."); + +GTEST_DEFINE_bool_(print_utf8, internal::BoolFromGTestEnv("print_utf8", true), + "True if and only if " GTEST_NAME_ + " prints UTF8 characters as text."); + +GTEST_DEFINE_int32_( + random_seed, + internal::Int32FromGTestEnv("random_seed", 0), + "Random number seed to use when shuffling test orders. Must be in range " + "[1, 99999], or 0 to use a seed based on the current time."); + +GTEST_DEFINE_int32_( + repeat, + internal::Int32FromGTestEnv("repeat", 1), + "How many times to repeat each test. Specify a negative number " + "for repeating forever. Useful for shaking out flaky tests."); + +GTEST_DEFINE_bool_(show_internal_stack_frames, false, + "True if and only if " GTEST_NAME_ + " should include internal stack frames when " + "printing test failure stack traces."); + +GTEST_DEFINE_bool_(shuffle, internal::BoolFromGTestEnv("shuffle", false), + "True if and only if " GTEST_NAME_ + " should randomize tests' order on every run."); + +GTEST_DEFINE_int32_( + stack_trace_depth, + internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth), + "The maximum number of stack frames to print when an " + "assertion fails. The valid range is 0 through 100, inclusive."); + +GTEST_DEFINE_string_( + stream_result_to, + internal::StringFromGTestEnv("stream_result_to", ""), + "This flag specifies the host name and the port number on which to stream " + "test results. Example: \"localhost:555\". The flag is effective only on " + "Linux."); + +GTEST_DEFINE_bool_( + throw_on_failure, + internal::BoolFromGTestEnv("throw_on_failure", false), + "When this flag is specified, a failed assertion will throw an exception " + "if exceptions are enabled or exit the program with a non-zero code " + "otherwise. For use with an external test framework."); + +#if GTEST_USE_OWN_FLAGFILE_FLAG_ +GTEST_DEFINE_string_( + flagfile, + internal::StringFromGTestEnv("flagfile", ""), + "This flag specifies the flagfile to read command-line flags from."); +#endif // GTEST_USE_OWN_FLAGFILE_FLAG_ + +namespace internal { + +// Generates a random number from [0, range), using a Linear +// Congruential Generator (LCG). Crashes if 'range' is 0 or greater +// than kMaxRange. +uint32_t Random::Generate(uint32_t range) { + // These constants are the same as are used in glibc's rand(3). + // Use wider types than necessary to prevent unsigned overflow diagnostics. + state_ = static_cast(1103515245ULL*state_ + 12345U) % kMaxRange; + + GTEST_CHECK_(range > 0) + << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range <= kMaxRange) + << "Generation of a number in [0, " << range << ") was requested, " + << "but this can only generate numbers in [0, " << kMaxRange << ")."; + + // Converting via modulus introduces a bit of downward bias, but + // it's simple, and a linear congruential generator isn't too good + // to begin with. + return state_ % range; +} + +// GTestIsInitialized() returns true if and only if the user has initialized +// Google Test. Useful for catching the user mistake of not initializing +// Google Test before calling RUN_ALL_TESTS(). 
+static bool GTestIsInitialized() { return GetArgvs().size() > 0; } + +// Iterates over a vector of TestSuites, keeping a running sum of the +// results of calling a given int-returning method on each. +// Returns the sum. +static int SumOverTestSuiteList(const std::vector& case_list, + int (TestSuite::*method)() const) { + int sum = 0; + for (size_t i = 0; i < case_list.size(); i++) { + sum += (case_list[i]->*method)(); + } + return sum; +} + +// Returns true if and only if the test suite passed. +static bool TestSuitePassed(const TestSuite* test_suite) { + return test_suite->should_run() && test_suite->Passed(); +} + +// Returns true if and only if the test suite failed. +static bool TestSuiteFailed(const TestSuite* test_suite) { + return test_suite->should_run() && test_suite->Failed(); +} + +// Returns true if and only if test_suite contains at least one test that +// should run. +static bool ShouldRunTestSuite(const TestSuite* test_suite) { + return test_suite->should_run(); +} + +// AssertHelper constructor. +AssertHelper::AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message) + : data_(new AssertHelperData(type, file, line, message)) { +} + +AssertHelper::~AssertHelper() { + delete data_; +} + +// Message assignment, for assertion streaming support. +void AssertHelper::operator=(const Message& message) const { + UnitTest::GetInstance()-> + AddTestPartResult(data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl() + ->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT +} + +namespace { + +// When TEST_P is found without a matching INSTANTIATE_TEST_SUITE_P +// to creates test cases for it, a syntetic test case is +// inserted to report ether an error or a log message. +// +// This configuration bit will likely be removed at some point. +constexpr bool kErrorOnUninstantiatedParameterizedTest = false; +constexpr bool kErrorOnUninstantiatedTypeParameterizedTest = false; + +// A test that fails at a given file/line location with a given message. +class FailureTest : public Test { + public: + explicit FailureTest(const CodeLocation& loc, std::string error_message, + bool as_error) + : loc_(loc), + error_message_(std::move(error_message)), + as_error_(as_error) {} + + void TestBody() override { + if (as_error_) { + AssertHelper(TestPartResult::kNonFatalFailure, loc_.file.c_str(), + loc_.line, "") = Message() << error_message_; + } else { + std::cout << error_message_ << std::endl; + } + } + + private: + const CodeLocation loc_; + const std::string error_message_; + const bool as_error_; +}; + + +} // namespace + +std::set* GetIgnoredParameterizedTestSuites() { + return UnitTest::GetInstance()->impl()->ignored_parameterized_test_suites(); +} + +// Add a given test_suit to the list of them allow to go un-instantiated. +MarkAsIgnored::MarkAsIgnored(const char* test_suite) { + GetIgnoredParameterizedTestSuites()->insert(test_suite); +} + +// If this parameterized test suite has no instantiations (and that +// has not been marked as okay), emit a test case reporting that. +void InsertSyntheticTestCase(const std::string& name, CodeLocation location, + bool has_test_p) { + const auto& ignored = *GetIgnoredParameterizedTestSuites(); + if (ignored.find(name) != ignored.end()) return; + + const char kMissingInstantiation[] = // + " is defined via TEST_P, but never instantiated. None of the test cases " + "will run. 
Either no INSTANTIATE_TEST_SUITE_P is provided or the only " + "ones provided expand to nothing." + "\n\n" + "Ideally, TEST_P definitions should only ever be included as part of " + "binaries that intend to use them. (As opposed to, for example, being " + "placed in a library that may be linked in to get other utilities.)"; + + const char kMissingTestCase[] = // + " is instantiated via INSTANTIATE_TEST_SUITE_P, but no tests are " + "defined via TEST_P . No test cases will run." + "\n\n" + "Ideally, INSTANTIATE_TEST_SUITE_P should only ever be invoked from " + "code that always depend on code that provides TEST_P. Failing to do " + "so is often an indication of dead code, e.g. the last TEST_P was " + "removed but the rest got left behind."; + + std::string message = + "Paramaterized test suite " + name + + (has_test_p ? kMissingInstantiation : kMissingTestCase) + + "\n\n" + "To suppress this error for this test suite, insert the following line " + "(in a non-header) in the namespace it is defined in:" + "\n\n" + "GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(" + name + ");"; + + std::string full_name = "UninstantiatedParamaterizedTestSuite<" + name + ">"; + RegisterTest( // + "GoogleTestVerification", full_name.c_str(), + nullptr, // No type parameter. + nullptr, // No value parameter. + location.file.c_str(), location.line, [message, location] { + return new FailureTest(location, message, + kErrorOnUninstantiatedParameterizedTest); + }); +} + +void RegisterTypeParameterizedTestSuite(const char* test_suite_name, + CodeLocation code_location) { + GetUnitTestImpl()->type_parameterized_test_registry().RegisterTestSuite( + test_suite_name, code_location); +} + +void RegisterTypeParameterizedTestSuiteInstantiation(const char* case_name) { + GetUnitTestImpl() + ->type_parameterized_test_registry() + .RegisterInstantiation(case_name); +} + +void TypeParameterizedTestSuiteRegistry::RegisterTestSuite( + const char* test_suite_name, CodeLocation code_location) { + suites_.emplace(std::string(test_suite_name), + TypeParameterizedTestSuiteInfo(code_location)); +} + +void TypeParameterizedTestSuiteRegistry::RegisterInstantiation( + const char* test_suite_name) { + auto it = suites_.find(std::string(test_suite_name)); + if (it != suites_.end()) { + it->second.instantiated = true; + } else { + GTEST_LOG_(ERROR) << "Unknown type parameterized test suit '" + << test_suite_name << "'"; + } +} + +void TypeParameterizedTestSuiteRegistry::CheckForInstantiations() { + const auto& ignored = *GetIgnoredParameterizedTestSuites(); + for (const auto& testcase : suites_) { + if (testcase.second.instantiated) continue; + if (ignored.find(testcase.first) != ignored.end()) continue; + + std::string message = + "Type paramaterized test suite " + testcase.first + + " is defined via REGISTER_TYPED_TEST_SUITE_P, but never instantiated " + "via INSTANTIATE_TYPED_TEST_SUITE_P. None of the test cases will run." + "\n\n" + "Ideally, TYPED_TEST_P definitions should only ever be included as " + "part of binaries that intend to use them. 
(As opposed to, for " + "example, being placed in a library that may be linked in to get other " + "utilities.)" + "\n\n" + "To suppress this error for this test suite, insert the following line " + "(in a non-header) in the namespace it is definedin in:" + "\n\n" + "GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(" + + testcase.first + ");"; + + std::string full_name = + "UninstantiatedTypeParamaterizedTestSuite<" + testcase.first + ">"; + RegisterTest( // + "GoogleTestVerification", full_name.c_str(), + nullptr, // No type parameter. + nullptr, // No value parameter. + testcase.second.code_location.file.c_str(), + testcase.second.code_location.line, [message, testcase] { + return new FailureTest(testcase.second.code_location, message, + kErrorOnUninstantiatedTypeParameterizedTest); + }); + } +} + +// A copy of all command line arguments. Set by InitGoogleTest(). +static ::std::vector g_argvs; + +::std::vector GetArgvs() { +#if defined(GTEST_CUSTOM_GET_ARGVS_) + // GTEST_CUSTOM_GET_ARGVS_() may return a container of std::string or + // ::string. This code converts it to the appropriate type. + const auto& custom = GTEST_CUSTOM_GET_ARGVS_(); + return ::std::vector(custom.begin(), custom.end()); +#else // defined(GTEST_CUSTOM_GET_ARGVS_) + return g_argvs; +#endif // defined(GTEST_CUSTOM_GET_ARGVS_) +} + +// Returns the current application's name, removing directory path if that +// is present. +FilePath GetCurrentExecutableName() { + FilePath result; + +#if GTEST_OS_WINDOWS || GTEST_OS_OS2 + result.Set(FilePath(GetArgvs()[0]).RemoveExtension("exe")); +#else + result.Set(FilePath(GetArgvs()[0])); +#endif // GTEST_OS_WINDOWS + + return result.RemoveDirectoryName(); +} + +// Functions for processing the gtest_output flag. + +// Returns the output format, or "" for normal printed output. +std::string UnitTestOptions::GetOutputFormat() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + const char* const colon = strchr(gtest_output_flag, ':'); + return (colon == nullptr) + ? std::string(gtest_output_flag) + : std::string(gtest_output_flag, + static_cast(colon - gtest_output_flag)); +} + +// Returns the name of the requested output file, or the default if none +// was explicitly specified. +std::string UnitTestOptions::GetAbsolutePathToOutputFile() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + + std::string format = GetOutputFormat(); + if (format.empty()) + format = std::string(kDefaultOutputFormat); + + const char* const colon = strchr(gtest_output_flag, ':'); + if (colon == nullptr) + return internal::FilePath::MakeFileName( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile), 0, + format.c_str()).string(); + + internal::FilePath output_name(colon + 1); + if (!output_name.IsAbsolutePath()) + output_name = internal::FilePath::ConcatPaths( + internal::FilePath(UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(colon + 1)); + + if (!output_name.IsDirectory()) + return output_name.string(); + + internal::FilePath result(internal::FilePath::GenerateUniqueFileName( + output_name, internal::GetCurrentExecutableName(), + GetOutputFormat().c_str())); + return result.string(); +} + +// Returns true if and only if the wildcard pattern matches the string. +// The first ':' or '\0' character in pattern marks the end of it. +// +// This recursive algorithm isn't very efficient, but is clear and +// works well enough for matching test names, which are short. 
+bool UnitTestOptions::PatternMatchesString(const char *pattern, + const char *str) { + switch (*pattern) { + case '\0': + case ':': // Either ':' or '\0' marks the end of the pattern. + return *str == '\0'; + case '?': // Matches any single character. + return *str != '\0' && PatternMatchesString(pattern + 1, str + 1); + case '*': // Matches any string (possibly empty) of characters. + return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || + PatternMatchesString(pattern + 1, str); + default: // Non-special character. Matches itself. + return *pattern == *str && + PatternMatchesString(pattern + 1, str + 1); + } +} + +bool UnitTestOptions::MatchesFilter( + const std::string& name, const char* filter) { + const char *cur_pattern = filter; + for (;;) { + if (PatternMatchesString(cur_pattern, name.c_str())) { + return true; + } + + // Finds the next pattern in the filter. + cur_pattern = strchr(cur_pattern, ':'); + + // Returns if no more pattern can be found. + if (cur_pattern == nullptr) { + return false; + } + + // Skips the pattern separater (the ':' character). + cur_pattern++; + } +} + +// Returns true if and only if the user-specified filter matches the test +// suite name and the test name. +bool UnitTestOptions::FilterMatchesTest(const std::string& test_suite_name, + const std::string& test_name) { + const std::string& full_name = test_suite_name + "." + test_name.c_str(); + + // Split --gtest_filter at '-', if there is one, to separate into + // positive filter and negative filter portions + const char* const p = GTEST_FLAG(filter).c_str(); + const char* const dash = strchr(p, '-'); + std::string positive; + std::string negative; + if (dash == nullptr) { + positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter + negative = ""; + } else { + positive = std::string(p, dash); // Everything up to the dash + negative = std::string(dash + 1); // Everything after the dash + if (positive.empty()) { + // Treat '-test1' as the same as '*-test1' + positive = kUniversalFilter; + } + } + + // A filter is a colon-separated list of patterns. It matches a + // test if any pattern in it matches the test. + return (MatchesFilter(full_name, positive.c_str()) && + !MatchesFilter(full_name, negative.c_str())); +} + +#if GTEST_HAS_SEH +// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the +// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. +// This function is useful as an __except condition. +int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { + // Google Test should handle a SEH exception if: + // 1. the user wants it to, AND + // 2. this is not a breakpoint exception, AND + // 3. this is not a C++ exception (VC++ implements them via SEH, + // apparently). + // + // SEH exception code for C++ exceptions. + // (see http://support.microsoft.com/kb/185294 for more information). + const DWORD kCxxExceptionCode = 0xe06d7363; + + bool should_handle = true; + + if (!GTEST_FLAG(catch_exceptions)) + should_handle = false; + else if (exception_code == EXCEPTION_BREAKPOINT) + should_handle = false; + else if (exception_code == kCxxExceptionCode) + should_handle = false; + + return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; +} +#endif // GTEST_HAS_SEH + +} // namespace internal + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. Intercepts only failures from the current thread. 
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + TestPartResultArray* result) + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), + result_(result) { + Init(); +} + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + InterceptMode intercept_mode, TestPartResultArray* result) + : intercept_mode_(intercept_mode), + result_(result) { + Init(); +} + +void ScopedFakeTestPartResultReporter::Init() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + old_reporter_ = impl->GetGlobalTestPartResultReporter(); + impl->SetGlobalTestPartResultReporter(this); + } else { + old_reporter_ = impl->GetTestPartResultReporterForCurrentThread(); + impl->SetTestPartResultReporterForCurrentThread(this); + } +} + +// The d'tor restores the test part result reporter used by Google Test +// before. +ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + impl->SetGlobalTestPartResultReporter(old_reporter_); + } else { + impl->SetTestPartResultReporterForCurrentThread(old_reporter_); + } +} + +// Increments the test part result count and remembers the result. +// This method is from the TestPartResultReporterInterface interface. +void ScopedFakeTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + result_->Append(result); +} + +namespace internal { + +// Returns the type ID of ::testing::Test. We should always call this +// instead of GetTypeId< ::testing::Test>() to get the type ID of +// testing::Test. This is to work around a suspected linker bug when +// using Google Test as a framework on Mac OS X. The bug causes +// GetTypeId< ::testing::Test>() to return different values depending +// on whether the call is from the Google Test framework itself or +// from user test code. GetTestTypeId() is guaranteed to always +// return the same value, as it always calls GetTypeId<>() from the +// gtest.cc, which is within the Google Test framework. +TypeId GetTestTypeId() { + return GetTypeId(); +} + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId(); + +// This predicate-formatter checks that 'results' contains a test part +// failure of the given type and that the failure message contains the +// given substring. +static AssertionResult HasOneFailure(const char* /* results_expr */, + const char* /* type_expr */, + const char* /* substr_expr */, + const TestPartResultArray& results, + TestPartResult::Type type, + const std::string& substr) { + const std::string expected(type == TestPartResult::kFatalFailure ? 
+ "1 fatal failure" : + "1 non-fatal failure"); + Message msg; + if (results.size() != 1) { + msg << "Expected: " << expected << "\n" + << " Actual: " << results.size() << " failures"; + for (int i = 0; i < results.size(); i++) { + msg << "\n" << results.GetTestPartResult(i); + } + return AssertionFailure() << msg; + } + + const TestPartResult& r = results.GetTestPartResult(0); + if (r.type() != type) { + return AssertionFailure() << "Expected: " << expected << "\n" + << " Actual:\n" + << r; + } + + if (strstr(r.message(), substr.c_str()) == nullptr) { + return AssertionFailure() << "Expected: " << expected << " containing \"" + << substr << "\"\n" + << " Actual:\n" + << r; + } + + return AssertionSuccess(); +} + +// The constructor of SingleFailureChecker remembers where to look up +// test part results, what type of failure we expect, and what +// substring the failure message should contain. +SingleFailureChecker::SingleFailureChecker(const TestPartResultArray* results, + TestPartResult::Type type, + const std::string& substr) + : results_(results), type_(type), substr_(substr) {} + +// The destructor of SingleFailureChecker verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +SingleFailureChecker::~SingleFailureChecker() { + EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_); +} + +DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultGlobalTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->current_test_result()->AddTestPartResult(result); + unit_test_->listeners()->repeater()->OnTestPartResult(result); +} + +DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result); +} + +// Returns the global test part result reporter. +TestPartResultReporterInterface* +UnitTestImpl::GetGlobalTestPartResultReporter() { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + return global_test_part_result_repoter_; +} + +// Sets the global test part result reporter. +void UnitTestImpl::SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter) { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + global_test_part_result_repoter_ = reporter; +} + +// Returns the test part result reporter for the current thread. +TestPartResultReporterInterface* +UnitTestImpl::GetTestPartResultReporterForCurrentThread() { + return per_thread_test_part_result_reporter_.get(); +} + +// Sets the test part result reporter for the current thread. +void UnitTestImpl::SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter) { + per_thread_test_part_result_reporter_.set(reporter); +} + +// Gets the number of successful test suites. +int UnitTestImpl::successful_test_suite_count() const { + return CountIf(test_suites_, TestSuitePassed); +} + +// Gets the number of failed test suites. +int UnitTestImpl::failed_test_suite_count() const { + return CountIf(test_suites_, TestSuiteFailed); +} + +// Gets the number of all test suites. 
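HasOneFailure above follows the predicate-formatter convention used throughout gtest: the function receives the stringified argument expressions followed by the values and returns AssertionSuccess() or AssertionFailure() carrying a message, which lets it plug into the EXPECT_PRED_FORMAT* macros. A hedged sketch of a user-defined formatter in the same style (IsEven is illustrative, not from this file):

#include <gtest/gtest.h>

// Predicate-formatter: the first parameter is the source text of the
// argument, the second is its value. Returning AssertionFailure() with a
// streamed message gives readable output when used via EXPECT_PRED_FORMAT1.
static ::testing::AssertionResult IsEven(const char* expr, int value) {
  if (value % 2 == 0) return ::testing::AssertionSuccess();
  return ::testing::AssertionFailure()
         << expr << " evaluates to " << value << ", which is odd";
}

TEST(PredicateFormatterDemo, ProducesReadableMessages) {
  EXPECT_PRED_FORMAT1(IsEven, 2 + 2);  // passes
  EXPECT_PRED_FORMAT1(IsEven, 7);      // fails with "7 evaluates to 7, which is odd"
}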
+int UnitTestImpl::total_test_suite_count() const { + return static_cast(test_suites_.size()); +} + +// Gets the number of all test suites that contain at least one test +// that should run. +int UnitTestImpl::test_suite_to_run_count() const { + return CountIf(test_suites_, ShouldRunTestSuite); +} + +// Gets the number of successful tests. +int UnitTestImpl::successful_test_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::successful_test_count); +} + +// Gets the number of skipped tests. +int UnitTestImpl::skipped_test_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::skipped_test_count); +} + +// Gets the number of failed tests. +int UnitTestImpl::failed_test_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::failed_test_count); +} + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTestImpl::reportable_disabled_test_count() const { + return SumOverTestSuiteList(test_suites_, + &TestSuite::reportable_disabled_test_count); +} + +// Gets the number of disabled tests. +int UnitTestImpl::disabled_test_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::disabled_test_count); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTestImpl::reportable_test_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::reportable_test_count); +} + +// Gets the number of all tests. +int UnitTestImpl::total_test_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::total_test_count); +} + +// Gets the number of tests that should run. +int UnitTestImpl::test_to_run_count() const { + return SumOverTestSuiteList(test_suites_, &TestSuite::test_to_run_count); +} + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// CurrentOsStackTraceExceptTop(1), Foo() will be included in the +// trace but Bar() and CurrentOsStackTraceExceptTop() won't. +std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { + return os_stack_trace_getter()->CurrentStackTrace( + static_cast(GTEST_FLAG(stack_trace_depth)), + skip_count + 1 + // Skips the user-specified number of frames plus this function + // itself. + ); // NOLINT +} + +// Returns the current time in milliseconds. +TimeInMillis GetTimeInMillis() { +#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__) + // Difference between 1970-01-01 and 1601-01-01 in milliseconds. + // http://analogous.blogspot.com/2005/04/epoch.html + const TimeInMillis kJavaEpochToWinFileTimeDelta = + static_cast(116444736UL) * 100000UL; + const DWORD kTenthMicrosInMilliSecond = 10000; + + SYSTEMTIME now_systime; + FILETIME now_filetime; + ULARGE_INTEGER now_int64; + GetSystemTime(&now_systime); + if (SystemTimeToFileTime(&now_systime, &now_filetime)) { + now_int64.LowPart = now_filetime.dwLowDateTime; + now_int64.HighPart = now_filetime.dwHighDateTime; + now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) - + kJavaEpochToWinFileTimeDelta; + return now_int64.QuadPart; + } + return 0; +#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_ + __timeb64 now; + + // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996 + // (deprecated function) there. 
+ GTEST_DISABLE_MSC_DEPRECATED_PUSH_() + _ftime64(&now); + GTEST_DISABLE_MSC_DEPRECATED_POP_() + + return static_cast(now.time) * 1000 + now.millitm; +#elif GTEST_HAS_GETTIMEOFDAY_ + struct timeval now; + gettimeofday(&now, nullptr); + return static_cast(now.tv_sec) * 1000 + now.tv_usec / 1000; +#else +# error "Don't know how to get the current time on your system." +#endif +} + +// Utilities + +// class String. + +#if GTEST_OS_WINDOWS_MOBILE +// Creates a UTF-16 wide string from the given ANSI string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the wide string, or NULL if the +// input is NULL. +LPCWSTR String::AnsiToUtf16(const char* ansi) { + if (!ansi) return nullptr; + const int length = strlen(ansi); + const int unicode_length = + MultiByteToWideChar(CP_ACP, 0, ansi, length, nullptr, 0); + WCHAR* unicode = new WCHAR[unicode_length + 1]; + MultiByteToWideChar(CP_ACP, 0, ansi, length, + unicode, unicode_length); + unicode[unicode_length] = 0; + return unicode; +} + +// Creates an ANSI string from the given wide string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the ANSI string, or NULL if the +// input is NULL. +const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { + if (!utf16_str) return nullptr; + const int ansi_length = WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, nullptr, + 0, nullptr, nullptr); + char* ansi = new char[ansi_length + 1]; + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, ansi, ansi_length, nullptr, + nullptr); + ansi[ansi_length] = 0; + return ansi; +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +// Compares two C strings. Returns true if and only if they have the same +// content. +// +// Unlike strcmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CStringEquals(const char * lhs, const char * rhs) { + if (lhs == nullptr) return rhs == nullptr; + + if (rhs == nullptr) return false; + + return strcmp(lhs, rhs) == 0; +} + +#if GTEST_HAS_STD_WSTRING + +// Converts an array of wide chars to a narrow string using the UTF-8 +// encoding, and streams the result to the given Message object. +static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, + Message* msg) { + for (size_t i = 0; i != length; ) { // NOLINT + if (wstr[i] != L'\0') { + *msg << WideStringToUtf8(wstr + i, static_cast(length - i)); + while (i != length && wstr[i] != L'\0') + i++; + } else { + *msg << '\0'; + i++; + } + } +} + +#endif // GTEST_HAS_STD_WSTRING + +void SplitString(const ::std::string& str, char delimiter, + ::std::vector< ::std::string>* dest) { + ::std::vector< ::std::string> parsed; + ::std::string::size_type pos = 0; + while (::testing::internal::AlwaysTrue()) { + const ::std::string::size_type colon = str.find(delimiter, pos); + if (colon == ::std::string::npos) { + parsed.push_back(str.substr(pos)); + break; + } else { + parsed.push_back(str.substr(pos, colon - pos)); + pos = colon + 1; + } + } + dest->swap(parsed); +} + +} // namespace internal + +// Constructs an empty Message. +// We allocate the stringstream separately because otherwise each use of +// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's +// stack frame leading to huge stack frames in some cases; gcc does not reuse +// the stack space. 
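GetTimeInMillis above hand-picks a clock per platform (Win32 FILETIME, _ftime64, or gettimeofday) because this code predates a portable <chrono> being usable everywhere gtest builds. For comparison only, and not how this file does it, the same value can be obtained in C++11 with std::chrono:

#include <chrono>
#include <cstdint>
#include <iostream>

// Milliseconds since the Unix epoch, equivalent in spirit to GetTimeInMillis().
static std::int64_t NowInMillis() {
  using std::chrono::duration_cast;
  using std::chrono::milliseconds;
  using std::chrono::system_clock;
  return duration_cast<milliseconds>(system_clock::now().time_since_epoch())
      .count();
}

int main() { std::cout << NowInMillis() << "\n"; }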
+Message::Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits::digits10 + 2); +} + +// These two overloads allow streaming a wide C string to a Message +// using the UTF-8 encoding. +Message& Message::operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} +Message& Message::operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} + +#if GTEST_HAS_STD_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::std::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_STD_WSTRING + +// Gets the text streamed to this object so far as an std::string. +// Each '\0' character in the buffer is replaced with "\\0". +std::string Message::GetString() const { + return internal::StringStreamToString(ss_.get()); +} + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != nullptr + ? new ::std::string(*other.message_) + : static_cast< ::std::string*>(nullptr)) {} + +// Swaps two AssertionResults. +void AssertionResult::swap(AssertionResult& other) { + using std::swap; + swap(success_, other.success_); + swap(message_, other.message_); +} + +// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != nullptr) negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { + return AssertionResult(true); +} + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { + return AssertionResult(false); +} + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message. +AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +namespace internal { + +namespace edit_distance { +std::vector CalculateOptimalEdits(const std::vector& left, + const std::vector& right) { + std::vector > costs( + left.size() + 1, std::vector(right.size() + 1)); + std::vector > best_move( + left.size() + 1, std::vector(right.size() + 1)); + + // Populate for empty right. + for (size_t l_i = 0; l_i < costs.size(); ++l_i) { + costs[l_i][0] = static_cast(l_i); + best_move[l_i][0] = kRemove; + } + // Populate for empty left. + for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) { + costs[0][r_i] = static_cast(r_i); + best_move[0][r_i] = kAdd; + } + + for (size_t l_i = 0; l_i < left.size(); ++l_i) { + for (size_t r_i = 0; r_i < right.size(); ++r_i) { + if (left[l_i] == right[r_i]) { + // Found a match. Consume it. 
+ costs[l_i + 1][r_i + 1] = costs[l_i][r_i]; + best_move[l_i + 1][r_i + 1] = kMatch; + continue; + } + + const double add = costs[l_i + 1][r_i]; + const double remove = costs[l_i][r_i + 1]; + const double replace = costs[l_i][r_i]; + if (add < remove && add < replace) { + costs[l_i + 1][r_i + 1] = add + 1; + best_move[l_i + 1][r_i + 1] = kAdd; + } else if (remove < add && remove < replace) { + costs[l_i + 1][r_i + 1] = remove + 1; + best_move[l_i + 1][r_i + 1] = kRemove; + } else { + // We make replace a little more expensive than add/remove to lower + // their priority. + costs[l_i + 1][r_i + 1] = replace + 1.00001; + best_move[l_i + 1][r_i + 1] = kReplace; + } + } + } + + // Reconstruct the best path. We do it in reverse order. + std::vector best_path; + for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) { + EditType move = best_move[l_i][r_i]; + best_path.push_back(move); + l_i -= move != kAdd; + r_i -= move != kRemove; + } + std::reverse(best_path.begin(), best_path.end()); + return best_path; +} + +namespace { + +// Helper class to convert string into ids with deduplication. +class InternalStrings { + public: + size_t GetId(const std::string& str) { + IdMap::iterator it = ids_.find(str); + if (it != ids_.end()) return it->second; + size_t id = ids_.size(); + return ids_[str] = id; + } + + private: + typedef std::map IdMap; + IdMap ids_; +}; + +} // namespace + +std::vector CalculateOptimalEdits( + const std::vector& left, + const std::vector& right) { + std::vector left_ids, right_ids; + { + InternalStrings intern_table; + for (size_t i = 0; i < left.size(); ++i) { + left_ids.push_back(intern_table.GetId(left[i])); + } + for (size_t i = 0; i < right.size(); ++i) { + right_ids.push_back(intern_table.GetId(right[i])); + } + } + return CalculateOptimalEdits(left_ids, right_ids); +} + +namespace { + +// Helper class that holds the state for one hunk and prints it out to the +// stream. +// It reorders adds/removes when possible to group all removes before all +// adds. It also adds the hunk header before printint into the stream. +class Hunk { + public: + Hunk(size_t left_start, size_t right_start) + : left_start_(left_start), + right_start_(right_start), + adds_(), + removes_(), + common_() {} + + void PushLine(char edit, const char* line) { + switch (edit) { + case ' ': + ++common_; + FlushEdits(); + hunk_.push_back(std::make_pair(' ', line)); + break; + case '-': + ++removes_; + hunk_removes_.push_back(std::make_pair('-', line)); + break; + case '+': + ++adds_; + hunk_adds_.push_back(std::make_pair('+', line)); + break; + } + } + + void PrintTo(std::ostream* os) { + PrintHeader(os); + FlushEdits(); + for (std::list >::const_iterator it = + hunk_.begin(); + it != hunk_.end(); ++it) { + *os << it->first << it->second << "\n"; + } + } + + bool has_edits() const { return adds_ || removes_; } + + private: + void FlushEdits() { + hunk_.splice(hunk_.end(), hunk_removes_); + hunk_.splice(hunk_.end(), hunk_adds_); + } + + // Print a unified diff header for one hunk. + // The format is + // "@@ -, +, @@" + // where the left/right parts are omitted if unnecessary. 
+ void PrintHeader(std::ostream* ss) const { + *ss << "@@ "; + if (removes_) { + *ss << "-" << left_start_ << "," << (removes_ + common_); + } + if (removes_ && adds_) { + *ss << " "; + } + if (adds_) { + *ss << "+" << right_start_ << "," << (adds_ + common_); + } + *ss << " @@\n"; + } + + size_t left_start_, right_start_; + size_t adds_, removes_, common_; + std::list > hunk_, hunk_adds_, hunk_removes_; +}; + +} // namespace + +// Create a list of diff hunks in Unified diff format. +// Each hunk has a header generated by PrintHeader above plus a body with +// lines prefixed with ' ' for no change, '-' for deletion and '+' for +// addition. +// 'context' represents the desired unchanged prefix/suffix around the diff. +// If two hunks are close enough that their contexts overlap, then they are +// joined into one hunk. +std::string CreateUnifiedDiff(const std::vector& left, + const std::vector& right, + size_t context) { + const std::vector edits = CalculateOptimalEdits(left, right); + + size_t l_i = 0, r_i = 0, edit_i = 0; + std::stringstream ss; + while (edit_i < edits.size()) { + // Find first edit. + while (edit_i < edits.size() && edits[edit_i] == kMatch) { + ++l_i; + ++r_i; + ++edit_i; + } + + // Find the first line to include in the hunk. + const size_t prefix_context = std::min(l_i, context); + Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1); + for (size_t i = prefix_context; i > 0; --i) { + hunk.PushLine(' ', left[l_i - i].c_str()); + } + + // Iterate the edits until we found enough suffix for the hunk or the input + // is over. + size_t n_suffix = 0; + for (; edit_i < edits.size(); ++edit_i) { + if (n_suffix >= context) { + // Continue only if the next hunk is very close. + auto it = edits.begin() + static_cast(edit_i); + while (it != edits.end() && *it == kMatch) ++it; + if (it == edits.end() || + static_cast(it - edits.begin()) - edit_i >= context) { + // There is no next edit or it is too far away. + break; + } + } + + EditType edit = edits[edit_i]; + // Reset count when a non match is found. + n_suffix = edit == kMatch ? n_suffix + 1 : 0; + + if (edit == kMatch || edit == kRemove || edit == kReplace) { + hunk.PushLine(edit == kMatch ? ' ' : '-', left[l_i].c_str()); + } + if (edit == kAdd || edit == kReplace) { + hunk.PushLine('+', right[r_i].c_str()); + } + + // Advance indices, depending on edit type. + l_i += edit != kAdd; + r_i += edit != kRemove; + } + + if (!hunk.has_edits()) { + // We are done. We don't want this hunk. + break; + } + + hunk.PrintTo(&ss); + } + return ss.str(); +} + +} // namespace edit_distance + +namespace { + +// The string representation of the values received in EqFailure() are already +// escaped. Split them on escaped '\n' boundaries. Leave all other escaped +// characters the same. +std::vector SplitEscapedString(const std::string& str) { + std::vector lines; + size_t start = 0, end = str.size(); + if (end > 2 && str[0] == '"' && str[end - 1] == '"') { + ++start; + --end; + } + bool escaped = false; + for (size_t i = start; i + 1 < end; ++i) { + if (escaped) { + escaped = false; + if (str[i] == 'n') { + lines.push_back(str.substr(start, i - start - 1)); + start = i + 1; + } + } else { + escaped = str[i] == '\\'; + } + } + lines.push_back(str.substr(start, end - start)); + return lines; +} + +} // namespace + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. 
For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// lhs_expression: "foo" +// rhs_expression: "bar" +// lhs_value: "5" +// rhs_value: "6" +// +// The ignoring_case parameter is true if and only if the assertion is a +// *_STRCASEEQ*. When it's true, the string "Ignoring case" will +// be inserted into the message. +AssertionResult EqFailure(const char* lhs_expression, + const char* rhs_expression, + const std::string& lhs_value, + const std::string& rhs_value, + bool ignoring_case) { + Message msg; + msg << "Expected equality of these values:"; + msg << "\n " << lhs_expression; + if (lhs_value != lhs_expression) { + msg << "\n Which is: " << lhs_value; + } + msg << "\n " << rhs_expression; + if (rhs_value != rhs_expression) { + msg << "\n Which is: " << rhs_value; + } + + if (ignoring_case) { + msg << "\nIgnoring case"; + } + + if (!lhs_value.empty() && !rhs_value.empty()) { + const std::vector lhs_lines = + SplitEscapedString(lhs_value); + const std::vector rhs_lines = + SplitEscapedString(rhs_value); + if (lhs_lines.size() > 1 || rhs_lines.size() > 1) { + msg << "\nWith diff:\n" + << edit_distance::CreateUnifiedDiff(lhs_lines, rhs_lines); + } + } + + return AssertionFailure() << msg; +} + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { + const char* actual_message = assertion_result.message(); + Message msg; + msg << "Value of: " << expression_text + << "\n Actual: " << actual_predicate_value; + if (actual_message[0] != '\0') + msg << " (" << actual_message << ")"; + msg << "\nExpected: " << expected_predicate_value; + return msg.GetString(); +} + +// Helper function for implementing ASSERT_NEAR. +AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error) { + const double diff = fabs(val1 - val2); + if (diff <= abs_error) return AssertionSuccess(); + + return AssertionFailure() + << "The difference between " << expr1 << " and " << expr2 + << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n" + << expr1 << " evaluates to " << val1 << ",\n" + << expr2 << " evaluates to " << val2 << ", and\n" + << abs_error_expr << " evaluates to " << abs_error << "."; +} + + +// Helper template for implementing FloatLE() and DoubleLE(). +template +AssertionResult FloatingPointLE(const char* expr1, + const char* expr2, + RawType val1, + RawType val2) { + // Returns success if val1 is less than val2, + if (val1 < val2) { + return AssertionSuccess(); + } + + // or if val1 is almost equal to val2. + const FloatingPoint lhs(val1), rhs(val2); + if (lhs.AlmostEquals(rhs)) { + return AssertionSuccess(); + } + + // Note that the above two checks will both fail if either val1 or + // val2 is NaN, as the IEEE floating-point standard requires that + // any predicate involving a NaN must return false. 
+ + ::std::stringstream val1_ss; + val1_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << val1; + + ::std::stringstream val2_ss; + val2_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << val2; + + return AssertionFailure() + << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n" + << " Actual: " << StringStreamToString(&val1_ss) << " vs " + << StringStreamToString(&val2_ss); +} + +} // namespace internal + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +AssertionResult FloatLE(const char* expr1, const char* expr2, + float val1, float val2) { + return internal::FloatingPointLE(expr1, expr2, val1, val2); +} + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +AssertionResult DoubleLE(const char* expr1, const char* expr2, + double val1, double val2) { + return internal::FloatingPointLE(expr1, expr2, val1, val2); +} + +namespace internal { + +// The helper function for {ASSERT|EXPECT}_EQ with int or enum +// arguments. +AssertionResult CmpHelperEQ(const char* lhs_expression, + const char* rhs_expression, + BiggestInt lhs, + BiggestInt rhs) { + if (lhs == rhs) { + return AssertionSuccess(); + } + + return EqFailure(lhs_expression, + rhs_expression, + FormatForComparisonFailureMessage(lhs, rhs), + FormatForComparisonFailureMessage(rhs, lhs), + false); +} + +// A macro for implementing the helper functions needed to implement +// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here +// just to avoid copy-and-paste of similar code. +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + BiggestInt val1, BiggestInt val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +} + +// Implements the helper function for {ASSERT|EXPECT}_NE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(NE, !=) +// Implements the helper function for {ASSERT|EXPECT}_LE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LE, <=) +// Implements the helper function for {ASSERT|EXPECT}_LT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LT, < ) +// Implements the helper function for {ASSERT|EXPECT}_GE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GE, >=) +// Implements the helper function for {ASSERT|EXPECT}_GT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GT, > ) + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +AssertionResult CmpHelperSTREQ(const char* lhs_expression, + const char* rhs_expression, + const char* lhs, + const char* rhs) { + if (String::CStringEquals(lhs, rhs)) { + return AssertionSuccess(); + } + + return EqFailure(lhs_expression, + rhs_expression, + PrintToString(lhs), + PrintToString(rhs), + false); +} + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. 
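The FloatingPointLE helper above falls back on an "almost equal" check that accepts values within a small number of units in the last place (ULPs) of each other. A simplified standalone sketch of that idea for float (it ignores sign handling and NaN, which gtest's real FloatingPoint helper deals with via a biased integer ordering and a fixed 4-ULP tolerance):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <iostream>

// Distance in representable floats between two finite, non-negative values;
// a simplified take on what the ULP-based comparison measures.
static std::uint32_t UlpDistance(float a, float b) {
  std::uint32_t ia = 0, ib = 0;
  std::memcpy(&ia, &a, sizeof(ia));  // reinterpret the IEEE-754 bit patterns
  std::memcpy(&ib, &b, sizeof(ib));
  return ia > ib ? ia - ib : ib - ia;
}

int main() {
  const float x = 1.0f;
  const float y = std::nextafter(1.0f, 2.0f);  // exactly one ULP above 1.0f
  std::cout << "ulp distance: " << UlpDistance(x, y) << "\n";         // 1
  std::cout << "almost equal: " << (UlpDistance(x, y) <= 4) << "\n";  // 1
  std::cout << "operator==:   " << (x == y) << "\n";                  // 0
}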
+AssertionResult CmpHelperSTRCASEEQ(const char* lhs_expression, + const char* rhs_expression, + const char* lhs, + const char* rhs) { + if (String::CaseInsensitiveCStringEquals(lhs, rhs)) { + return AssertionSuccess(); + } + + return EqFailure(lhs_expression, + rhs_expression, + PrintToString(lhs), + PrintToString(rhs), + true); +} + +// The helper function for {ASSERT|EXPECT}_STRNE. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CaseInsensitiveCStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" + << s2_expression << ") (ignoring case), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +} // namespace internal + +namespace { + +// Helper functions for implementing IsSubString() and IsNotSubstring(). + +// This group of overloaded functions return true if and only if needle +// is a substring of haystack. NULL is considered a substring of +// itself only. + +bool IsSubstringPred(const char* needle, const char* haystack) { + if (needle == nullptr || haystack == nullptr) return needle == haystack; + + return strstr(haystack, needle) != nullptr; +} + +bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) { + if (needle == nullptr || haystack == nullptr) return needle == haystack; + + return wcsstr(haystack, needle) != nullptr; +} + +// StringType here can be either ::std::string or ::std::wstring. +template +bool IsSubstringPred(const StringType& needle, + const StringType& haystack) { + return haystack.find(needle) != StringType::npos; +} + +// This function implements either IsSubstring() or IsNotSubstring(), +// depending on the value of the expected_to_be_substring parameter. +// StringType here can be const char*, const wchar_t*, ::std::string, +// or ::std::wstring. +template +AssertionResult IsSubstringImpl( + bool expected_to_be_substring, + const char* needle_expr, const char* haystack_expr, + const StringType& needle, const StringType& haystack) { + if (IsSubstringPred(needle, haystack) == expected_to_be_substring) + return AssertionSuccess(); + + const bool is_wide_string = sizeof(needle[0]) > 1; + const char* const begin_string_quote = is_wide_string ? "L\"" : "\""; + return AssertionFailure() + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? "" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; +} + +} // namespace + +// IsSubstring() and IsNotSubstring() check whether needle is a +// substring of haystack (NULL is considered a substring of itself +// only), and return an appropriate error message when they fail. 
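EqFailure (with its unified-diff output built by CreateUnifiedDiff above) and the IsSubstring predicates are the failure-reporting paths users normally hit through ordinary assertions. Two small hedged usage examples:

#include <gtest/gtest.h>
#include <string>

// Multi-line mismatch: EqFailure() splits the escaped values on "\n" and
// appends a "With diff:" section built by CreateUnifiedDiff().
TEST(FailureOutputDemo, MultilineStringsGetAUnifiedDiff) {
  const std::string lhs = "alpha\nbravo\ncharlie\n";
  const std::string rhs = "alpha\nbrave\ncharlie\n";
  EXPECT_EQ(lhs, rhs);  // failure text ends with a "@@ ... @@" hunk
}

// The IsSubstring()/IsNotSubstring() predicates are meant to be used through
// the two-argument predicate-formatter macro.
TEST(FailureOutputDemo, SubstringPredicates) {
  const std::string log = "connection established on port 8080";
  EXPECT_PRED_FORMAT2(::testing::IsSubstring, "port 8080", log);
  EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, "timed out", log);
}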
+ +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +#if GTEST_HAS_STD_WSTRING +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +#if GTEST_OS_WINDOWS + +namespace { + +// Helper function for IsHRESULT{SuccessFailure} predicates +AssertionResult HRESULTFailureHelper(const char* expr, + const char* expected, + long hr) { // NOLINT +# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_TV_TITLE + + // Windows CE doesn't support FormatMessage. + const char error_text[] = ""; + +# else + + // Looks up the human-readable system message for the HRESULT code + // and since we're not passing any params to FormatMessage, we don't + // want inserts expanded. + const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS; + const DWORD kBufSize = 4096; + // Gets the system's human readable message string for this HRESULT. 
+ char error_text[kBufSize] = { '\0' }; + DWORD message_length = ::FormatMessageA(kFlags, + 0, // no source, we're asking system + static_cast(hr), // the error + 0, // no line width restrictions + error_text, // output buffer + kBufSize, // buf size + nullptr); // no arguments for inserts + // Trims tailing white space (FormatMessage leaves a trailing CR-LF) + for (; message_length && IsSpace(error_text[message_length - 1]); + --message_length) { + error_text[message_length - 1] = '\0'; + } + +# endif // GTEST_OS_WINDOWS_MOBILE + + const std::string error_hex("0x" + String::FormatHexInt(hr)); + return ::testing::AssertionFailure() + << "Expected: " << expr << " " << expected << ".\n" + << " Actual: " << error_hex << " " << error_text << "\n"; +} + +} // namespace + +AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT + if (SUCCEEDED(hr)) { + return AssertionSuccess(); + } + return HRESULTFailureHelper(expr, "succeeds", hr); +} + +AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT + if (FAILED(hr)) { + return AssertionSuccess(); + } + return HRESULTFailureHelper(expr, "fails", hr); +} + +#endif // GTEST_OS_WINDOWS + +// Utility functions for encoding Unicode text (wide strings) in +// UTF-8. + +// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8 +// like this: +// +// Code-point length Encoding +// 0 - 7 bits 0xxxxxxx +// 8 - 11 bits 110xxxxx 10xxxxxx +// 12 - 16 bits 1110xxxx 10xxxxxx 10xxxxxx +// 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + +// The maximum code-point a one-byte UTF-8 sequence can represent. +constexpr uint32_t kMaxCodePoint1 = (static_cast(1) << 7) - 1; + +// The maximum code-point a two-byte UTF-8 sequence can represent. +constexpr uint32_t kMaxCodePoint2 = (static_cast(1) << (5 + 6)) - 1; + +// The maximum code-point a three-byte UTF-8 sequence can represent. +constexpr uint32_t kMaxCodePoint3 = (static_cast(1) << (4 + 2*6)) - 1; + +// The maximum code-point a four-byte UTF-8 sequence can represent. +constexpr uint32_t kMaxCodePoint4 = (static_cast(1) << (3 + 3*6)) - 1; + +// Chops off the n lowest bits from a bit pattern. Returns the n +// lowest bits. As a side effect, the original bit pattern will be +// shifted to the right by n bits. +inline uint32_t ChopLowBits(uint32_t* bits, int n) { + const uint32_t low_bits = *bits & ((static_cast(1) << n) - 1); + *bits >>= n; + return low_bits; +} + +// Converts a Unicode code point to a narrow string in UTF-8 encoding. +// code_point parameter is of type uint32_t because wchar_t may not be +// wide enough to contain a code point. +// If the code_point is not a valid Unicode code point +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted +// to "(Invalid Unicode 0xXXXXXXXX)". +std::string CodePointToUtf8(uint32_t code_point) { + if (code_point > kMaxCodePoint4) { + return "(Invalid Unicode 0x" + String::FormatHexUInt32(code_point) + ")"; + } + + char str[5]; // Big enough for the largest valid code point. 
+ if (code_point <= kMaxCodePoint1) { + str[1] = '\0'; + str[0] = static_cast(code_point); // 0xxxxxxx + } else if (code_point <= kMaxCodePoint2) { + str[2] = '\0'; + str[1] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx + str[0] = static_cast(0xC0 | code_point); // 110xxxxx + } else if (code_point <= kMaxCodePoint3) { + str[3] = '\0'; + str[2] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx + str[1] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx + str[0] = static_cast(0xE0 | code_point); // 1110xxxx + } else { // code_point <= kMaxCodePoint4 + str[4] = '\0'; + str[3] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx + str[2] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx + str[1] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx + str[0] = static_cast(0xF0 | code_point); // 11110xxx + } + return str; +} + +// The following two functions only make sense if the system +// uses UTF-16 for wide string encoding. All supported systems +// with 16 bit wchar_t (Windows, Cygwin) do use UTF-16. + +// Determines if the arguments constitute UTF-16 surrogate pair +// and thus should be combined into a single Unicode code point +// using CreateCodePointFromUtf16SurrogatePair. +inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) { + return sizeof(wchar_t) == 2 && + (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00; +} + +// Creates a Unicode code point from UTF16 surrogate pair. +inline uint32_t CreateCodePointFromUtf16SurrogatePair(wchar_t first, + wchar_t second) { + const auto first_u = static_cast(first); + const auto second_u = static_cast(second); + const uint32_t mask = (1 << 10) - 1; + return (sizeof(wchar_t) == 2) + ? (((first_u & mask) << 10) | (second_u & mask)) + 0x10000 + : + // This function should not be called when the condition is + // false, but we provide a sensible default in case it is. + first_u; +} + +// Converts a wide string to a narrow string in UTF-8 encoding. +// The wide string is assumed to have the following encoding: +// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin) +// UTF-32 if sizeof(wchar_t) == 4 (on Linux) +// Parameter str points to a null-terminated wide string. +// Parameter num_chars may additionally limit the number +// of wchar_t characters processed. -1 is used when the entire string +// should be processed. +// If the string contains code points that are not valid Unicode code points +// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding +// and contains invalid UTF-16 surrogate pairs, values in those pairs +// will be encoded as individual Unicode characters from Basic Normal Plane. +std::string WideStringToUtf8(const wchar_t* str, int num_chars) { + if (num_chars == -1) + num_chars = static_cast(wcslen(str)); + + ::std::stringstream stream; + for (int i = 0; i < num_chars; ++i) { + uint32_t unicode_code_point; + + if (str[i] == L'\0') { + break; + } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) { + unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i], + str[i + 1]); + i++; + } else { + unicode_code_point = static_cast(str[i]); + } + + stream << CodePointToUtf8(unicode_code_point); + } + return StringStreamToString(&stream); +} + +// Converts a wide C string to an std::string using the UTF-8 encoding. +// NULL will be converted to "(null)". 
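CodePointToUtf8 above is the standard UTF-8 bit packing: each low six-bit group becomes a 10xxxxxx continuation byte and the leading byte encodes the sequence length. A worked example for the three-byte range, using the Euro sign U+20AC:

#include <cstdint>
#include <cstdio>

// Same packing as the three-byte case (U+0800..U+FFFF):
// 1110xxxx 10xxxxxx 10xxxxxx, filled from the low bits upward.
int main() {
  std::uint32_t cp = 0x20AC;                         // U+20AC, the Euro sign
  unsigned char b3 = 0x80 | (cp & 0x3F);  cp >>= 6;  // low 6 bits  -> 0xAC
  unsigned char b2 = 0x80 | (cp & 0x3F);  cp >>= 6;  // next 6 bits -> 0x82
  unsigned char b1 = 0xE0 | cp;                      // top 4 bits  -> 0xE2
  std::printf("%02X %02X %02X\n", b1, b2, b3);       // prints "E2 82 AC"
}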
+std::string String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == nullptr) return "(null)"; + + return internal::WideStringToUtf8(wide_c_str, -1); +} + +// Compares two wide C strings. Returns true if and only if they have the +// same content. +// +// Unlike wcscmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { + if (lhs == nullptr) return rhs == nullptr; + + if (rhs == nullptr) return false; + + return wcscmp(lhs, rhs) == 0; +} + +// Helper function for *_STREQ on wide strings. +AssertionResult CmpHelperSTREQ(const char* lhs_expression, + const char* rhs_expression, + const wchar_t* lhs, + const wchar_t* rhs) { + if (String::WideCStringEquals(lhs, rhs)) { + return AssertionSuccess(); + } + + return EqFailure(lhs_expression, + rhs_expression, + PrintToString(lhs), + PrintToString(rhs), + false); +} + +// Helper function for *_STRNE on wide strings. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2) { + if (!String::WideCStringEquals(s1, s2)) { + return AssertionSuccess(); + } + + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: " + << PrintToString(s1) + << " vs " << PrintToString(s2); +} + +// Compares two C strings, ignoring case. Returns true if and only if they have +// the same content. +// +// Unlike strcasecmp(), this function can handle NULL argument(s). A +// NULL C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { + if (lhs == nullptr) return rhs == nullptr; + if (rhs == nullptr) return false; + return posix::StrCaseCmp(lhs, rhs) == 0; +} + +// Compares two wide C strings, ignoring case. Returns true if and only if they +// have the same content. +// +// Unlike wcscasecmp(), this function can handle NULL argument(s). +// A NULL C string is considered different to any non-NULL wide C string, +// including the empty string. +// NB: The implementations on different platforms slightly differ. +// On windows, this method uses _wcsicmp which compares according to LC_CTYPE +// environment variable. On GNU platform this method uses wcscasecmp +// which compares according to LC_CTYPE category of the current locale. +// On MacOS X, it uses towlower, which also uses LC_CTYPE category of the +// current locale. +bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + if (lhs == nullptr) return rhs == nullptr; + + if (rhs == nullptr) return false; + +#if GTEST_OS_WINDOWS + return _wcsicmp(lhs, rhs) == 0; +#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID + return wcscasecmp(lhs, rhs) == 0; +#else + // Android, Mac OS X and Cygwin don't define wcscasecmp. + // Other unknown OSes may not define it either. + wint_t left, right; + do { + left = towlower(static_cast(*lhs++)); + right = towlower(static_cast(*rhs++)); + } while (left && left == right); + return left == right; +#endif // OS selector +} + +// Returns true if and only if str ends with the given suffix, ignoring case. +// Any string is considered to end with an empty suffix. 
+bool String::EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix) { + const size_t str_len = str.length(); + const size_t suffix_len = suffix.length(); + return (str_len >= suffix_len) && + CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len, + suffix.c_str()); +} + +// Formats an int value as "%02d". +std::string String::FormatIntWidth2(int value) { + std::stringstream ss; + ss << std::setfill('0') << std::setw(2) << value; + return ss.str(); +} + +// Formats an int value as "%X". +std::string String::FormatHexUInt32(uint32_t value) { + std::stringstream ss; + ss << std::hex << std::uppercase << value; + return ss.str(); +} + +// Formats an int value as "%X". +std::string String::FormatHexInt(int value) { + return FormatHexUInt32(static_cast(value)); +} + +// Formats a byte as "%02X". +std::string String::FormatByte(unsigned char value) { + std::stringstream ss; + ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase + << static_cast(value); + return ss.str(); +} + +// Converts the buffer in a stringstream to an std::string, converting NUL +// bytes to "\\0" along the way. +std::string StringStreamToString(::std::stringstream* ss) { + const ::std::string& str = ss->str(); + const char* const start = str.c_str(); + const char* const end = start + str.length(); + + std::string result; + result.reserve(static_cast(2 * (end - start))); + for (const char* ch = start; ch != end; ++ch) { + if (*ch == '\0') { + result += "\\0"; // Replaces NUL with "\\0"; + } else { + result += *ch; + } + } + + return result; +} + +// Appends the user-supplied message to the Google-Test-generated message. +std::string AppendUserMessage(const std::string& gtest_msg, + const Message& user_msg) { + // Appends the user message if it's non-empty. + const std::string user_msg_string = user_msg.GetString(); + if (user_msg_string.empty()) { + return gtest_msg; + } + + return gtest_msg + "\n" + user_msg_string; +} + +} // namespace internal + +// class TestResult + +// Creates an empty TestResult. +TestResult::TestResult() + : death_test_count_(0), start_timestamp_(0), elapsed_time_(0) {} + +// D'tor. +TestResult::~TestResult() { +} + +// Returns the i-th test part result among all the results. i can +// range from 0 to total_part_count() - 1. If i is not in that range, +// aborts the program. +const TestPartResult& TestResult::GetTestPartResult(int i) const { + if (i < 0 || i >= total_part_count()) + internal::posix::Abort(); + return test_part_results_.at(static_cast(i)); +} + +// Returns the i-th test property. i can range from 0 to +// test_property_count() - 1. If i is not in that range, aborts the +// program. +const TestProperty& TestResult::GetTestProperty(int i) const { + if (i < 0 || i >= test_property_count()) + internal::posix::Abort(); + return test_properties_.at(static_cast(i)); +} + +// Clears the test part results. +void TestResult::ClearTestPartResults() { + test_part_results_.clear(); +} + +// Adds a test part result to the list. +void TestResult::AddTestPartResult(const TestPartResult& test_part_result) { + test_part_results_.push_back(test_part_result); +} + +// Adds a test property to the list. If a property with the same key as the +// supplied property is already represented, the value of this test_property +// replaces the old value for that key. 
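AppendUserMessage above is what makes the familiar `ASSERT_*(...) << extra` idiom work: anything streamed into an assertion macro is collected in a Message and appended after the generated failure text. For example:

#include <gtest/gtest.h>

TEST(UserMessageDemo, ExtraContextIsAppended) {
  const int attempts = 3;
  // On failure, everything streamed into the assertion is appended after the
  // generated message by AppendUserMessage().
  EXPECT_EQ(0, attempts % 2) << "attempts=" << attempts
                             << " (expected an even retry count)";
}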
+void TestResult::RecordProperty(const std::string& xml_element, + const TestProperty& test_property) { + if (!ValidateTestProperty(xml_element, test_property)) { + return; + } + internal::MutexLock lock(&test_properites_mutex_); + const std::vector::iterator property_with_matching_key = + std::find_if(test_properties_.begin(), test_properties_.end(), + internal::TestPropertyKeyIs(test_property.key())); + if (property_with_matching_key == test_properties_.end()) { + test_properties_.push_back(test_property); + return; + } + property_with_matching_key->SetValue(test_property.value()); +} + +// The list of reserved attributes used in the element of XML +// output. +static const char* const kReservedTestSuitesAttributes[] = { + "disabled", + "errors", + "failures", + "name", + "random_seed", + "tests", + "time", + "timestamp" +}; + +// The list of reserved attributes used in the element of XML +// output. +static const char* const kReservedTestSuiteAttributes[] = { + "disabled", "errors", "failures", "name", "tests", "time", "timestamp"}; + +// The list of reserved attributes used in the element of XML output. +static const char* const kReservedTestCaseAttributes[] = { + "classname", "name", "status", "time", "type_param", + "value_param", "file", "line"}; + +// Use a slightly different set for allowed output to ensure existing tests can +// still RecordProperty("result") or "RecordProperty(timestamp") +static const char* const kReservedOutputTestCaseAttributes[] = { + "classname", "name", "status", "time", "type_param", + "value_param", "file", "line", "result", "timestamp"}; + +template +std::vector ArrayAsVector(const char* const (&array)[kSize]) { + return std::vector(array, array + kSize); +} + +static std::vector GetReservedAttributesForElement( + const std::string& xml_element) { + if (xml_element == "testsuites") { + return ArrayAsVector(kReservedTestSuitesAttributes); + } else if (xml_element == "testsuite") { + return ArrayAsVector(kReservedTestSuiteAttributes); + } else if (xml_element == "testcase") { + return ArrayAsVector(kReservedTestCaseAttributes); + } else { + GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element; + } + // This code is unreachable but some compilers may not realizes that. + return std::vector(); +} + +// TODO(jdesprez): Merge the two getReserved attributes once skip is improved +static std::vector GetReservedOutputAttributesForElement( + const std::string& xml_element) { + if (xml_element == "testsuites") { + return ArrayAsVector(kReservedTestSuitesAttributes); + } else if (xml_element == "testsuite") { + return ArrayAsVector(kReservedTestSuiteAttributes); + } else if (xml_element == "testcase") { + return ArrayAsVector(kReservedOutputTestCaseAttributes); + } else { + GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element; + } + // This code is unreachable but some compilers may not realizes that. 
+ return std::vector(); +} + +static std::string FormatWordList(const std::vector& words) { + Message word_list; + for (size_t i = 0; i < words.size(); ++i) { + if (i > 0 && words.size() > 2) { + word_list << ", "; + } + if (i == words.size() - 1) { + word_list << "and "; + } + word_list << "'" << words[i] << "'"; + } + return word_list.GetString(); +} + +static bool ValidateTestPropertyName( + const std::string& property_name, + const std::vector& reserved_names) { + if (std::find(reserved_names.begin(), reserved_names.end(), property_name) != + reserved_names.end()) { + ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name + << " (" << FormatWordList(reserved_names) + << " are reserved by " << GTEST_NAME_ << ")"; + return false; + } + return true; +} + +// Adds a failure if the key is a reserved attribute of the element named +// xml_element. Returns true if the property is valid. +bool TestResult::ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property) { + return ValidateTestPropertyName(test_property.key(), + GetReservedAttributesForElement(xml_element)); +} + +// Clears the object. +void TestResult::Clear() { + test_part_results_.clear(); + test_properties_.clear(); + death_test_count_ = 0; + elapsed_time_ = 0; +} + +// Returns true off the test part was skipped. +static bool TestPartSkipped(const TestPartResult& result) { + return result.skipped(); +} + +// Returns true if and only if the test was skipped. +bool TestResult::Skipped() const { + return !Failed() && CountIf(test_part_results_, TestPartSkipped) > 0; +} + +// Returns true if and only if the test failed. +bool TestResult::Failed() const { + for (int i = 0; i < total_part_count(); ++i) { + if (GetTestPartResult(i).failed()) + return true; + } + return false; +} + +// Returns true if and only if the test part fatally failed. +static bool TestPartFatallyFailed(const TestPartResult& result) { + return result.fatally_failed(); +} + +// Returns true if and only if the test fatally failed. +bool TestResult::HasFatalFailure() const { + return CountIf(test_part_results_, TestPartFatallyFailed) > 0; +} + +// Returns true if and only if the test part non-fatally failed. +static bool TestPartNonfatallyFailed(const TestPartResult& result) { + return result.nonfatally_failed(); +} + +// Returns true if and only if the test has a non-fatal failure. +bool TestResult::HasNonfatalFailure() const { + return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0; +} + +// Gets the number of all test parts. This is the sum of the number +// of successful test parts and the number of failed test parts. +int TestResult::total_part_count() const { + return static_cast(test_part_results_.size()); +} + +// Returns the number of the test properties. +int TestResult::test_property_count() const { + return static_cast(test_properties_.size()); +} + +// class Test + +// Creates a Test object. + +// The c'tor saves the states of all flags. +Test::Test() + : gtest_flag_saver_(new GTEST_FLAG_SAVER_) { +} + +// The d'tor restores the states of all flags. The actual work is +// done by the d'tor of the gtest_flag_saver_ field, and thus not +// visible here. +Test::~Test() { +} + +// Sets up the test fixture. +// +// A sub-class may override this. +void Test::SetUp() { +} + +// Tears down the test fixture. +// +// A sub-class may override this. +void Test::TearDown() { +} + +// Allows user supplied key value pairs to be recorded for later output. 
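ValidateTestPropertyName above is why RecordProperty() rejects keys such as "name" or "status": they would collide with attributes gtest itself writes into the report for each element. The public Test::RecordProperty that follows forwards into this validation; typical usage records only user-chosen keys, e.g.:

#include <gtest/gtest.h>

TEST(RecordPropertyDemo, EmitsCustomAttributes) {
  // These show up as attributes of this test's entry in the XML/JSON report.
  RecordProperty("build_flavor", "release");
  RecordProperty("shard_index", 2);
  // RecordProperty("name", "x");  // would ADD_FAILURE(): "name" is reserved
}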
+void Test::RecordProperty(const std::string& key, const std::string& value) { + UnitTest::GetInstance()->RecordProperty(key, value); +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const std::string& key, int value) { + Message value_message; + value_message << value; + RecordProperty(key, value_message.GetString().c_str()); +} + +namespace internal { + +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const std::string& message) { + // This function is a friend of UnitTest and as such has access to + // AddTestPartResult. + UnitTest::GetInstance()->AddTestPartResult( + result_type, + nullptr, // No info about the source file where the exception occurred. + -1, // We have no info on which line caused the exception. + message, + ""); // No stack trace, either. +} + +} // namespace internal + +// Google Test requires all tests in the same test suite to use the same test +// fixture class. This function checks if the current test has the +// same fixture class as the first test in the current test suite. If +// yes, it returns true; otherwise it generates a Google Test failure and +// returns false. +bool Test::HasSameFixtureClass() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + const TestSuite* const test_suite = impl->current_test_suite(); + + // Info about the first test in the current test suite. + const TestInfo* const first_test_info = test_suite->test_info_list()[0]; + const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_; + const char* const first_test_name = first_test_info->name(); + + // Info about the current test. + const TestInfo* const this_test_info = impl->current_test_info(); + const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_; + const char* const this_test_name = this_test_info->name(); + + if (this_fixture_id != first_fixture_id) { + // Is the first test defined using TEST? + const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId(); + // Is this test defined using TEST? + const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId(); + + if (first_is_TEST || this_is_TEST) { + // Both TEST and TEST_F appear in same test suite, which is incorrect. + // Tell the user how to fix this. + + // Gets the name of the TEST and the name of the TEST_F. Note + // that first_is_TEST and this_is_TEST cannot both be true, as + // the fixture IDs are different for the two tests. + const char* const TEST_name = + first_is_TEST ? first_test_name : this_test_name; + const char* const TEST_F_name = + first_is_TEST ? this_test_name : first_test_name; + + ADD_FAILURE() + << "All tests in the same test suite must use the same test fixture\n" + << "class, so mixing TEST_F and TEST in the same test suite is\n" + << "illegal. In test suite " << this_test_info->test_suite_name() + << ",\n" + << "test " << TEST_F_name << " is defined using TEST_F but\n" + << "test " << TEST_name << " is defined using TEST. You probably\n" + << "want to change the TEST to TEST_F or move it to another test\n" + << "case."; + } else { + // Two fixture classes with the same name appear in two different + // namespaces, which is not allowed. Tell the user how to fix this. + ADD_FAILURE() + << "All tests in the same test suite must use the same test fixture\n" + << "class. 
However, in test suite " + << this_test_info->test_suite_name() << ",\n" + << "you defined test " << first_test_name << " and test " + << this_test_name << "\n" + << "using two different test fixture classes. This can happen if\n" + << "the two classes are from different namespaces or translation\n" + << "units and have the same name. You should probably rename one\n" + << "of the classes to put the tests into different test suites."; + } + return false; + } + + return true; +} + +#if GTEST_HAS_SEH + +// Adds an "exception thrown" fatal failure to the current test. This +// function returns its result via an output parameter pointer because VC++ +// prohibits creation of objects with destructors on stack in functions +// using __try (see error C2712). +static std::string* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { + Message message; + message << "SEH exception with code 0x" << std::setbase(16) << + exception_code << std::setbase(10) << " thrown in " << location << "."; + + return new std::string(message.GetString()); +} + +#endif // GTEST_HAS_SEH + +namespace internal { + +#if GTEST_HAS_EXCEPTIONS + +// Adds an "exception thrown" fatal failure to the current test. +static std::string FormatCxxExceptionMessage(const char* description, + const char* location) { + Message message; + if (description != nullptr) { + message << "C++ exception with description \"" << description << "\""; + } else { + message << "Unknown C++ exception"; + } + message << " thrown in " << location << "."; + + return message.GetString(); +} + +static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result); + +GoogleTestFailureException::GoogleTestFailureException( + const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} + +#endif // GTEST_HAS_EXCEPTIONS + +// We put these helper functions in the internal namespace as IBM's xlC +// compiler rejects the code if they were declared static. + +// Runs the given method and handles SEH exceptions it throws, when +// SEH is supported; returns the 0-value for type Result in case of an +// SEH exception. (Microsoft compilers cannot handle SEH and C++ +// exceptions in the same function. Therefore, we provide a separate +// wrapper function for handling SEH exceptions.) +template +Result HandleSehExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { +#if GTEST_HAS_SEH + __try { + return (object->*method)(); + } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT + GetExceptionCode())) { + // We create the exception message on the heap because VC++ prohibits + // creation of objects with destructors on stack in functions using __try + // (see error C2712). + std::string* exception_message = FormatSehExceptionMessage( + GetExceptionCode(), location); + internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure, + *exception_message); + delete exception_message; + return static_cast(0); + } +#else + (void)location; + return (object->*method)(); +#endif // GTEST_HAS_SEH +} + +// Runs the given method and catches and reports C++ and/or SEH-style +// exceptions, if they are supported; returns the 0-value for type +// Result in case of an SEH exception. 
+template +Result HandleExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { + // NOTE: The user code can affect the way in which Google Test handles + // exceptions by setting GTEST_FLAG(catch_exceptions), but only before + // RUN_ALL_TESTS() starts. It is technically possible to check the flag + // after the exception is caught and either report or re-throw the + // exception based on the flag's value: + // + // try { + // // Perform the test method. + // } catch (...) { + // if (GTEST_FLAG(catch_exceptions)) + // // Report the exception as failure. + // else + // throw; // Re-throws the original exception. + // } + // + // However, the purpose of this flag is to allow the program to drop into + // the debugger when the exception is thrown. On most platforms, once the + // control enters the catch block, the exception origin information is + // lost and the debugger will stop the program at the point of the + // re-throw in this function -- instead of at the point of the original + // throw statement in the code under test. For this reason, we perform + // the check early, sacrificing the ability to affect Google Test's + // exception handling in the method where the exception is thrown. + if (internal::GetUnitTestImpl()->catch_exceptions()) { +#if GTEST_HAS_EXCEPTIONS + try { + return HandleSehExceptionsInMethodIfSupported(object, method, location); + } catch (const AssertionException&) { // NOLINT + // This failure was reported already. + } catch (const internal::GoogleTestFailureException&) { // NOLINT + // This exception type can only be thrown by a failed Google + // Test assertion with the intention of letting another testing + // framework catch it. Therefore we just re-throw it. + throw; + } catch (const std::exception& e) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(e.what(), location)); + } catch (...) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(nullptr, location)); + } + return static_cast(0); +#else + return HandleSehExceptionsInMethodIfSupported(object, method, location); +#endif // GTEST_HAS_EXCEPTIONS + } else { + return (object->*method)(); + } +} + +} // namespace internal + +// Runs the test and updates the test result. +void Test::Run() { + if (!HasSameFixtureClass()) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()"); + // We will run the test only if SetUp() was successful and didn't call + // GTEST_SKIP(). + if (!HasFatalFailure() && !IsSkipped()) { + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TestBody, "the test body"); + } + + // However, we want to clean up as much as possible. Hence we will + // always call TearDown(), even if SetUp() or the test body has + // failed. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TearDown, "TearDown()"); +} + +// Returns true if and only if the current test has a fatal failure. +bool Test::HasFatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure(); +} + +// Returns true if and only if the current test has a non-fatal failure. 
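A short fixture sketch, assuming only the lifecycle implemented by Test::Run() above (the fixture name is made up and not part of the patch): the test body runs only when SetUp() neither fails fatally nor skips, while TearDown() is always attempted.

#include "gtest/gtest.h"

class LifecycleSketch : public ::testing::Test {
 protected:
  void SetUp() override { GTEST_SKIP() << "resource not available"; }
  void TearDown() override {}  // still runs even though the body was skipped
};

TEST_F(LifecycleSketch, BodyIsSkipped) {
  FAIL() << "never reached, because SetUp() skipped the test";
}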
+bool Test::HasNonfatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()-> + HasNonfatalFailure(); +} + +// Returns true if and only if the current test was skipped. +bool Test::IsSkipped() { + return internal::GetUnitTestImpl()->current_test_result()->Skipped(); +} + +// class TestInfo + +// Constructs a TestInfo object. It assumes ownership of the test factory +// object. +TestInfo::TestInfo(const std::string& a_test_suite_name, + const std::string& a_name, const char* a_type_param, + const char* a_value_param, + internal::CodeLocation a_code_location, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory) + : test_suite_name_(a_test_suite_name), + name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : nullptr), + value_param_(a_value_param ? new std::string(a_value_param) : nullptr), + location_(a_code_location), + fixture_class_id_(fixture_class_id), + should_run_(false), + is_disabled_(false), + matches_filter_(false), + factory_(factory), + result_() {} + +// Destructs a TestInfo object. +TestInfo::~TestInfo() { delete factory_; } + +namespace internal { + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_suite_name: name of the test suite +// name: name of the test +// type_param: the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param: text representation of the test's value parameter, +// or NULL if this is not a value-parameterized test. +// code_location: code location where the test is defined +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test suite +// tear_down_tc: pointer to the function that tears down the test suite +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +TestInfo* MakeAndRegisterTestInfo( + const char* test_suite_name, const char* name, const char* type_param, + const char* value_param, CodeLocation code_location, + TypeId fixture_class_id, SetUpTestSuiteFunc set_up_tc, + TearDownTestSuiteFunc tear_down_tc, TestFactoryBase* factory) { + TestInfo* const test_info = + new TestInfo(test_suite_name, name, type_param, value_param, + code_location, fixture_class_id, factory); + GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info); + return test_info; +} + +void ReportInvalidTestSuiteType(const char* test_suite_name, + CodeLocation code_location) { + Message errors; + errors + << "Attempted redefinition of test suite " << test_suite_name << ".\n" + << "All tests in the same test suite must use the same test fixture\n" + << "class. However, in test suite " << test_suite_name << ", you tried\n" + << "to define a test using a fixture class different from the one\n" + << "used earlier. This can happen if the two fixture classes are\n" + << "from different namespaces and have the same name. You should\n" + << "probably rename one of the classes to put the tests into different\n" + << "test suites."; + + GTEST_LOG_(ERROR) << FormatFileLocation(code_location.file.c_str(), + code_location.line) + << " " << errors.GetString(); +} +} // namespace internal + +namespace { + +// A predicate that checks the test name of a TestInfo against a known +// value. +// +// This is used for implementation of the TestSuite class only. 
We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestNameIs is copyable. +class TestNameIs { + public: + // Constructor. + // + // TestNameIs has NO default constructor. + explicit TestNameIs(const char* name) + : name_(name) {} + + // Returns true if and only if the test name of test_info matches name_. + bool operator()(const TestInfo * test_info) const { + return test_info && test_info->name() == name_; + } + + private: + std::string name_; +}; + +} // namespace + +namespace internal { + +// This method expands all parameterized tests registered with macros TEST_P +// and INSTANTIATE_TEST_SUITE_P into regular tests and registers those. +// This will be done just once during the program runtime. +void UnitTestImpl::RegisterParameterizedTests() { + if (!parameterized_tests_registered_) { + parameterized_test_registry_.RegisterTests(); + type_parameterized_test_registry_.CheckForInstantiations(); + parameterized_tests_registered_ = true; + } +} + +} // namespace internal + +// Creates the test object, runs it, records its result, and then +// deletes it. +void TestInfo::Run() { + if (!should_run_) return; + + // Tells UnitTest where to store test result. + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_info(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Notifies the unit test event listeners that a test is about to start. + repeater->OnTestStart(*this); + + const TimeInMillis start = internal::GetTimeInMillis(); + + impl->os_stack_trace_getter()->UponLeavingGTest(); + + // Creates the test object. + Test* const test = internal::HandleExceptionsInMethodIfSupported( + factory_, &internal::TestFactoryBase::CreateTest, + "the test fixture's constructor"); + + // Runs the test if the constructor didn't generate a fatal failure or invoke + // GTEST_SKIP(). + // Note that the object will not be null + if (!Test::HasFatalFailure() && !Test::IsSkipped()) { + // This doesn't throw as all user code that can throw are wrapped into + // exception handling code. + test->Run(); + } + + if (test != nullptr) { + // Deletes the test object. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + test, &Test::DeleteSelf_, "the test fixture's destructor"); + } + + result_.set_start_timestamp(start); + result_.set_elapsed_time(internal::GetTimeInMillis() - start); + + // Notifies the unit test event listener that a test has just finished. + repeater->OnTestEnd(*this); + + // Tells UnitTest to stop associating assertion results to this + // test. + impl->set_current_test_info(nullptr); +} + +// class TestSuite + +// Gets the number of successful tests in this test suite. +int TestSuite::successful_test_count() const { + return CountIf(test_info_list_, TestPassed); +} + +// Gets the number of successful tests in this test suite. +int TestSuite::skipped_test_count() const { + return CountIf(test_info_list_, TestSkipped); +} + +// Gets the number of failed tests in this test suite. +int TestSuite::failed_test_count() const { + return CountIf(test_info_list_, TestFailed); +} + +// Gets the number of disabled tests that will be reported in the XML report. +int TestSuite::reportable_disabled_test_count() const { + return CountIf(test_info_list_, TestReportableDisabled); +} + +// Gets the number of disabled tests in this test suite. 
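RegisterParameterizedTests() above is the pass that turns value-parameterized declarations into concrete TestInfo objects. For reference, the kind of input it expands looks like this (an illustrative sketch, not part of the patch):

#include "gtest/gtest.h"

class IsEvenSketch : public ::testing::TestWithParam<int> {};

TEST_P(IsEvenSketch, ValueIsEven) { EXPECT_EQ(GetParam() % 2, 0); }

// Registers IsEvenSketch.ValueIsEven once per value below.
INSTANTIATE_TEST_SUITE_P(SmallEvens, IsEvenSketch, ::testing::Values(0, 2, 4));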
+int TestSuite::disabled_test_count() const { + return CountIf(test_info_list_, TestDisabled); +} + +// Gets the number of tests to be printed in the XML report. +int TestSuite::reportable_test_count() const { + return CountIf(test_info_list_, TestReportable); +} + +// Get the number of tests in this test suite that should run. +int TestSuite::test_to_run_count() const { + return CountIf(test_info_list_, ShouldRunTest); +} + +// Gets the number of all tests. +int TestSuite::total_test_count() const { + return static_cast(test_info_list_.size()); +} + +// Creates a TestSuite with the given name. +// +// Arguments: +// +// name: name of the test suite +// a_type_param: the name of the test suite's type parameter, or NULL if +// this is not a typed or a type-parameterized test suite. +// set_up_tc: pointer to the function that sets up the test suite +// tear_down_tc: pointer to the function that tears down the test suite +TestSuite::TestSuite(const char* a_name, const char* a_type_param, + internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc) + : name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : nullptr), + set_up_tc_(set_up_tc), + tear_down_tc_(tear_down_tc), + should_run_(false), + start_timestamp_(0), + elapsed_time_(0) {} + +// Destructor of TestSuite. +TestSuite::~TestSuite() { + // Deletes every Test in the collection. + ForEach(test_info_list_, internal::Delete); +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +const TestInfo* TestSuite::GetTestInfo(int i) const { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? nullptr : test_info_list_[static_cast(index)]; +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +TestInfo* TestSuite::GetMutableTestInfo(int i) { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? nullptr : test_info_list_[static_cast(index)]; +} + +// Adds a test to this test suite. Will delete the test upon +// destruction of the TestSuite object. +void TestSuite::AddTestInfo(TestInfo* test_info) { + test_info_list_.push_back(test_info); + test_indices_.push_back(static_cast(test_indices_.size())); +} + +// Runs every test in this TestSuite. 
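As a reminder of what these counters measure, a test whose name carries the DISABLED_ prefix is registered but not run unless --gtest_also_run_disabled_tests is given (illustrative sketch, not part of the patch):

#include "gtest/gtest.h"

// Counted by disabled_test_count(); only executed when
// --gtest_also_run_disabled_tests is passed on the command line.
TEST(CounterSketch, DISABLED_NotRunByDefault) { EXPECT_TRUE(false); }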
+void TestSuite::Run() { + if (!should_run_) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_suite(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Call both legacy and the new API + repeater->OnTestSuiteStart(*this); +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI + repeater->OnTestCaseStart(*this); +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI + + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestSuite::RunSetUpTestSuite, "SetUpTestSuite()"); + + start_timestamp_ = internal::GetTimeInMillis(); + for (int i = 0; i < total_test_count(); i++) { + GetMutableTestInfo(i)->Run(); + } + elapsed_time_ = internal::GetTimeInMillis() - start_timestamp_; + + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestSuite::RunTearDownTestSuite, "TearDownTestSuite()"); + + // Call both legacy and the new API + repeater->OnTestSuiteEnd(*this); +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI + repeater->OnTestCaseEnd(*this); +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI + + impl->set_current_test_suite(nullptr); +} + +// Clears the results of all tests in this test suite. +void TestSuite::ClearResult() { + ad_hoc_test_result_.Clear(); + ForEach(test_info_list_, TestInfo::ClearTestResult); +} + +// Shuffles the tests in this test suite. +void TestSuite::ShuffleTests(internal::Random* random) { + Shuffle(random, &test_indices_); +} + +// Restores the test order to before the first shuffle. +void TestSuite::UnshuffleTests() { + for (size_t i = 0; i < test_indices_.size(); i++) { + test_indices_[i] = static_cast(i); + } +} + +// Formats a countable noun. Depending on its quantity, either the +// singular form or the plural form is used. e.g. +// +// FormatCountableNoun(1, "formula", "formuli") returns "1 formula". +// FormatCountableNoun(5, "book", "books") returns "5 books". +static std::string FormatCountableNoun(int count, + const char * singular_form, + const char * plural_form) { + return internal::StreamableToString(count) + " " + + (count == 1 ? singular_form : plural_form); +} + +// Formats the count of tests. +static std::string FormatTestCount(int test_count) { + return FormatCountableNoun(test_count, "test", "tests"); +} + +// Formats the count of test suites. +static std::string FormatTestSuiteCount(int test_suite_count) { + return FormatCountableNoun(test_suite_count, "test suite", "test suites"); +} + +// Converts a TestPartResult::Type enum to human-friendly string +// representation. Both kNonFatalFailure and kFatalFailure are translated +// to "Failure", as the user usually doesn't care about the difference +// between the two when viewing the test result. +static const char * TestPartResultTypeToString(TestPartResult::Type type) { + switch (type) { + case TestPartResult::kSkip: + return "Skipped"; + case TestPartResult::kSuccess: + return "Success"; + + case TestPartResult::kNonFatalFailure: + case TestPartResult::kFatalFailure: +#ifdef _MSC_VER + return "error: "; +#else + return "Failure\n"; +#endif + default: + return "Unknown result type"; + } +} + +namespace internal { + +// Prints a TestPartResult to an std::string. 
+static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result) { + return (Message() + << internal::FormatFileLocation(test_part_result.file_name(), + test_part_result.line_number()) + << " " << TestPartResultTypeToString(test_part_result.type()) + << test_part_result.message()).GetString(); +} + +// Prints a TestPartResult. +static void PrintTestPartResult(const TestPartResult& test_part_result) { + const std::string& result = + PrintTestPartResultToString(test_part_result); + printf("%s\n", result.c_str()); + fflush(stdout); + // If the test program runs in Visual Studio or a debugger, the + // following statements add the test part result message to the Output + // window such that the user can double-click on it to jump to the + // corresponding source code location; otherwise they do nothing. +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + // We don't call OutputDebugString*() on Windows Mobile, as printing + // to stdout is done by OutputDebugString() there already - we don't + // want the same message printed twice. + ::OutputDebugStringA(result.c_str()); + ::OutputDebugStringA("\n"); +#endif +} + +// class PrettyUnitTestResultPrinter +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \ + !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW + +// Returns the character attribute for the given color. +static WORD GetColorAttribute(GTestColor color) { + switch (color) { + case COLOR_RED: return FOREGROUND_RED; + case COLOR_GREEN: return FOREGROUND_GREEN; + case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN; + default: return 0; + } +} + +static int GetBitOffset(WORD color_mask) { + if (color_mask == 0) return 0; + + int bitOffset = 0; + while ((color_mask & 1) == 0) { + color_mask >>= 1; + ++bitOffset; + } + return bitOffset; +} + +static WORD GetNewColor(GTestColor color, WORD old_color_attrs) { + // Let's reuse the BG + static const WORD background_mask = BACKGROUND_BLUE | BACKGROUND_GREEN | + BACKGROUND_RED | BACKGROUND_INTENSITY; + static const WORD foreground_mask = FOREGROUND_BLUE | FOREGROUND_GREEN | + FOREGROUND_RED | FOREGROUND_INTENSITY; + const WORD existing_bg = old_color_attrs & background_mask; + + WORD new_color = + GetColorAttribute(color) | existing_bg | FOREGROUND_INTENSITY; + static const int bg_bitOffset = GetBitOffset(background_mask); + static const int fg_bitOffset = GetBitOffset(foreground_mask); + + if (((new_color & background_mask) >> bg_bitOffset) == + ((new_color & foreground_mask) >> fg_bitOffset)) { + new_color ^= FOREGROUND_INTENSITY; // invert intensity + } + return new_color; +} + +#else + +// Returns the ANSI color code for the given color. COLOR_DEFAULT is +// an invalid input. +static const char* GetAnsiColorCode(GTestColor color) { + switch (color) { + case COLOR_RED: return "1"; + case COLOR_GREEN: return "2"; + case COLOR_YELLOW: return "3"; + default: + return nullptr; + } +} + +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns true if and only if Google Test should use colors in the output. +bool ShouldUseColor(bool stdout_is_tty) { + const char* const gtest_color = GTEST_FLAG(color).c_str(); + + if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) { +#if GTEST_OS_WINDOWS /*&& !GTEST_OS_WINDOWS_MINGW*/ + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return stdout_is_tty; +#else + // On non-Windows platforms, we rely on the TERM variable. 
+ const char* const term = posix::GetEnv("TERM"); + const bool term_supports_color = + String::CStringEquals(term, "xterm") || + String::CStringEquals(term, "xterm-color") || + String::CStringEquals(term, "xterm-256color") || + String::CStringEquals(term, "screen") || + String::CStringEquals(term, "screen-256color") || + String::CStringEquals(term, "tmux") || + String::CStringEquals(term, "tmux-256color") || + String::CStringEquals(term, "rxvt-unicode") || + String::CStringEquals(term, "rxvt-unicode-256color") || + String::CStringEquals(term, "linux") || + String::CStringEquals(term, "cygwin"); + return stdout_is_tty && term_supports_color; +#endif // GTEST_OS_WINDOWS + } + + return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); + // We take "yes", "true", "t", and "1" as meaning "yes". If the + // value is neither one of these nor "auto", we treat it as "no" to + // be conservative. +} + +// Helpers for printing colored strings to stdout. Note that on Windows, we +// cannot simply emit special characters and have the terminal change colors. +// This routine must actually emit the characters rather than return a string +// that would be colored when printed, as can be done on Linux. +void ColoredPrintf(GTestColor color, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS || GTEST_OS_IOS || \ + GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT || defined(ESP_PLATFORM) + const bool use_color = AlwaysFalse(); +#else + static const bool in_color_mode = + ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); + const bool use_color = in_color_mode && (color != COLOR_DEFAULT); +#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS + + if (!use_color) { + vprintf(fmt, args); + va_end(args); + return; + } + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \ + !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + const WORD new_color = GetNewColor(color, old_color_attrs); + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, new_color); + + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + printf("\033[0;3%sm", GetAnsiColorCode(color)); + vprintf(fmt, args); + printf("\033[m"); // Resets the terminal to default. +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + va_end(args); +} + +// Text printed in Google Test's text output and --gtest_list_tests +// output to label the type parameter and value parameter for a test. 
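ShouldUseColor() and ColoredPrintf() above decide from the --gtest_color flag, the TERM variable and TTY detection, so colored output can also be forced from a custom main(). A hedged sketch (this main() is illustrative, not the project's actual test runner):

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::GTEST_FLAG(color) = "yes";  // same as --gtest_color=yes
  return RUN_ALL_TESTS();
}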
+static const char kTypeParamLabel[] = "TypeParam"; +static const char kValueParamLabel[] = "GetParam()"; + +static void PrintFullTestCommentIfPresent(const TestInfo& test_info) { + const char* const type_param = test_info.type_param(); + const char* const value_param = test_info.value_param(); + + if (type_param != nullptr || value_param != nullptr) { + printf(", where "); + if (type_param != nullptr) { + printf("%s = %s", kTypeParamLabel, type_param); + if (value_param != nullptr) printf(" and "); + } + if (value_param != nullptr) { + printf("%s = %s", kValueParamLabel, value_param); + } + } +} + +// This class implements the TestEventListener interface. +// +// Class PrettyUnitTestResultPrinter is copyable. +class PrettyUnitTestResultPrinter : public TestEventListener { + public: + PrettyUnitTestResultPrinter() {} + static void PrintTestName(const char* test_suite, const char* test) { + printf("%s.%s", test_suite, test); + } + + // The following methods override what's in the TestEventListener class. + void OnTestProgramStart(const UnitTest& /*unit_test*/) override {} + void OnTestIterationStart(const UnitTest& unit_test, int iteration) override; + void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override; + void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {} +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestCaseStart(const TestCase& test_case) override; +#else + void OnTestSuiteStart(const TestSuite& test_suite) override; +#endif // OnTestCaseStart + + void OnTestStart(const TestInfo& test_info) override; + + void OnTestPartResult(const TestPartResult& result) override; + void OnTestEnd(const TestInfo& test_info) override; +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestCaseEnd(const TestCase& test_case) override; +#else + void OnTestSuiteEnd(const TestSuite& test_suite) override; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + + void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override; + void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {} + void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; + void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {} + + private: + static void PrintFailedTests(const UnitTest& unit_test); + static void PrintFailedTestSuites(const UnitTest& unit_test); + static void PrintSkippedTests(const UnitTest& unit_test); +}; + + // Fired before each iteration of tests starts. +void PrettyUnitTestResultPrinter::OnTestIterationStart( + const UnitTest& unit_test, int iteration) { + if (GTEST_FLAG(repeat) != 1) + printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1); + + const char* const filter = GTEST_FLAG(filter).c_str(); + + // Prints the filter if it's not *. This reminds the user that some + // tests may be skipped. 
+ if (!String::CStringEquals(filter, kUniversalFilter)) { + ColoredPrintf(COLOR_YELLOW, + "Note: %s filter = %s\n", GTEST_NAME_, filter); + } + + if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) { + const int32_t shard_index = Int32FromEnvOrDie(kTestShardIndex, -1); + ColoredPrintf(COLOR_YELLOW, + "Note: This is test shard %d of %s.\n", + static_cast(shard_index) + 1, + internal::posix::GetEnv(kTestTotalShards)); + } + + if (GTEST_FLAG(shuffle)) { + ColoredPrintf(COLOR_YELLOW, + "Note: Randomizing tests' orders with a seed of %d .\n", + unit_test.random_seed()); + } + + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("Running %s from %s.\n", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestSuiteCount(unit_test.test_suite_to_run_count()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment set-up.\n"); + fflush(stdout); +} + +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) { + const std::string counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s", counts.c_str(), test_case.name()); + if (test_case.type_param() == nullptr) { + printf("\n"); + } else { + printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param()); + } + fflush(stdout); +} +#else +void PrettyUnitTestResultPrinter::OnTestSuiteStart( + const TestSuite& test_suite) { + const std::string counts = + FormatCountableNoun(test_suite.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s", counts.c_str(), test_suite.name()); + if (test_suite.type_param() == nullptr) { + printf("\n"); + } else { + printf(", where %s = %s\n", kTypeParamLabel, test_suite.type_param()); + } + fflush(stdout); +} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { + ColoredPrintf(COLOR_GREEN, "[ RUN ] "); + PrintTestName(test_info.test_suite_name(), test_info.name()); + printf("\n"); + fflush(stdout); +} + +// Called after an assertion failure. +void PrettyUnitTestResultPrinter::OnTestPartResult( + const TestPartResult& result) { + switch (result.type()) { + // If the test part succeeded, we don't need to do anything. + case TestPartResult::kSuccess: + return; + default: + // Print failure message from the assertion + // (e.g. expected this and got that). 
+ PrintTestPartResult(result); + fflush(stdout); + } +} + +void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { + if (test_info.result()->Passed()) { + ColoredPrintf(COLOR_GREEN, "[ OK ] "); + } else if (test_info.result()->Skipped()) { + ColoredPrintf(COLOR_GREEN, "[ SKIPPED ] "); + } else { + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + } + PrintTestName(test_info.test_suite_name(), test_info.name()); + if (test_info.result()->Failed()) + PrintFullTestCommentIfPresent(test_info); + + if (GTEST_FLAG(print_time)) { + printf(" (%s ms)\n", internal::StreamableToString( + test_info.result()->elapsed_time()).c_str()); + } else { + printf("\n"); + } + fflush(stdout); +} + +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) { + if (!GTEST_FLAG(print_time)) return; + + const std::string counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s (%s ms total)\n\n", counts.c_str(), test_case.name(), + internal::StreamableToString(test_case.elapsed_time()).c_str()); + fflush(stdout); +} +#else +void PrettyUnitTestResultPrinter::OnTestSuiteEnd(const TestSuite& test_suite) { + if (!GTEST_FLAG(print_time)) return; + + const std::string counts = + FormatCountableNoun(test_suite.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s (%s ms total)\n\n", counts.c_str(), test_suite.name(), + internal::StreamableToString(test_suite.elapsed_time()).c_str()); + fflush(stdout); +} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment tear-down\n"); + fflush(stdout); +} + +// Internal helper for printing the list of failed tests. +void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) { + const int failed_test_count = unit_test.failed_test_count(); + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str()); + + for (int i = 0; i < unit_test.total_test_suite_count(); ++i) { + const TestSuite& test_suite = *unit_test.GetTestSuite(i); + if (!test_suite.should_run() || (test_suite.failed_test_count() == 0)) { + continue; + } + for (int j = 0; j < test_suite.total_test_count(); ++j) { + const TestInfo& test_info = *test_suite.GetTestInfo(j); + if (!test_info.should_run() || !test_info.result()->Failed()) { + continue; + } + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s.%s", test_suite.name(), test_info.name()); + PrintFullTestCommentIfPresent(test_info); + printf("\n"); + } + } + printf("\n%2d FAILED %s\n", failed_test_count, + failed_test_count == 1 ? "TEST" : "TESTS"); +} + +// Internal helper for printing the list of test suite failures not covered by +// PrintFailedTests. 
+void PrettyUnitTestResultPrinter::PrintFailedTestSuites( + const UnitTest& unit_test) { + int suite_failure_count = 0; + for (int i = 0; i < unit_test.total_test_suite_count(); ++i) { + const TestSuite& test_suite = *unit_test.GetTestSuite(i); + if (!test_suite.should_run()) { + continue; + } + if (test_suite.ad_hoc_test_result().Failed()) { + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s: SetUpTestSuite or TearDownTestSuite\n", test_suite.name()); + ++suite_failure_count; + } + } + if (suite_failure_count > 0) { + printf("\n%2d FAILED TEST %s\n", suite_failure_count, + suite_failure_count == 1 ? "SUITE" : "SUITES"); + } +} + +// Internal helper for printing the list of skipped tests. +void PrettyUnitTestResultPrinter::PrintSkippedTests(const UnitTest& unit_test) { + const int skipped_test_count = unit_test.skipped_test_count(); + if (skipped_test_count == 0) { + return; + } + + for (int i = 0; i < unit_test.total_test_suite_count(); ++i) { + const TestSuite& test_suite = *unit_test.GetTestSuite(i); + if (!test_suite.should_run() || (test_suite.skipped_test_count() == 0)) { + continue; + } + for (int j = 0; j < test_suite.total_test_count(); ++j) { + const TestInfo& test_info = *test_suite.GetTestInfo(j); + if (!test_info.should_run() || !test_info.result()->Skipped()) { + continue; + } + ColoredPrintf(COLOR_GREEN, "[ SKIPPED ] "); + printf("%s.%s", test_suite.name(), test_info.name()); + printf("\n"); + } + } +} + +void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("%s from %s ran.", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestSuiteCount(unit_test.test_suite_to_run_count()).c_str()); + if (GTEST_FLAG(print_time)) { + printf(" (%s ms total)", + internal::StreamableToString(unit_test.elapsed_time()).c_str()); + } + printf("\n"); + ColoredPrintf(COLOR_GREEN, "[ PASSED ] "); + printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str()); + + const int skipped_test_count = unit_test.skipped_test_count(); + if (skipped_test_count > 0) { + ColoredPrintf(COLOR_GREEN, "[ SKIPPED ] "); + printf("%s, listed below:\n", FormatTestCount(skipped_test_count).c_str()); + PrintSkippedTests(unit_test); + } + + if (!unit_test.Passed()) { + PrintFailedTests(unit_test); + PrintFailedTestSuites(unit_test); + } + + int num_disabled = unit_test.reportable_disabled_test_count(); + if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) { + if (unit_test.Passed()) { + printf("\n"); // Add a spacer if no FAILURE banner is displayed. + } + ColoredPrintf(COLOR_YELLOW, + " YOU HAVE %d DISABLED %s\n\n", + num_disabled, + num_disabled == 1 ? "TEST" : "TESTS"); + } + // Ensure that Google Test output is printed before, e.g., heapchecker output. + fflush(stdout); +} + +// End PrettyUnitTestResultPrinter + +// class TestEventRepeater +// +// This class forwards events to other event listeners. +class TestEventRepeater : public TestEventListener { + public: + TestEventRepeater() : forwarding_enabled_(true) {} + ~TestEventRepeater() override; + void Append(TestEventListener *listener); + TestEventListener* Release(TestEventListener* listener); + + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. 
+ bool forwarding_enabled() const { return forwarding_enabled_; } + void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; } + + void OnTestProgramStart(const UnitTest& unit_test) override; + void OnTestIterationStart(const UnitTest& unit_test, int iteration) override; + void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override; + void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) override; +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestCaseStart(const TestSuite& parameter) override; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestSuiteStart(const TestSuite& parameter) override; + void OnTestStart(const TestInfo& test_info) override; + void OnTestPartResult(const TestPartResult& result) override; + void OnTestEnd(const TestInfo& test_info) override; +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestCaseEnd(const TestCase& parameter) override; +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + void OnTestSuiteEnd(const TestSuite& parameter) override; + void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override; + void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) override; + void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; + void OnTestProgramEnd(const UnitTest& unit_test) override; + + private: + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled_; + // The list of listeners that receive events. + std::vector listeners_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater); +}; + +TestEventRepeater::~TestEventRepeater() { + ForEach(listeners_, Delete); +} + +void TestEventRepeater::Append(TestEventListener *listener) { + listeners_.push_back(listener); +} + +TestEventListener* TestEventRepeater::Release(TestEventListener *listener) { + for (size_t i = 0; i < listeners_.size(); ++i) { + if (listeners_[i] == listener) { + listeners_.erase(listeners_.begin() + static_cast(i)); + return listener; + } + } + + return nullptr; +} + +// Since most methods are very similar, use macros to reduce boilerplate. +// This defines a member that forwards the call to all listeners. +#define GTEST_REPEATER_METHOD_(Name, Type) \ +void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (size_t i = 0; i < listeners_.size(); i++) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ +} +// This defines a member that forwards the call to all listeners in reverse +// order. 
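The repeater above fans every event out to the registered TestEventListener objects, which is also the hook available to user code for custom reporting. A minimal sketch (listener and main() are illustrative; Append() takes ownership of the pointer):

#include <cstdio>

#include "gtest/gtest.h"

class MinimalListener : public ::testing::EmptyTestEventListener {
  void OnTestStart(const ::testing::TestInfo& info) override {
    std::printf("starting %s.%s\n", info.test_suite_name(), info.name());
  }
};

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::UnitTest::GetInstance()->listeners().Append(new MinimalListener);
  return RUN_ALL_TESTS();
}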
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \ + void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (size_t i = listeners_.size(); i != 0; i--) { \ + listeners_[i - 1]->Name(parameter); \ + } \ + } \ + } + +GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest) +GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest) +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +GTEST_REPEATER_METHOD_(OnTestCaseStart, TestSuite) +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +GTEST_REPEATER_METHOD_(OnTestSuiteStart, TestSuite) +GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) +GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult) +GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo) +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestSuite) +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +GTEST_REVERSE_REPEATER_METHOD_(OnTestSuiteEnd, TestSuite) +GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest) + +#undef GTEST_REPEATER_METHOD_ +#undef GTEST_REVERSE_REPEATER_METHOD_ + +void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (size_t i = 0; i < listeners_.size(); i++) { + listeners_[i]->OnTestIterationStart(unit_test, iteration); + } + } +} + +void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (size_t i = listeners_.size(); i > 0; i--) { + listeners_[i - 1]->OnTestIterationEnd(unit_test, iteration); + } + } +} + +// End TestEventRepeater + +// This class generates an XML output file. +class XmlUnitTestResultPrinter : public EmptyTestEventListener { + public: + explicit XmlUnitTestResultPrinter(const char* output_file); + + void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; + void ListTestsMatchingFilter(const std::vector& test_suites); + + // Prints an XML summary of all unit tests. + static void PrintXmlTestsList(std::ostream* stream, + const std::vector& test_suites); + + private: + // Is c a whitespace character that is normalized to a space character + // when it appears in an XML attribute value? + static bool IsNormalizableWhitespace(char c) { + return c == 0x9 || c == 0xA || c == 0xD; + } + + // May c appear in a well-formed XML document? + static bool IsValidXmlCharacter(char c) { + return IsNormalizableWhitespace(c) || c >= 0x20; + } + + // Returns an XML-escaped copy of the input string str. If + // is_attribute is true, the text is meant to appear as an attribute + // value, and normalizable whitespace is preserved by replacing it + // with character references. + static std::string EscapeXml(const std::string& str, bool is_attribute); + + // Returns the given string with all characters invalid in XML removed. + static std::string RemoveInvalidXmlCharacters(const std::string& str); + + // Convenience wrapper around EscapeXml when str is an attribute value. + static std::string EscapeXmlAttribute(const std::string& str) { + return EscapeXml(str, true); + } + + // Convenience wrapper around EscapeXml when str is not an attribute value. 
+ static std::string EscapeXmlText(const char* str) { + return EscapeXml(str, false); + } + + // Verifies that the given attribute belongs to the given element and + // streams the attribute as XML. + static void OutputXmlAttribute(std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value); + + // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. + static void OutputXmlCDataSection(::std::ostream* stream, const char* data); + + // Streams an XML representation of a TestInfo object. + static void OutputXmlTestInfo(::std::ostream* stream, + const char* test_suite_name, + const TestInfo& test_info); + + // Prints an XML representation of a TestSuite object + static void PrintXmlTestSuite(::std::ostream* stream, + const TestSuite& test_suite); + + // Prints an XML summary of unit_test to output stream out. + static void PrintXmlUnitTest(::std::ostream* stream, + const UnitTest& unit_test); + + // Produces a string representing the test properties in a result as space + // delimited XML attributes based on the property key="value" pairs. + // When the std::string is not empty, it includes a space at the beginning, + // to delimit this attribute from prior attributes. + static std::string TestPropertiesAsXmlAttributes(const TestResult& result); + + // Streams an XML representation of the test properties of a TestResult + // object. + static void OutputXmlTestProperties(std::ostream* stream, + const TestResult& result); + + // The output file. + const std::string output_file_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); +}; + +// Creates a new XmlUnitTestResultPrinter. +XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file) + : output_file_(output_file) { + if (output_file_.empty()) { + GTEST_LOG_(FATAL) << "XML output file may not be null"; + } +} + +// Called after the unit test ends. +void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + FILE* xmlout = OpenFileForWriting(output_file_); + std::stringstream stream; + PrintXmlUnitTest(&stream, unit_test); + fprintf(xmlout, "%s", StringStreamToString(&stream).c_str()); + fclose(xmlout); +} + +void XmlUnitTestResultPrinter::ListTestsMatchingFilter( + const std::vector& test_suites) { + FILE* xmlout = OpenFileForWriting(output_file_); + std::stringstream stream; + PrintXmlTestsList(&stream, test_suites); + fprintf(xmlout, "%s", StringStreamToString(&stream).c_str()); + fclose(xmlout); +} + +// Returns an XML-escaped copy of the input string str. If is_attribute +// is true, the text is meant to appear as an attribute value, and +// normalizable whitespace is preserved by replacing it with character +// references. +// +// Invalid XML characters in str, if any, are stripped from the output. +// It is expected that most, if not all, of the text processed by this +// module will consist of ordinary English text. +// If this module is ever modified to produce version 1.1 XML output, +// most invalid characters can be retained using character references. 
+std::string XmlUnitTestResultPrinter::EscapeXml(
+    const std::string& str, bool is_attribute) {
+  Message m;
+
+  for (size_t i = 0; i < str.size(); ++i) {
+    const char ch = str[i];
+    switch (ch) {
+      case '<':
+        m << "&lt;";
+        break;
+      case '>':
+        m << "&gt;";
+        break;
+      case '&':
+        m << "&amp;";
+        break;
+      case '\'':
+        if (is_attribute)
+          m << "&apos;";
+        else
+          m << '\'';
+        break;
+      case '"':
+        if (is_attribute)
+          m << "&quot;";
+        else
+          m << '"';
+        break;
+      default:
+        if (IsValidXmlCharacter(ch)) {
+          if (is_attribute && IsNormalizableWhitespace(ch))
+            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+              << ";";
+          else
+            m << ch;
+        }
+        break;
+    }
+  }
+
+  return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+    const std::string& str) {
+  std::string output;
+  output.reserve(str.size());
+  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+// GOOGLETEST_CM0009 DO NOT DELETE
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestSuite object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << (static_cast<double>(ms) * 1e-3);
+  return ss.str();
+}
+
+static bool PortableLocaltime(time_t seconds, struct tm* out) {
+#if defined(_MSC_VER)
+  return localtime_s(out, &seconds) == 0;
+#elif defined(__MINGW32__) || defined(__MINGW64__)
+  // MINGW <time.h> provides neither localtime_r nor localtime_s, but uses
+  // Windows' localtime(), which has a thread-local tm buffer.
+  struct tm* tm_ptr = localtime(&seconds);  // NOLINT
+  if (tm_ptr == nullptr) return false;
+  *out = *tm_ptr;
+  return true;
+#else
+  return localtime_r(&seconds, out) != nullptr;
+#endif
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+  struct tm time_struct;
+  if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
+    return "";
+  // YYYY-MM-DDThh:mm:ss
+  return StreamableToString(time_struct.tm_year + 1900) + "-" +
+      String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
+      String::FormatIntWidth2(time_struct.tm_mday) + "T" +
+      String::FormatIntWidth2(time_struct.tm_hour) + ":" +
+      String::FormatIntWidth2(time_struct.tm_min) + ":" +
+      String::FormatIntWidth2(time_struct.tm_sec);
+}
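The escaping and timestamp helpers above exist to serve the XML report writer that follows. For context, the report is normally requested through the output flag; a hedged sketch (the file name is a placeholder, and this main() is illustrative rather than the project's actual test runner):

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Equivalent to passing --gtest_output=xml:test_report.xml on the command line.
  ::testing::GTEST_FLAG(output) = "xml:test_report.xml";
  return RUN_ALL_TESTS();
}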
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+                                                     const char* data) {
+  const char* segment = data;
+  *stream << "<![CDATA[";
+  for (;;) {
+    const char* const next_segment = strstr(segment, "]]>");
+    if (next_segment != nullptr) {
+      stream->write(
+          segment, static_cast<std::streamsize>(next_segment - segment));
+      *stream << "]]>]]&gt;<![CDATA[";
+      segment = next_segment + strlen("]]>");
+    } else {
+      *stream << segment;
+      break;
+    }
+  }
+  *stream << "]]>";
+}
+
+void XmlUnitTestResultPrinter::OutputXmlAttribute(
+    std::ostream* stream,
+    const std::string& element_name,
+    const std::string& name,
+    const std::string& value) {
+  const std::vector<std::string>& allowed_names =
+      GetReservedOutputAttributesForElement(element_name);
+
+  GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+               allowed_names.end())
+      << "Attribute " << name << " is not allowed for element <" << element_name
+      << ">.";
+
+  *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
+}
+
+// Prints an XML representation of a TestInfo object.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+                                                 const char* test_suite_name,
+                                                 const TestInfo& test_info) {
+  const TestResult& result = *test_info.result();
+  const std::string kTestsuite = "testcase";
+
+  if (test_info.is_in_another_shard()) {
+    return;
+  }
+
+  *stream << "    <testcase";
+  OutputXmlAttribute(stream, kTestsuite, "name", test_info.name());
+
+  if (test_info.value_param() != nullptr) {
+    OutputXmlAttribute(stream, kTestsuite, "value_param",
+                       test_info.value_param());
+  }
+  if (test_info.type_param() != nullptr) {
+    OutputXmlAttribute(stream, kTestsuite, "type_param",
+                       test_info.type_param());
+  }
+  if (GTEST_FLAG(list_tests)) {
+    OutputXmlAttribute(stream, kTestsuite, "file", test_info.file());
+    OutputXmlAttribute(stream, kTestsuite, "line",
+                       StreamableToString(test_info.line()));
+    *stream << " />\n";
+    return;
+  }
+
+  OutputXmlAttribute(stream, kTestsuite, "status",
+                     test_info.should_run() ? "run" : "notrun");
+  OutputXmlAttribute(stream, kTestsuite, "result",
+                     test_info.should_run()
+                         ? (result.Skipped() ? "skipped" : "completed")
+                         : "suppressed");
+  OutputXmlAttribute(stream, kTestsuite, "time",
+                     FormatTimeInMillisAsSeconds(result.elapsed_time()));
+  OutputXmlAttribute(
+      stream, kTestsuite, "timestamp",
+      FormatEpochTimeInMillisAsIso8601(result.start_timestamp()));
+  OutputXmlAttribute(stream, kTestsuite, "classname", test_suite_name);
+
+  int failures = 0;
+  for (int i = 0; i < result.total_part_count(); ++i) {
+    const TestPartResult& part = result.GetTestPartResult(i);
+    if (part.failed()) {
+      if (++failures == 1) {
+        *stream << ">\n";
+      }
+      const std::string location =
+          internal::FormatCompilerIndependentFileLocation(part.file_name(),
+                                                          part.line_number());
+      const std::string summary = location + "\n" + part.summary();
+      *stream << "      <failure message=\""
+              << EscapeXmlAttribute(summary)
+              << "\" type=\"\">";
+      const std::string detail = location + "\n" + part.message();
+      OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+      *stream << "</failure>\n";
+    }
+  }
+
+  if (failures == 0 && result.test_property_count() == 0) {
+    *stream << " />\n";
+  } else {
+    if (failures == 0) {
+      *stream << ">\n";
+    }
+    OutputXmlTestProperties(stream, result);
+    *stream << "    </" << kTestsuite << ">\n";
+  }
+}
+
+// Prints an XML representation of a TestSuite object
+void XmlUnitTestResultPrinter::PrintXmlTestSuite(std::ostream* stream,
+                                                 const TestSuite& test_suite) {
+  const std::string kTestsuite = "testsuite";
+  *stream << "  <" << kTestsuite;
+  OutputXmlAttribute(stream, kTestsuite, "name", test_suite.name());
+  OutputXmlAttribute(stream, kTestsuite, "tests",
+                     StreamableToString(test_suite.reportable_test_count()));
+  if (!GTEST_FLAG(list_tests)) {
+    OutputXmlAttribute(stream, kTestsuite, "failures",
+                       StreamableToString(test_suite.failed_test_count()));
+    OutputXmlAttribute(
+        stream, kTestsuite, "disabled",
+        StreamableToString(test_suite.reportable_disabled_test_count()));
+    OutputXmlAttribute(stream, kTestsuite, "errors", "0");
+    OutputXmlAttribute(stream, kTestsuite, "time",
+                       FormatTimeInMillisAsSeconds(test_suite.elapsed_time()));
+    OutputXmlAttribute(
+        stream, kTestsuite, "timestamp",
+        FormatEpochTimeInMillisAsIso8601(test_suite.start_timestamp()));
+    *stream << TestPropertiesAsXmlAttributes(test_suite.ad_hoc_test_result());
+  }
+  *stream << ">\n";
+  for (int i = 0; i < test_suite.total_test_count(); ++i) {
+    if (test_suite.GetTestInfo(i)->is_reportable())
+      OutputXmlTestInfo(stream, test_suite.name(), *test_suite.GetTestInfo(i));
+  }
+  *stream << "  </" << kTestsuite << ">\n";
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
+                                                const UnitTest& unit_test) {
+  const std::string kTestsuites = "testsuites";
+
+  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+  *stream << "<" << kTestsuites;
+
+  OutputXmlAttribute(stream, kTestsuites, "tests",
+                     StreamableToString(unit_test.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "failures",
+                     StreamableToString(unit_test.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuites, "disabled",
+      StreamableToString(unit_test.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+  OutputXmlAttribute(stream, kTestsuites, "time",
+                     FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
+  OutputXmlAttribute(
+      stream, kTestsuites, "timestamp",
+      FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
+
+  if (GTEST_FLAG(shuffle)) {
+    OutputXmlAttribute(stream, kTestsuites, "random_seed",
+                       StreamableToString(unit_test.random_seed()));
+  }
+  *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
+
+  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+  *stream << ">\n";
+
+  for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+    if (unit_test.GetTestSuite(i)->reportable_test_count() > 0)
+      PrintXmlTestSuite(stream, *unit_test.GetTestSuite(i));
+  }
+  *stream << "</" << kTestsuites << ">\n";
+}
+
+void XmlUnitTestResultPrinter::PrintXmlTestsList(
+    std::ostream* stream, const std::vector<TestSuite*>& test_suites) {
+  const std::string kTestsuites = "testsuites";
+
+  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+  *stream << "<" << kTestsuites;
+
+  int total_tests = 0;
+  for (auto test_suite : test_suites) {
+    total_tests += test_suite->total_test_count();
+  }
+  OutputXmlAttribute(stream, kTestsuites, "tests",
+                     StreamableToString(total_tests));
+  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+  *stream << ">\n";
+
+  for (auto test_suite : test_suites) {
+    PrintXmlTestSuite(stream, *test_suite);
+  }
+  *stream << "</" << kTestsuites << ">\n";
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes( + const TestResult& result) { + Message attributes; + for (int i = 0; i < result.test_property_count(); ++i) { + const TestProperty& property = result.GetTestProperty(i); + attributes << " " << property.key() << "=" + << "\"" << EscapeXmlAttribute(property.value()) << "\""; + } + return attributes.GetString(); +} + +void XmlUnitTestResultPrinter::OutputXmlTestProperties( + std::ostream* stream, const TestResult& result) { + const std::string kProperties = "properties"; + const std::string kProperty = "property"; + + if (result.test_property_count() <= 0) { + return; + } + + *stream << "<" << kProperties << ">\n"; + for (int i = 0; i < result.test_property_count(); ++i) { + const TestProperty& property = result.GetTestProperty(i); + *stream << "<" << kProperty; + *stream << " name=\"" << EscapeXmlAttribute(property.key()) << "\""; + *stream << " value=\"" << EscapeXmlAttribute(property.value()) << "\""; + *stream << "/>\n"; + } + *stream << "\n"; +} + +// End XmlUnitTestResultPrinter + +// This class generates an JSON output file. +class JsonUnitTestResultPrinter : public EmptyTestEventListener { + public: + explicit JsonUnitTestResultPrinter(const char* output_file); + + void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; + + // Prints an JSON summary of all unit tests. + static void PrintJsonTestList(::std::ostream* stream, + const std::vector& test_suites); + + private: + // Returns an JSON-escaped copy of the input string str. + static std::string EscapeJson(const std::string& str); + + //// Verifies that the given attribute belongs to the given element and + //// streams the attribute as JSON. + static void OutputJsonKey(std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value, + const std::string& indent, + bool comma = true); + static void OutputJsonKey(std::ostream* stream, + const std::string& element_name, + const std::string& name, + int value, + const std::string& indent, + bool comma = true); + + // Streams a JSON representation of a TestInfo object. + static void OutputJsonTestInfo(::std::ostream* stream, + const char* test_suite_name, + const TestInfo& test_info); + + // Prints a JSON representation of a TestSuite object + static void PrintJsonTestSuite(::std::ostream* stream, + const TestSuite& test_suite); + + // Prints a JSON summary of unit_test to output stream out. + static void PrintJsonUnitTest(::std::ostream* stream, + const UnitTest& unit_test); + + // Produces a string representing the test properties in a result as + // a JSON dictionary. + static std::string TestPropertiesAsJson(const TestResult& result, + const std::string& indent); + + // The output file. + const std::string output_file_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(JsonUnitTestResultPrinter); +}; + +// Creates a new JsonUnitTestResultPrinter. +JsonUnitTestResultPrinter::JsonUnitTestResultPrinter(const char* output_file) + : output_file_(output_file) { + if (output_file_.empty()) { + GTEST_LOG_(FATAL) << "JSON output file may not be null"; + } +} + +void JsonUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + FILE* jsonout = OpenFileForWriting(output_file_); + std::stringstream stream; + PrintJsonUnitTest(&stream, unit_test); + fprintf(jsonout, "%s", StringStreamToString(&stream).c_str()); + fclose(jsonout); +} + +// Returns an JSON-escaped copy of the input string str. 
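TestPropertiesAsXmlAttributes() and OutputXmlTestProperties() above, like TestPropertiesAsJson() below, serialize key/value pairs recorded with RecordProperty(). A small sketch of where such a pair comes from (test name and key are illustrative, not part of the patch):

#include "gtest/gtest.h"

TEST(ReportPropertySketch, EmitsKeyValuePair) {
  // Appears as <property name="build_id" value="42"/> inside the <properties>
  // block of this test's <testcase> element in XML, and as "build_id": "42"
  // in the JSON report.
  RecordProperty("build_id", 42);
  SUCCEED();
}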
+std::string JsonUnitTestResultPrinter::EscapeJson(const std::string& str) { + Message m; + + for (size_t i = 0; i < str.size(); ++i) { + const char ch = str[i]; + switch (ch) { + case '\\': + case '"': + case '/': + m << '\\' << ch; + break; + case '\b': + m << "\\b"; + break; + case '\t': + m << "\\t"; + break; + case '\n': + m << "\\n"; + break; + case '\f': + m << "\\f"; + break; + case '\r': + m << "\\r"; + break; + default: + if (ch < ' ') { + m << "\\u00" << String::FormatByte(static_cast(ch)); + } else { + m << ch; + } + break; + } + } + + return m.GetString(); +} + +// The following routines generate an JSON representation of a UnitTest +// object. + +// Formats the given time in milliseconds as seconds. +static std::string FormatTimeInMillisAsDuration(TimeInMillis ms) { + ::std::stringstream ss; + ss << (static_cast(ms) * 1e-3) << "s"; + return ss.str(); +} + +// Converts the given epoch time in milliseconds to a date string in the +// RFC3339 format, without the timezone information. +static std::string FormatEpochTimeInMillisAsRFC3339(TimeInMillis ms) { + struct tm time_struct; + if (!PortableLocaltime(static_cast(ms / 1000), &time_struct)) + return ""; + // YYYY-MM-DDThh:mm:ss + return StreamableToString(time_struct.tm_year + 1900) + "-" + + String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" + + String::FormatIntWidth2(time_struct.tm_mday) + "T" + + String::FormatIntWidth2(time_struct.tm_hour) + ":" + + String::FormatIntWidth2(time_struct.tm_min) + ":" + + String::FormatIntWidth2(time_struct.tm_sec) + "Z"; +} + +static inline std::string Indent(size_t width) { + return std::string(width, ' '); +} + +void JsonUnitTestResultPrinter::OutputJsonKey( + std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value, + const std::string& indent, + bool comma) { + const std::vector& allowed_names = + GetReservedOutputAttributesForElement(element_name); + + GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != + allowed_names.end()) + << "Key \"" << name << "\" is not allowed for value \"" << element_name + << "\"."; + + *stream << indent << "\"" << name << "\": \"" << EscapeJson(value) << "\""; + if (comma) + *stream << ",\n"; +} + +void JsonUnitTestResultPrinter::OutputJsonKey( + std::ostream* stream, + const std::string& element_name, + const std::string& name, + int value, + const std::string& indent, + bool comma) { + const std::vector& allowed_names = + GetReservedOutputAttributesForElement(element_name); + + GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != + allowed_names.end()) + << "Key \"" << name << "\" is not allowed for value \"" << element_name + << "\"."; + + *stream << indent << "\"" << name << "\": " << StreamableToString(value); + if (comma) + *stream << ",\n"; +} + +// Prints a JSON representation of a TestInfo object. 
+void JsonUnitTestResultPrinter::OutputJsonTestInfo(::std::ostream* stream, + const char* test_suite_name, + const TestInfo& test_info) { + const TestResult& result = *test_info.result(); + const std::string kTestsuite = "testcase"; + const std::string kIndent = Indent(10); + + *stream << Indent(8) << "{\n"; + OutputJsonKey(stream, kTestsuite, "name", test_info.name(), kIndent); + + if (test_info.value_param() != nullptr) { + OutputJsonKey(stream, kTestsuite, "value_param", test_info.value_param(), + kIndent); + } + if (test_info.type_param() != nullptr) { + OutputJsonKey(stream, kTestsuite, "type_param", test_info.type_param(), + kIndent); + } + if (GTEST_FLAG(list_tests)) { + OutputJsonKey(stream, kTestsuite, "file", test_info.file(), kIndent); + OutputJsonKey(stream, kTestsuite, "line", test_info.line(), kIndent, false); + *stream << "\n" << Indent(8) << "}"; + return; + } + + OutputJsonKey(stream, kTestsuite, "status", + test_info.should_run() ? "RUN" : "NOTRUN", kIndent); + OutputJsonKey(stream, kTestsuite, "result", + test_info.should_run() + ? (result.Skipped() ? "SKIPPED" : "COMPLETED") + : "SUPPRESSED", + kIndent); + OutputJsonKey(stream, kTestsuite, "timestamp", + FormatEpochTimeInMillisAsRFC3339(result.start_timestamp()), + kIndent); + OutputJsonKey(stream, kTestsuite, "time", + FormatTimeInMillisAsDuration(result.elapsed_time()), kIndent); + OutputJsonKey(stream, kTestsuite, "classname", test_suite_name, kIndent, + false); + *stream << TestPropertiesAsJson(result, kIndent); + + int failures = 0; + for (int i = 0; i < result.total_part_count(); ++i) { + const TestPartResult& part = result.GetTestPartResult(i); + if (part.failed()) { + *stream << ",\n"; + if (++failures == 1) { + *stream << kIndent << "\"" << "failures" << "\": [\n"; + } + const std::string location = + internal::FormatCompilerIndependentFileLocation(part.file_name(), + part.line_number()); + const std::string message = EscapeJson(location + "\n" + part.message()); + *stream << kIndent << " {\n" + << kIndent << " \"failure\": \"" << message << "\",\n" + << kIndent << " \"type\": \"\"\n" + << kIndent << " }"; + } + } + + if (failures > 0) + *stream << "\n" << kIndent << "]"; + *stream << "\n" << Indent(8) << "}"; +} + +// Prints an JSON representation of a TestSuite object +void JsonUnitTestResultPrinter::PrintJsonTestSuite( + std::ostream* stream, const TestSuite& test_suite) { + const std::string kTestsuite = "testsuite"; + const std::string kIndent = Indent(6); + + *stream << Indent(4) << "{\n"; + OutputJsonKey(stream, kTestsuite, "name", test_suite.name(), kIndent); + OutputJsonKey(stream, kTestsuite, "tests", test_suite.reportable_test_count(), + kIndent); + if (!GTEST_FLAG(list_tests)) { + OutputJsonKey(stream, kTestsuite, "failures", + test_suite.failed_test_count(), kIndent); + OutputJsonKey(stream, kTestsuite, "disabled", + test_suite.reportable_disabled_test_count(), kIndent); + OutputJsonKey(stream, kTestsuite, "errors", 0, kIndent); + OutputJsonKey( + stream, kTestsuite, "timestamp", + FormatEpochTimeInMillisAsRFC3339(test_suite.start_timestamp()), + kIndent); + OutputJsonKey(stream, kTestsuite, "time", + FormatTimeInMillisAsDuration(test_suite.elapsed_time()), + kIndent, false); + *stream << TestPropertiesAsJson(test_suite.ad_hoc_test_result(), kIndent) + << ",\n"; + } + + *stream << kIndent << "\"" << kTestsuite << "\": [\n"; + + bool comma = false; + for (int i = 0; i < test_suite.total_test_count(); ++i) { + if (test_suite.GetTestInfo(i)->is_reportable()) { + if (comma) { + *stream << ",\n"; + } 
else { + comma = true; + } + OutputJsonTestInfo(stream, test_suite.name(), *test_suite.GetTestInfo(i)); + } + } + *stream << "\n" << kIndent << "]\n" << Indent(4) << "}"; +} + +// Prints a JSON summary of unit_test to output stream out. +void JsonUnitTestResultPrinter::PrintJsonUnitTest(std::ostream* stream, + const UnitTest& unit_test) { + const std::string kTestsuites = "testsuites"; + const std::string kIndent = Indent(2); + *stream << "{\n"; + + OutputJsonKey(stream, kTestsuites, "tests", unit_test.reportable_test_count(), + kIndent); + OutputJsonKey(stream, kTestsuites, "failures", unit_test.failed_test_count(), + kIndent); + OutputJsonKey(stream, kTestsuites, "disabled", + unit_test.reportable_disabled_test_count(), kIndent); + OutputJsonKey(stream, kTestsuites, "errors", 0, kIndent); + if (GTEST_FLAG(shuffle)) { + OutputJsonKey(stream, kTestsuites, "random_seed", unit_test.random_seed(), + kIndent); + } + OutputJsonKey(stream, kTestsuites, "timestamp", + FormatEpochTimeInMillisAsRFC3339(unit_test.start_timestamp()), + kIndent); + OutputJsonKey(stream, kTestsuites, "time", + FormatTimeInMillisAsDuration(unit_test.elapsed_time()), kIndent, + false); + + *stream << TestPropertiesAsJson(unit_test.ad_hoc_test_result(), kIndent) + << ",\n"; + + OutputJsonKey(stream, kTestsuites, "name", "AllTests", kIndent); + *stream << kIndent << "\"" << kTestsuites << "\": [\n"; + + bool comma = false; + for (int i = 0; i < unit_test.total_test_suite_count(); ++i) { + if (unit_test.GetTestSuite(i)->reportable_test_count() > 0) { + if (comma) { + *stream << ",\n"; + } else { + comma = true; + } + PrintJsonTestSuite(stream, *unit_test.GetTestSuite(i)); + } + } + + *stream << "\n" << kIndent << "]\n" << "}\n"; +} + +void JsonUnitTestResultPrinter::PrintJsonTestList( + std::ostream* stream, const std::vector& test_suites) { + const std::string kTestsuites = "testsuites"; + const std::string kIndent = Indent(2); + *stream << "{\n"; + int total_tests = 0; + for (auto test_suite : test_suites) { + total_tests += test_suite->total_test_count(); + } + OutputJsonKey(stream, kTestsuites, "tests", total_tests, kIndent); + + OutputJsonKey(stream, kTestsuites, "name", "AllTests", kIndent); + *stream << kIndent << "\"" << kTestsuites << "\": [\n"; + + for (size_t i = 0; i < test_suites.size(); ++i) { + if (i != 0) { + *stream << ",\n"; + } + PrintJsonTestSuite(stream, *test_suites[i]); + } + + *stream << "\n" + << kIndent << "]\n" + << "}\n"; +} +// Produces a string representing the test properties in a result as +// a JSON dictionary. +std::string JsonUnitTestResultPrinter::TestPropertiesAsJson( + const TestResult& result, const std::string& indent) { + Message attributes; + for (int i = 0; i < result.test_property_count(); ++i) { + const TestProperty& property = result.GetTestProperty(i); + attributes << ",\n" << indent << "\"" << property.key() << "\": " + << "\"" << EscapeJson(property.value()) << "\""; + } + return attributes.GetString(); +} + +// End JsonUnitTestResultPrinter + +#if GTEST_CAN_STREAM_RESULTS_ + +// Checks if str contains '=', '&', '%' or '\n' characters. If yes, +// replaces them by "%xx" where xx is their hexadecimal value. For +// example, replaces "=" with "%3D". This algorithm is O(strlen(str)) +// in both time and space -- important as the input str may contain an +// arbitrarily long test failure message and stack trace. 
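In the encoding just described, only '%', '=', '&' and '\n' are rewritten as "%XX" hex escapes, so a message such as "event=TestEnd&passed=1\n" (an arbitrary example string, not necessarily the exact wire format) goes out as "event%3DTestEnd%26passed%3D1%0A". A stand-alone sketch of the same rule, separate from the StreamingListener implementation that follows:

#include <cstdio>
#include <string>

// Sketch (not part of gtest): percent-encode the four characters that would
// otherwise break the key=value&key=value message framing.
std::string PercentEncodeSketch(const std::string& in) {
  std::string out;
  out.reserve(in.size());
  for (unsigned char ch : in) {
    if (ch == '%' || ch == '=' || ch == '&' || ch == '\n') {
      char buf[8];
      std::snprintf(buf, sizeof(buf), "%%%02X", static_cast<unsigned>(ch));
      out += buf;
    } else {
      out += static_cast<char>(ch);
    }
  }
  return out;
}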
+std::string StreamingListener::UrlEncode(const char* str) { + std::string result; + result.reserve(strlen(str) + 1); + for (char ch = *str; ch != '\0'; ch = *++str) { + switch (ch) { + case '%': + case '=': + case '&': + case '\n': + result.append("%" + String::FormatByte(static_cast(ch))); + break; + default: + result.push_back(ch); + break; + } + } + return result; +} + +void StreamingListener::SocketWriter::MakeConnection() { + GTEST_CHECK_(sockfd_ == -1) + << "MakeConnection() can't be called when there is already a connection."; + + addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses. + hints.ai_socktype = SOCK_STREAM; + addrinfo* servinfo = nullptr; + + // Use the getaddrinfo() to get a linked list of IP addresses for + // the given host name. + const int error_num = getaddrinfo( + host_name_.c_str(), port_num_.c_str(), &hints, &servinfo); + if (error_num != 0) { + GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: " + << gai_strerror(error_num); + } + + // Loop through all the results and connect to the first we can. + for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != nullptr; + cur_addr = cur_addr->ai_next) { + sockfd_ = socket( + cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol); + if (sockfd_ != -1) { + // Connect the client socket to the server socket. + if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) { + close(sockfd_); + sockfd_ = -1; + } + } + } + + freeaddrinfo(servinfo); // all done with this structure + + if (sockfd_ == -1) { + GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to " + << host_name_ << ":" << port_num_; + } +} + +// End of class Streaming Listener +#endif // GTEST_CAN_STREAM_RESULTS__ + +// class OsStackTraceGetter + +const char* const OsStackTraceGetterInterface::kElidedFramesMarker = + "... " GTEST_NAME_ " internal frames ..."; + +std::string OsStackTraceGetter::CurrentStackTrace(int max_depth, int skip_count) + GTEST_LOCK_EXCLUDED_(mutex_) { +#if GTEST_HAS_ABSL + std::string result; + + if (max_depth <= 0) { + return result; + } + + max_depth = std::min(max_depth, kMaxStackTraceDepth); + + std::vector raw_stack(max_depth); + // Skips the frames requested by the caller, plus this function. + const int raw_stack_size = + absl::GetStackTrace(&raw_stack[0], max_depth, skip_count + 1); + + void* caller_frame = nullptr; + { + MutexLock lock(&mutex_); + caller_frame = caller_frame_; + } + + for (int i = 0; i < raw_stack_size; ++i) { + if (raw_stack[i] == caller_frame && + !GTEST_FLAG(show_internal_stack_frames)) { + // Add a marker to the trace and stop adding frames. 
+ absl::StrAppend(&result, kElidedFramesMarker, "\n"); + break; + } + + char tmp[1024]; + const char* symbol = "(unknown)"; + if (absl::Symbolize(raw_stack[i], tmp, sizeof(tmp))) { + symbol = tmp; + } + + char line[1024]; + snprintf(line, sizeof(line), " %p: %s\n", raw_stack[i], symbol); + result += line; + } + + return result; + +#else // !GTEST_HAS_ABSL + static_cast(max_depth); + static_cast(skip_count); + return ""; +#endif // GTEST_HAS_ABSL +} + +void OsStackTraceGetter::UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_) { +#if GTEST_HAS_ABSL + void* caller_frame = nullptr; + if (absl::GetStackTrace(&caller_frame, 1, 3) <= 0) { + caller_frame = nullptr; + } + + MutexLock lock(&mutex_); + caller_frame_ = caller_frame; +#endif // GTEST_HAS_ABSL +} + +// A helper class that creates the premature-exit file in its +// constructor and deletes the file in its destructor. +class ScopedPrematureExitFile { + public: + explicit ScopedPrematureExitFile(const char* premature_exit_filepath) + : premature_exit_filepath_(premature_exit_filepath ? + premature_exit_filepath : "") { + // If a path to the premature-exit file is specified... + if (!premature_exit_filepath_.empty()) { + // create the file with a single "0" character in it. I/O + // errors are ignored as there's nothing better we can do and we + // don't want to fail the test because of this. + FILE* pfile = posix::FOpen(premature_exit_filepath, "w"); + fwrite("0", 1, 1, pfile); + fclose(pfile); + } + } + + ~ScopedPrematureExitFile() { +#if !defined GTEST_OS_ESP8266 + if (!premature_exit_filepath_.empty()) { + int retval = remove(premature_exit_filepath_.c_str()); + if (retval) { + GTEST_LOG_(ERROR) << "Failed to remove premature exit filepath \"" + << premature_exit_filepath_ << "\" with error " + << retval; + } + } +#endif + } + + private: + const std::string premature_exit_filepath_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile); +}; + +} // namespace internal + +// class TestEventListeners + +TestEventListeners::TestEventListeners() + : repeater_(new internal::TestEventRepeater()), + default_result_printer_(nullptr), + default_xml_generator_(nullptr) {} + +TestEventListeners::~TestEventListeners() { delete repeater_; } + +// Returns the standard listener responsible for the default console +// output. Can be removed from the listeners list to shut down default +// console output. Note that removing this object from the listener list +// with Release transfers its ownership to the user. +void TestEventListeners::Append(TestEventListener* listener) { + repeater_->Append(listener); +} + +// Removes the given event listener from the list and returns it. It then +// becomes the caller's responsibility to delete the listener. Returns +// NULL if the listener is not found in the list. +TestEventListener* TestEventListeners::Release(TestEventListener* listener) { + if (listener == default_result_printer_) + default_result_printer_ = nullptr; + else if (listener == default_xml_generator_) + default_xml_generator_ = nullptr; + return repeater_->Release(listener); +} + +// Returns repeater that broadcasts the TestEventListener events to all +// subscribers. +TestEventListener* TestEventListeners::repeater() { return repeater_; } + +// Sets the default_result_printer attribute to the provided listener. +// The listener is also added to the listener list and previous +// default_result_printer is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. 
Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) { + if (default_result_printer_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_result_printer_); + default_result_printer_ = listener; + if (listener != nullptr) Append(listener); + } +} + +// Sets the default_xml_generator attribute to the provided listener. The +// listener is also added to the listener list and previous +// default_xml_generator is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) { + if (default_xml_generator_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_xml_generator_); + default_xml_generator_ = listener; + if (listener != nullptr) Append(listener); + } +} + +// Controls whether events will be forwarded by the repeater to the +// listeners in the list. +bool TestEventListeners::EventForwardingEnabled() const { + return repeater_->forwarding_enabled(); +} + +void TestEventListeners::SuppressEventForwarding() { + repeater_->set_forwarding_enabled(false); +} + +// class UnitTest + +// Gets the singleton UnitTest object. The first time this method is +// called, a UnitTest object is constructed and returned. Consecutive +// calls will return the same object. +// +// We don't protect this under mutex_ as a user is not supposed to +// call this before main() starts, from which point on the return +// value will never change. +UnitTest* UnitTest::GetInstance() { + // CodeGear C++Builder insists on a public destructor for the + // default implementation. Use this implementation to keep good OO + // design with private destructor. + +#if defined(__BORLANDC__) + static UnitTest* const instance = new UnitTest; + return instance; +#else + static UnitTest instance; + return &instance; +#endif // defined(__BORLANDC__) +} + +// Gets the number of successful test suites. +int UnitTest::successful_test_suite_count() const { + return impl()->successful_test_suite_count(); +} + +// Gets the number of failed test suites. +int UnitTest::failed_test_suite_count() const { + return impl()->failed_test_suite_count(); +} + +// Gets the number of all test suites. +int UnitTest::total_test_suite_count() const { + return impl()->total_test_suite_count(); +} + +// Gets the number of all test suites that contain at least one test +// that should run. +int UnitTest::test_suite_to_run_count() const { + return impl()->test_suite_to_run_count(); +} + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +int UnitTest::successful_test_case_count() const { + return impl()->successful_test_suite_count(); +} +int UnitTest::failed_test_case_count() const { + return impl()->failed_test_suite_count(); +} +int UnitTest::total_test_case_count() const { + return impl()->total_test_suite_count(); +} +int UnitTest::test_case_to_run_count() const { + return impl()->test_suite_to_run_count(); +} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +// Gets the number of successful tests. +int UnitTest::successful_test_count() const { + return impl()->successful_test_count(); +} + +// Gets the number of skipped tests. 
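The Append/Release plumbing above is what user code goes through to swap out the default console printer. A minimal sketch (MinimalPrinter and its one-line output format are invented for illustration; the include path assumes the gtest headers added by this patch are on the include path):

#include <cstdio>
#include "gtest/gtest.h"

// Replaces gtest's default console output with a one-line-per-test printer.
class MinimalPrinter : public ::testing::EmptyTestEventListener {
  void OnTestEnd(const ::testing::TestInfo& info) override {
    std::printf("%s.%s %s\n", info.test_suite_name(), info.name(),
                info.result()->Passed() ? "ok" : "FAILED");
  }
};

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::TestEventListeners& listeners =
      ::testing::UnitTest::GetInstance()->listeners();
  // Release() hands ownership of the default printer back to the caller.
  delete listeners.Release(listeners.default_result_printer());
  listeners.Append(new MinimalPrinter);  // the UnitTest object now owns it
  return RUN_ALL_TESTS();
}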
+int UnitTest::skipped_test_count() const { + return impl()->skipped_test_count(); +} + +// Gets the number of failed tests. +int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTest::reportable_disabled_test_count() const { + return impl()->reportable_disabled_test_count(); +} + +// Gets the number of disabled tests. +int UnitTest::disabled_test_count() const { + return impl()->disabled_test_count(); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTest::reportable_test_count() const { + return impl()->reportable_test_count(); +} + +// Gets the number of all tests. +int UnitTest::total_test_count() const { return impl()->total_test_count(); } + +// Gets the number of tests that should run. +int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } + +// Gets the time of the test program start, in ms from the start of the +// UNIX epoch. +internal::TimeInMillis UnitTest::start_timestamp() const { + return impl()->start_timestamp(); +} + +// Gets the elapsed time, in milliseconds. +internal::TimeInMillis UnitTest::elapsed_time() const { + return impl()->elapsed_time(); +} + +// Returns true if and only if the unit test passed (i.e. all test suites +// passed). +bool UnitTest::Passed() const { return impl()->Passed(); } + +// Returns true if and only if the unit test failed (i.e. some test suite +// failed or something outside of all tests failed). +bool UnitTest::Failed() const { return impl()->Failed(); } + +// Gets the i-th test suite among all the test suites. i can range from 0 to +// total_test_suite_count() - 1. If i is not in that range, returns NULL. +const TestSuite* UnitTest::GetTestSuite(int i) const { + return impl()->GetTestSuite(i); +} + +// Legacy API is deprecated but still available +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +const TestCase* UnitTest::GetTestCase(int i) const { + return impl()->GetTestCase(i); +} +#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ + +// Returns the TestResult containing information on test failures and +// properties logged outside of individual test suites. +const TestResult& UnitTest::ad_hoc_test_result() const { + return *impl()->ad_hoc_test_result(); +} + +// Gets the i-th test suite among all the test suites. i can range from 0 to +// total_test_suite_count() - 1. If i is not in that range, returns NULL. +TestSuite* UnitTest::GetMutableTestSuite(int i) { + return impl()->GetMutableSuiteCase(i); +} + +// Returns the list of event listeners that can be used to track events +// inside Google Test. +TestEventListeners& UnitTest::listeners() { + return *impl()->listeners(); +} + +// Registers and returns a global test environment. When a test +// program is run, all global test environments will be set-up in the +// order they were registered. After all tests in the program have +// finished, all global test environments will be torn-down in the +// *reverse* order they were registered. +// +// The UnitTest object takes ownership of the given environment. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +Environment* UnitTest::AddEnvironment(Environment* env) { + if (env == nullptr) { + return nullptr; + } + + impl_->environments().push_back(env); + return env; +} + +// Adds a TestPartResult to the current TestResult object. All Google Test +// assertion macros (e.g. 
ASSERT_TRUE, EXPECT_EQ, etc) eventually call +// this to report their results. The user code should use the +// assertion macros instead of calling this directly. +void UnitTest::AddTestPartResult( + TestPartResult::Type result_type, + const char* file_name, + int line_number, + const std::string& message, + const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) { + Message msg; + msg << message; + + internal::MutexLock lock(&mutex_); + if (impl_->gtest_trace_stack().size() > 0) { + msg << "\n" << GTEST_NAME_ << " trace:"; + + for (size_t i = impl_->gtest_trace_stack().size(); i > 0; --i) { + const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1]; + msg << "\n" << internal::FormatFileLocation(trace.file, trace.line) + << " " << trace.message; + } + } + + if (os_stack_trace.c_str() != nullptr && !os_stack_trace.empty()) { + msg << internal::kStackTraceMarker << os_stack_trace; + } + + const TestPartResult result = TestPartResult( + result_type, file_name, line_number, msg.GetString().c_str()); + impl_->GetTestPartResultReporterForCurrentThread()-> + ReportTestPartResult(result); + + if (result_type != TestPartResult::kSuccess && + result_type != TestPartResult::kSkip) { + // gtest_break_on_failure takes precedence over + // gtest_throw_on_failure. This allows a user to set the latter + // in the code (perhaps in order to use Google Test assertions + // with another testing framework) and specify the former on the + // command line for debugging. + if (GTEST_FLAG(break_on_failure)) { +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + // Using DebugBreak on Windows allows gtest to still break into a debugger + // when a failure happens and both the --gtest_break_on_failure and + // the --gtest_catch_exceptions flags are specified. + DebugBreak(); +#elif (!defined(__native_client__)) && \ + ((defined(__clang__) || defined(__GNUC__)) && \ + (defined(__x86_64__) || defined(__i386__))) + // with clang/gcc we can achieve the same effect on x86 by invoking int3 + asm("int3"); +#else + // Dereference nullptr through a volatile pointer to prevent the compiler + // from removing. We use this rather than abort() or __builtin_trap() for + // portability: some debuggers don't correctly trap abort(). + *static_cast(nullptr) = 1; +#endif // GTEST_OS_WINDOWS + } else if (GTEST_FLAG(throw_on_failure)) { +#if GTEST_HAS_EXCEPTIONS + throw internal::GoogleTestFailureException(result); +#else + // We cannot call abort() as it generates a pop-up in debug mode + // that cannot be suppressed in VC 7.1 or below. + exit(1); +#endif + } + } +} + +// Adds a TestProperty to the current TestResult object when invoked from +// inside a test, to current TestSuite's ad_hoc_test_result_ when invoked +// from SetUpTestSuite or TearDownTestSuite, or to the global property set +// when invoked elsewhere. If the result already contains a property with +// the same key, the value will be updated. +void UnitTest::RecordProperty(const std::string& key, + const std::string& value) { + impl_->RecordProperty(TestProperty(key, value)); +} + +// Runs all tests in this UnitTest object and prints the result. +// Returns 0 if successful, or 1 otherwise. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. 
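Where UnitTest::RecordProperty (just above) files a property depends on the call site, as its comment explains. A sketch of the scopes (DbTest and the property names are invented for illustration):

#include "gtest/gtest.h"

class DbTest : public ::testing::Test {
 protected:
  static void SetUpTestSuite() {
    // Recorded while no individual test is running: attached to the
    // suite-level result (the <testsuite> element in XML output).
    RecordProperty("db_schema", "v42");
  }
};

TEST_F(DbTest, Connects) {
  // Recorded from inside a test: attached to this test's own result
  // (the <testcase> element in XML output).
  RecordProperty("shard", "3");
  SUCCEED();
}

Calls made outside any test and test suite, for example from a global Environment, land on the top-level result instead.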
+int UnitTest::Run() { + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Google Test implements this protocol for catching that a test + // program exits before returning control to Google Test: + // + // 1. Upon start, Google Test creates a file whose absolute path + // is specified by the environment variable + // TEST_PREMATURE_EXIT_FILE. + // 2. When Google Test has finished its work, it deletes the file. + // + // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before + // running a Google-Test-based test program and check the existence + // of the file at the end of the test execution to see if it has + // exited prematurely. + + // If we are in the child process of a death test, don't + // create/delete the premature exit file, as doing so is unnecessary + // and will confuse the parent process. Otherwise, create/delete + // the file upon entering/leaving this function. If the program + // somehow exits before this function has a chance to return, the + // premature-exit file will be left undeleted, causing a test runner + // that understands the premature-exit-file protocol to report the + // test as having failed. + const internal::ScopedPrematureExitFile premature_exit_file( + in_death_test_child_process + ? nullptr + : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE")); + + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be + // used for the duration of the program. + impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); + +#if GTEST_OS_WINDOWS + // Either the user wants Google Test to catch exceptions thrown by the + // tests or this is executing in the context of death test child + // process. In either case the user does not want to see pop-up dialogs + // about crashes - they are expected. + if (impl()->catch_exceptions() || in_death_test_child_process) { +# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + // SetErrorMode doesn't exist on CE. + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); +# endif // !GTEST_OS_WINDOWS_MOBILE + +# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE + // Death test children can be terminated with _abort(). On Windows, + // _abort() can show a dialog with a warning message. This forces the + // abort message to go to stderr instead. + _set_error_mode(_OUT_TO_STDERR); +# endif + +# if defined(_MSC_VER) && !GTEST_OS_WINDOWS_MOBILE + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program. We need to suppress + // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement + // executed. Google Test will notify the user of any unexpected + // failure via stderr. + if (!GTEST_FLAG(break_on_failure)) + _set_abort_behavior( + 0x0, // Clear the following flags: + _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. + + // In debug mode, the Windows CRT can crash with an assertion over invalid + // input (e.g. passing an invalid file descriptor). The default handling + // for these assertions is to pop up a dialog and wait for user input. + // Instead ask the CRT to dump such assertions to stderr non-interactively. 
+ if (!IsDebuggerPresent()) { + (void)_CrtSetReportMode(_CRT_ASSERT, + _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); + (void)_CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); + } +# endif + } +#endif // GTEST_OS_WINDOWS + + return internal::HandleExceptionsInMethodIfSupported( + impl(), + &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") ? 0 : 1; +} + +// Returns the working directory when the first TEST() or TEST_F() was +// executed. +const char* UnitTest::original_working_dir() const { + return impl_->original_working_dir_.c_str(); +} + +// Returns the TestSuite object for the test that's currently running, +// or NULL if no test is running. +const TestSuite* UnitTest::current_test_suite() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_suite(); +} + +// Legacy API is still available but deprecated +#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ +const TestCase* UnitTest::current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_suite(); +} +#endif + +// Returns the TestInfo object for the test that's currently running, +// or NULL if no test is running. +const TestInfo* UnitTest::current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_info(); +} + +// Returns the random seed used at the start of the current test run. +int UnitTest::random_seed() const { return impl_->random_seed(); } + +// Returns ParameterizedTestSuiteRegistry object used to keep track of +// value-parameterized tests and instantiate and register them. +internal::ParameterizedTestSuiteRegistry& +UnitTest::parameterized_test_registry() GTEST_LOCK_EXCLUDED_(mutex_) { + return impl_->parameterized_test_registry(); +} + +// Creates an empty UnitTest. +UnitTest::UnitTest() { + impl_ = new internal::UnitTestImpl(this); +} + +// Destructor of UnitTest. +UnitTest::~UnitTest() { + delete impl_; +} + +// Pushes a trace defined by SCOPED_TRACE() on to the per-thread +// Google Test trace stack. +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().push_back(trace); +} + +// Pops a trace from the per-thread Google Test trace stack. +void UnitTest::PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().pop_back(); +} + +namespace internal { + +UnitTestImpl::UnitTestImpl(UnitTest* parent) + : parent_(parent), + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */) + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), + GTEST_DISABLE_MSC_WARNINGS_POP_() global_test_part_result_repoter_( + &default_global_test_part_result_reporter_), + per_thread_test_part_result_reporter_( + &default_per_thread_test_part_result_reporter_), + parameterized_test_registry_(), + parameterized_tests_registered_(false), + last_death_test_suite_(-1), + current_test_suite_(nullptr), + current_test_info_(nullptr), + ad_hoc_test_result_(), + os_stack_trace_getter_(nullptr), + post_flag_parse_init_performed_(false), + random_seed_(0), // Will be overridden by the flag before first use. + random_(0), // Will be reseeded before first use. 
+ start_timestamp_(0), + elapsed_time_(0), +#if GTEST_HAS_DEATH_TEST + death_test_factory_(new DefaultDeathTestFactory), +#endif + // Will be overridden by the flag before first use. + catch_exceptions_(false) { + listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter); +} + +UnitTestImpl::~UnitTestImpl() { + // Deletes every TestSuite. + ForEach(test_suites_, internal::Delete); + + // Deletes every Environment. + ForEach(environments_, internal::Delete); + + delete os_stack_trace_getter_; +} + +// Adds a TestProperty to the current TestResult object when invoked in a +// context of a test, to current test suite's ad_hoc_test_result when invoke +// from SetUpTestSuite/TearDownTestSuite, or to the global property set +// otherwise. If the result already contains a property with the same key, +// the value will be updated. +void UnitTestImpl::RecordProperty(const TestProperty& test_property) { + std::string xml_element; + TestResult* test_result; // TestResult appropriate for property recording. + + if (current_test_info_ != nullptr) { + xml_element = "testcase"; + test_result = &(current_test_info_->result_); + } else if (current_test_suite_ != nullptr) { + xml_element = "testsuite"; + test_result = &(current_test_suite_->ad_hoc_test_result_); + } else { + xml_element = "testsuites"; + test_result = &ad_hoc_test_result_; + } + test_result->RecordProperty(xml_element, test_property); +} + +#if GTEST_HAS_DEATH_TEST +// Disables event forwarding if the control is currently in a death test +// subprocess. Must not be called before InitGoogleTest. +void UnitTestImpl::SuppressTestEventsIfInSubprocess() { + if (internal_run_death_test_flag_.get() != nullptr) + listeners()->SuppressEventForwarding(); +} +#endif // GTEST_HAS_DEATH_TEST + +// Initializes event listeners performing XML output as specified by +// UnitTestOptions. Must not be called before InitGoogleTest. +void UnitTestImpl::ConfigureXmlOutput() { + const std::string& output_format = UnitTestOptions::GetOutputFormat(); + if (output_format == "xml") { + listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str())); + } else if (output_format == "json") { + listeners()->SetDefaultXmlGenerator(new JsonUnitTestResultPrinter( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str())); + } else if (output_format != "") { + GTEST_LOG_(WARNING) << "WARNING: unrecognized output format \"" + << output_format << "\" ignored."; + } +} + +#if GTEST_CAN_STREAM_RESULTS_ +// Initializes event listeners for streaming test results in string form. +// Must not be called before InitGoogleTest. +void UnitTestImpl::ConfigureStreamingOutput() { + const std::string& target = GTEST_FLAG(stream_result_to); + if (!target.empty()) { + const size_t pos = target.find(':'); + if (pos != std::string::npos) { + listeners()->Append(new StreamingListener(target.substr(0, pos), + target.substr(pos+1))); + } else { + GTEST_LOG_(WARNING) << "unrecognized streaming target \"" << target + << "\" ignored."; + } + } +} +#endif // GTEST_CAN_STREAM_RESULTS_ + +// Performs initialization dependent upon flag values obtained in +// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to +// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest +// this function is also called from RunAllTests. Since this function can be +// called more than once, it has to be idempotent. 
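ConfigureXmlOutput above selects the report format from the --gtest_output flag (or the GTEST_OUTPUT environment variable). The same selection can also be made programmatically; a sketch, where "report.json" is just an example path:

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  // Equivalent to running the binary with --gtest_output=json:report.json.
  ::testing::GTEST_FLAG(output) = "json:report.json";
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

Any format string other than "xml" or "json" is ignored with the warning shown above, leaving only the default console printer installed.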
+void UnitTestImpl::PostFlagParsingInit() { + // Ensures that this function does not execute more than once. + if (!post_flag_parse_init_performed_) { + post_flag_parse_init_performed_ = true; + +#if defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_) + // Register to send notifications about key process state changes. + listeners()->Append(new GTEST_CUSTOM_TEST_EVENT_LISTENER_()); +#endif // defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_) + +#if GTEST_HAS_DEATH_TEST + InitDeathTestSubprocessControlInfo(); + SuppressTestEventsIfInSubprocess(); +#endif // GTEST_HAS_DEATH_TEST + + // Registers parameterized tests. This makes parameterized tests + // available to the UnitTest reflection API without running + // RUN_ALL_TESTS. + RegisterParameterizedTests(); + + // Configures listeners for XML output. This makes it possible for users + // to shut down the default XML output before invoking RUN_ALL_TESTS. + ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Configures listeners for streaming test results to the specified server. + ConfigureStreamingOutput(); +#endif // GTEST_CAN_STREAM_RESULTS_ + +#if GTEST_HAS_ABSL + if (GTEST_FLAG(install_failure_signal_handler)) { + absl::FailureSignalHandlerOptions options; + absl::InstallFailureSignalHandler(options); + } +#endif // GTEST_HAS_ABSL + } +} + +// A predicate that checks the name of a TestSuite against a known +// value. +// +// This is used for implementation of the UnitTest class only. We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestSuiteNameIs is copyable. +class TestSuiteNameIs { + public: + // Constructor. + explicit TestSuiteNameIs(const std::string& name) : name_(name) {} + + // Returns true if and only if the name of test_suite matches name_. + bool operator()(const TestSuite* test_suite) const { + return test_suite != nullptr && + strcmp(test_suite->name(), name_.c_str()) == 0; + } + + private: + std::string name_; +}; + +// Finds and returns a TestSuite with the given name. If one doesn't +// exist, creates one and returns it. It's the CALLER'S +// RESPONSIBILITY to ensure that this function is only called WHEN THE +// TESTS ARE NOT SHUFFLED. +// +// Arguments: +// +// test_suite_name: name of the test suite +// type_param: the name of the test suite's type parameter, or NULL if +// this is not a typed or a type-parameterized test suite. +// set_up_tc: pointer to the function that sets up the test suite +// tear_down_tc: pointer to the function that tears down the test suite +TestSuite* UnitTestImpl::GetTestSuite( + const char* test_suite_name, const char* type_param, + internal::SetUpTestSuiteFunc set_up_tc, + internal::TearDownTestSuiteFunc tear_down_tc) { + // Can we find a TestSuite with the given name? + const auto test_suite = + std::find_if(test_suites_.rbegin(), test_suites_.rend(), + TestSuiteNameIs(test_suite_name)); + + if (test_suite != test_suites_.rend()) return *test_suite; + + // No. Let's create one. + auto* const new_test_suite = + new TestSuite(test_suite_name, type_param, set_up_tc, tear_down_tc); + + // Is this a death test suite? + if (internal::UnitTestOptions::MatchesFilter(test_suite_name, + kDeathTestSuiteFilter)) { + // Yes. Inserts the test suite after the last death test suite + // defined so far. This only works when the test suites haven't + // been shuffled. Otherwise we may end up running a death test + // after a non-death test. 
+ ++last_death_test_suite_; + test_suites_.insert(test_suites_.begin() + last_death_test_suite_, + new_test_suite); + } else { + // No. Appends to the end of the list. + test_suites_.push_back(new_test_suite); + } + + test_suite_indices_.push_back(static_cast(test_suite_indices_.size())); + return new_test_suite; +} + +// Helpers for setting up / tearing down the given environment. They +// are for use in the ForEach() function. +static void SetUpEnvironment(Environment* env) { env->SetUp(); } +static void TearDownEnvironment(Environment* env) { env->TearDown(); } + +// Runs all tests in this UnitTest object, prints the result, and +// returns true if all tests are successful. If any exception is +// thrown during a test, the test is considered to be failed, but the +// rest of the tests will still be run. +// +// When parameterized tests are enabled, it expands and registers +// parameterized tests first in RegisterParameterizedTests(). +// All other functions called from RunAllTests() may safely assume that +// parameterized tests are ready to be counted and run. +bool UnitTestImpl::RunAllTests() { + // True if and only if Google Test is initialized before RUN_ALL_TESTS() is + // called. + const bool gtest_is_initialized_before_run_all_tests = GTestIsInitialized(); + + // Do not run any test if the --help flag was specified. + if (g_help_flag) + return true; + + // Repeats the call to the post-flag parsing initialization in case the + // user didn't call InitGoogleTest. + PostFlagParsingInit(); + + // Even if sharding is not on, test runners may want to use the + // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding + // protocol. + internal::WriteToShardStatusFileIfNeeded(); + + // True if and only if we are in a subprocess for running a thread-safe-style + // death test. + bool in_subprocess_for_death_test = false; + +#if GTEST_HAS_DEATH_TEST + in_subprocess_for_death_test = + (internal_run_death_test_flag_.get() != nullptr); +# if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_) + if (in_subprocess_for_death_test) { + GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_(); + } +# endif // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_) +#endif // GTEST_HAS_DEATH_TEST + + const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex, + in_subprocess_for_death_test); + + // Compares the full test names with the filter to decide which + // tests to run. + const bool has_tests_to_run = FilterTests(should_shard + ? HONOR_SHARDING_PROTOCOL + : IGNORE_SHARDING_PROTOCOL) > 0; + + // Lists the tests and exits if the --gtest_list_tests flag was specified. + if (GTEST_FLAG(list_tests)) { + // This must be called *after* FilterTests() has been called. + ListTestsMatchingFilter(); + return true; + } + + random_seed_ = GTEST_FLAG(shuffle) ? + GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0; + + // True if and only if at least one test has failed. + bool failed = false; + + TestEventListener* repeater = listeners()->repeater(); + + start_timestamp_ = GetTimeInMillis(); + repeater->OnTestProgramStart(*parent_); + + // How many times to repeat the tests? We don't want to repeat them + // when we are inside the subprocess of a death test. + const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat); + // Repeats forever if the repeat count is negative. + const bool gtest_repeat_forever = repeat < 0; + for (int i = 0; gtest_repeat_forever || i != repeat; i++) { + // We want to preserve failures generated by ad-hoc test + // assertions executed before RUN_ALL_TESTS(). 
+ ClearNonAdHocTestResult(); + + const TimeInMillis start = GetTimeInMillis(); + + // Shuffles test suites and tests if requested. + if (has_tests_to_run && GTEST_FLAG(shuffle)) { + random()->Reseed(static_cast(random_seed_)); + // This should be done before calling OnTestIterationStart(), + // such that a test event listener can see the actual test order + // in the event. + ShuffleTests(); + } + + // Tells the unit test event listeners that the tests are about to start. + repeater->OnTestIterationStart(*parent_, i); + + // Runs each test suite if there is at least one test to run. + if (has_tests_to_run) { + // Sets up all environments beforehand. + repeater->OnEnvironmentsSetUpStart(*parent_); + ForEach(environments_, SetUpEnvironment); + repeater->OnEnvironmentsSetUpEnd(*parent_); + + // Runs the tests only if there was no fatal failure or skip triggered + // during global set-up. + if (Test::IsSkipped()) { + // Emit diagnostics when global set-up calls skip, as it will not be + // emitted by default. + TestResult& test_result = + *internal::GetUnitTestImpl()->current_test_result(); + for (int j = 0; j < test_result.total_part_count(); ++j) { + const TestPartResult& test_part_result = + test_result.GetTestPartResult(j); + if (test_part_result.type() == TestPartResult::kSkip) { + const std::string& result = test_part_result.message(); + printf("%s\n", result.c_str()); + } + } + fflush(stdout); + } else if (!Test::HasFatalFailure()) { + for (int test_index = 0; test_index < total_test_suite_count(); + test_index++) { + GetMutableSuiteCase(test_index)->Run(); + } + } + + // Tears down all environments in reverse order afterwards. + repeater->OnEnvironmentsTearDownStart(*parent_); + std::for_each(environments_.rbegin(), environments_.rend(), + TearDownEnvironment); + repeater->OnEnvironmentsTearDownEnd(*parent_); + } + + elapsed_time_ = GetTimeInMillis() - start; + + // Tells the unit test event listener that the tests have just finished. + repeater->OnTestIterationEnd(*parent_, i); + + // Gets the result and clears it. + if (!Passed()) { + failed = true; + } + + // Restores the original test order after the iteration. This + // allows the user to quickly repro a failure that happens in the + // N-th iteration without repeating the first (N - 1) iterations. + // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in + // case the user somehow changes the value of the flag somewhere + // (it's always safe to unshuffle the tests). + UnshuffleTests(); + + if (GTEST_FLAG(shuffle)) { + // Picks a new random seed for each iteration. + random_seed_ = GetNextRandomSeed(random_seed_); + } + } + + repeater->OnTestProgramEnd(*parent_); + + if (!gtest_is_initialized_before_run_all_tests) { + ColoredPrintf( + COLOR_RED, + "\nIMPORTANT NOTICE - DO NOT IGNORE:\n" + "This test program did NOT call " GTEST_INIT_GOOGLE_TEST_NAME_ + "() before calling RUN_ALL_TESTS(). This is INVALID. Soon " GTEST_NAME_ + " will start to enforce the valid usage. " + "Please fix it ASAP, or IT WILL START TO FAIL.\n"); // NOLINT +#if GTEST_FOR_GOOGLE_ + ColoredPrintf(COLOR_RED, + "For more details, see http://wiki/Main/ValidGUnitMain.\n"); +#endif // GTEST_FOR_GOOGLE_ + } + + return !failed; +} + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. 
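The environment set-up/tear-down loop in RunAllTests above iterates over environments registered through AddGlobalTestEnvironment. A sketch of how that list gets populated (NetworkEnv is a hypothetical example):

#include "gtest/gtest.h"

// Environments run SetUp() in registration order before the first test and
// TearDown() in reverse order after the last one, once per repeat iteration.
class NetworkEnv : public ::testing::Environment {
 public:
  void SetUp() override { /* e.g. start a local loopback server */ }
  void TearDown() override { /* stop it */ }
};

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::AddGlobalTestEnvironment(new NetworkEnv);  // gtest takes ownership
  return RUN_ALL_TESTS();
}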
+void WriteToShardStatusFileIfNeeded() { + const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile); + if (test_shard_file != nullptr) { + FILE* const file = posix::FOpen(test_shard_file, "w"); + if (file == nullptr) { + ColoredPrintf(COLOR_RED, + "Could not write to the test shard status file \"%s\" " + "specified by the %s environment variable.\n", + test_shard_file, kTestShardStatusFile); + fflush(stdout); + exit(EXIT_FAILURE); + } + fclose(file); + } +} + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (i.e., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. +bool ShouldShard(const char* total_shards_env, + const char* shard_index_env, + bool in_subprocess_for_death_test) { + if (in_subprocess_for_death_test) { + return false; + } + + const int32_t total_shards = Int32FromEnvOrDie(total_shards_env, -1); + const int32_t shard_index = Int32FromEnvOrDie(shard_index_env, -1); + + if (total_shards == -1 && shard_index == -1) { + return false; + } else if (total_shards == -1 && shard_index != -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards << " unset.\n"; + ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (total_shards != -1 && shard_index == -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; + ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (shard_index < 0 || shard_index >= total_shards) { + const Message msg = Message() + << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } + + return total_shards > 1; +} + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. +int32_t Int32FromEnvOrDie(const char* var, int32_t default_val) { + const char* str_val = posix::GetEnv(var); + if (str_val == nullptr) { + return default_val; + } + + int32_t result; + if (!ParseInt32(Message() << "The value of environment variable " << var, + str_val, &result)) { + exit(EXIT_FAILURE); + } + return result; +} + +// Given the total number of shards, the shard index, and the test id, +// returns true if and only if the test should be run on this shard. The test id +// is some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) { + return (test_id % total_shards) == shard_index; +} + +// Compares the name of each test with the user-specified filter to +// decide whether the test should be run, then records the result in +// each TestSuite and TestInfo object. 
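A worked illustration of the distribution rule implemented by ShouldRunTestOnShard above (test ids number the runnable tests in registration order; the shard count of 3 and the binary name are arbitrary examples):

#include <cstdio>

// With GTEST_TOTAL_SHARDS=3, runnable test t is executed by the process whose
// GTEST_SHARD_INDEX equals t % 3, so a runner starts three processes, e.g.:
//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=0 ./unit_test
//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=1 ./unit_test
//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=2 ./unit_test
int main() {
  const int total_shards = 3;
  for (int test_id = 0; test_id < 6; ++test_id) {
    std::printf("test %d -> shard %d\n", test_id, test_id % total_shards);
  }
  return 0;
}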
+// If shard_tests == true, further filters tests based on sharding +// variables in the environment - see +// https://github.com/google/googletest/blob/master/googletest/docs/advanced.md +// . Returns the number of tests that should run. +int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { + const int32_t total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestTotalShards, -1) : -1; + const int32_t shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + + // num_runnable_tests are the number of tests that will + // run across all shards (i.e., match filter and are not disabled). + // num_selected_tests are the number of tests to be run on + // this shard. + int num_runnable_tests = 0; + int num_selected_tests = 0; + for (auto* test_suite : test_suites_) { + const std::string& test_suite_name = test_suite->name(); + test_suite->set_should_run(false); + + for (size_t j = 0; j < test_suite->test_info_list().size(); j++) { + TestInfo* const test_info = test_suite->test_info_list()[j]; + const std::string test_name(test_info->name()); + // A test is disabled if test suite name or test name matches + // kDisableTestFilter. + const bool is_disabled = internal::UnitTestOptions::MatchesFilter( + test_suite_name, kDisableTestFilter) || + internal::UnitTestOptions::MatchesFilter( + test_name, kDisableTestFilter); + test_info->is_disabled_ = is_disabled; + + const bool matches_filter = internal::UnitTestOptions::FilterMatchesTest( + test_suite_name, test_name); + test_info->matches_filter_ = matches_filter; + + const bool is_runnable = + (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) && + matches_filter; + + const bool is_in_another_shard = + shard_tests != IGNORE_SHARDING_PROTOCOL && + !ShouldRunTestOnShard(total_shards, shard_index, num_runnable_tests); + test_info->is_in_another_shard_ = is_in_another_shard; + const bool is_selected = is_runnable && !is_in_another_shard; + + num_runnable_tests += is_runnable; + num_selected_tests += is_selected; + + test_info->should_run_ = is_selected; + test_suite->set_should_run(test_suite->should_run() || is_selected); + } + } + return num_selected_tests; +} + +// Prints the given C-string on a single line by replacing all '\n' +// characters with string "\\n". If the output takes more than +// max_length characters, only prints the first max_length characters +// and "...". +static void PrintOnOneLine(const char* str, int max_length) { + if (str != nullptr) { + for (int i = 0; *str != '\0'; ++str) { + if (i >= max_length) { + printf("..."); + break; + } + if (*str == '\n') { + printf("\\n"); + i += 2; + } else { + printf("%c", *str); + ++i; + } + } + } +} + +// Prints the names of the tests matching the user-specified filter flag. +void UnitTestImpl::ListTestsMatchingFilter() { + // Print at most this many characters for each type/value parameter. + const int kMaxParamLength = 250; + + for (auto* test_suite : test_suites_) { + bool printed_test_suite_name = false; + + for (size_t j = 0; j < test_suite->test_info_list().size(); j++) { + const TestInfo* const test_info = test_suite->test_info_list()[j]; + if (test_info->matches_filter_) { + if (!printed_test_suite_name) { + printed_test_suite_name = true; + printf("%s.", test_suite->name()); + if (test_suite->type_param() != nullptr) { + printf(" # %s = ", kTypeParamLabel); + // We print the type parameter on a single line to make + // the output easy to parse by a program. 
+ PrintOnOneLine(test_suite->type_param(), kMaxParamLength); + } + printf("\n"); + } + printf(" %s", test_info->name()); + if (test_info->value_param() != nullptr) { + printf(" # %s = ", kValueParamLabel); + // We print the value parameter on a single line to make the + // output easy to parse by a program. + PrintOnOneLine(test_info->value_param(), kMaxParamLength); + } + printf("\n"); + } + } + } + fflush(stdout); + const std::string& output_format = UnitTestOptions::GetOutputFormat(); + if (output_format == "xml" || output_format == "json") { + FILE* fileout = OpenFileForWriting( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str()); + std::stringstream stream; + if (output_format == "xml") { + XmlUnitTestResultPrinter( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str()) + .PrintXmlTestsList(&stream, test_suites_); + } else if (output_format == "json") { + JsonUnitTestResultPrinter( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str()) + .PrintJsonTestList(&stream, test_suites_); + } + fprintf(fileout, "%s", StringStreamToString(&stream).c_str()); + fclose(fileout); + } +} + +// Sets the OS stack trace getter. +// +// Does nothing if the input and the current OS stack trace getter are +// the same; otherwise, deletes the old getter and makes the input the +// current getter. +void UnitTestImpl::set_os_stack_trace_getter( + OsStackTraceGetterInterface* getter) { + if (os_stack_trace_getter_ != getter) { + delete os_stack_trace_getter_; + os_stack_trace_getter_ = getter; + } +} + +// Returns the current OS stack trace getter if it is not NULL; +// otherwise, creates an OsStackTraceGetter, makes it the current +// getter, and returns it. +OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() { + if (os_stack_trace_getter_ == nullptr) { +#ifdef GTEST_OS_STACK_TRACE_GETTER_ + os_stack_trace_getter_ = new GTEST_OS_STACK_TRACE_GETTER_; +#else + os_stack_trace_getter_ = new OsStackTraceGetter; +#endif // GTEST_OS_STACK_TRACE_GETTER_ + } + + return os_stack_trace_getter_; +} + +// Returns the most specific TestResult currently running. +TestResult* UnitTestImpl::current_test_result() { + if (current_test_info_ != nullptr) { + return ¤t_test_info_->result_; + } + if (current_test_suite_ != nullptr) { + return ¤t_test_suite_->ad_hoc_test_result_; + } + return &ad_hoc_test_result_; +} + +// Shuffles all test suites, and the tests within each test suite, +// making sure that death tests are still run first. +void UnitTestImpl::ShuffleTests() { + // Shuffles the death test suites. + ShuffleRange(random(), 0, last_death_test_suite_ + 1, &test_suite_indices_); + + // Shuffles the non-death test suites. + ShuffleRange(random(), last_death_test_suite_ + 1, + static_cast(test_suites_.size()), &test_suite_indices_); + + // Shuffles the tests inside each test suite. + for (auto& test_suite : test_suites_) { + test_suite->ShuffleTests(random()); + } +} + +// Restores the test suites and tests to their order before the first shuffle. +void UnitTestImpl::UnshuffleTests() { + for (size_t i = 0; i < test_suites_.size(); i++) { + // Unshuffles the tests in each test suite. + test_suites_[i]->UnshuffleTests(); + // Resets the index of each test suite. + test_suite_indices_[i] = static_cast(i); + } +} + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. 
The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, + int skip_count) { + // We pass skip_count + 1 to skip this wrapper function in addition + // to what the user really wants to skip. + return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); +} + +// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to +// suppress unreachable code warnings. +namespace { +class ClassUniqueToAlwaysTrue {}; +} + +bool IsTrue(bool condition) { return condition; } + +bool AlwaysTrue() { +#if GTEST_HAS_EXCEPTIONS + // This condition is always false so AlwaysTrue() never actually throws, + // but it makes the compiler think that it may throw. + if (IsTrue(false)) + throw ClassUniqueToAlwaysTrue(); +#endif // GTEST_HAS_EXCEPTIONS + return true; +} + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +bool SkipPrefix(const char* prefix, const char** pstr) { + const size_t prefix_len = strlen(prefix); + if (strncmp(*pstr, prefix, prefix_len) == 0) { + *pstr += prefix_len; + return true; + } + return false; +} + +// Parses a string as a command line flag. The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or NULL if the parsing failed. +static const char* ParseFlagValue(const char* str, const char* flag, + bool def_optional) { + // str and flag must not be NULL. + if (str == nullptr || flag == nullptr) return nullptr; + + // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. + const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag; + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; + + // Skips the flag name. + const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) { + return flag_end; + } + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return nullptr; + + // Returns the string after "=". + return flag_end + 1; +} + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true as long as it does +// not start with '0', 'f', or 'F'. +// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +static bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Converts the string value to a bool. 
+ *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); + return true; +} + +// Parses a string for an int32_t flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Sets *value to the value of the flag. + return ParseInt32(Message() << "The value of flag --" << flag, + value_str, value); +} + +// Parses a string for a string flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +template +static bool ParseStringFlag(const char* str, const char* flag, String* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Sets *value to the value of the flag. + *value = value_str; + return true; +} + +// Determines whether a string has a prefix that Google Test uses for its +// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_. +// If Google Test detects that a command line flag has its prefix but is not +// recognized, it will print its help message. Flags starting with +// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test +// internal flags and do not trigger the help message. +static bool HasGoogleTestFlagPrefix(const char* str) { + return (SkipPrefix("--", &str) || + SkipPrefix("-", &str) || + SkipPrefix("/", &str)) && + !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) && + (SkipPrefix(GTEST_FLAG_PREFIX_, &str) || + SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str)); +} + +// Prints a string containing code-encoded text. The following escape +// sequences can be used in the string to control the text color: +// +// @@ prints a single '@' character. +// @R changes the color to red. +// @G changes the color to green. +// @Y changes the color to yellow. +// @D changes to the default terminal text color. +// +static void PrintColorEncoded(const char* str) { + GTestColor color = COLOR_DEFAULT; // The current color. + + // Conceptually, we split the string into segments divided by escape + // sequences. Then we print one segment at a time. At the end of + // each iteration, the str pointer advances to the beginning of the + // next segment. + for (;;) { + const char* p = strchr(str, '@'); + if (p == nullptr) { + ColoredPrintf(color, "%s", str); + return; + } + + ColoredPrintf(color, "%s", std::string(str, p).c_str()); + + const char ch = p[1]; + str = p + 2; + if (ch == '@') { + ColoredPrintf(color, "@"); + } else if (ch == 'D') { + color = COLOR_DEFAULT; + } else if (ch == 'R') { + color = COLOR_RED; + } else if (ch == 'G') { + color = COLOR_GREEN; + } else if (ch == 'Y') { + color = COLOR_YELLOW; + } else { + --str; + } + } +} + +static const char kColorEncodedHelpMessage[] = +"This program contains tests written using " GTEST_NAME_ ". You can use the\n" +"following command line flags to control its behavior:\n" +"\n" +"Test Selection:\n" +" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n" +" List the names of all tests instead of running them. 
The name of\n" +" TEST(Foo, Bar) is \"Foo.Bar\".\n" +" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS" + "[@G-@YNEGATIVE_PATTERNS]@D\n" +" Run only the tests whose name matches one of the positive patterns but\n" +" none of the negative patterns. '?' matches any single character; '*'\n" +" matches any substring; ':' separates two patterns.\n" +" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n" +" Run all disabled tests too.\n" +"\n" +"Test Execution:\n" +" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n" +" Run the tests repeatedly; use a negative count to repeat forever.\n" +" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n" +" Randomize tests' orders on every iteration.\n" +" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n" +" Random number seed to use for shuffling test orders (between 1 and\n" +" 99999, or 0 to use a seed based on the current time).\n" +"\n" +"Test Output:\n" +" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n" +" Enable/disable colored output. The default is @Gauto@D.\n" +" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n" +" Don't print the elapsed time of each test.\n" +" @G--" GTEST_FLAG_PREFIX_ "output=@Y(@Gjson@Y|@Gxml@Y)[@G:@YDIRECTORY_PATH@G" + GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n" +" Generate a JSON or XML report in the given directory or with the given\n" +" file name. @YFILE_PATH@D defaults to @Gtest_detail.xml@D.\n" +# if GTEST_CAN_STREAM_RESULTS_ +" @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n" +" Stream test results to the given server.\n" +# endif // GTEST_CAN_STREAM_RESULTS_ +"\n" +"Assertion Behavior:\n" +# if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n" +" Set the default death test style.\n" +# endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n" +" Turn assertion failures into debugger break-points.\n" +" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n" +" Turn assertion failures into C++ exceptions for use by an external\n" +" test framework.\n" +" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n" +" Do not report exceptions as test failures. Instead, allow them\n" +" to crash the program or throw a pop-up (on Windows).\n" +"\n" +"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set " + "the corresponding\n" +"environment variable of a flag (all letters in upper-case). For example, to\n" +"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_ + "color=no@D or set\n" +"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n" +"\n" +"For more information, please read the " GTEST_NAME_ " documentation at\n" +"@G" GTEST_PROJECT_URL_ "@D. 
If you find a bug in " GTEST_NAME_ "\n" +"(not one in your own code or tests), please report it to\n" +"@G<" GTEST_DEV_EMAIL_ ">@D.\n"; + +static bool ParseGoogleTestFlag(const char* const arg) { + return ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag, + &GTEST_FLAG(also_run_disabled_tests)) || + ParseBoolFlag(arg, kBreakOnFailureFlag, + &GTEST_FLAG(break_on_failure)) || + ParseBoolFlag(arg, kCatchExceptionsFlag, + &GTEST_FLAG(catch_exceptions)) || + ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) || + ParseStringFlag(arg, kDeathTestStyleFlag, + &GTEST_FLAG(death_test_style)) || + ParseBoolFlag(arg, kDeathTestUseFork, + &GTEST_FLAG(death_test_use_fork)) || + ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) || + ParseStringFlag(arg, kInternalRunDeathTestFlag, + &GTEST_FLAG(internal_run_death_test)) || + ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) || + ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) || + ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) || + ParseBoolFlag(arg, kPrintUTF8Flag, &GTEST_FLAG(print_utf8)) || + ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) || + ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) || + ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) || + ParseInt32Flag(arg, kStackTraceDepthFlag, + &GTEST_FLAG(stack_trace_depth)) || + ParseStringFlag(arg, kStreamResultToFlag, + &GTEST_FLAG(stream_result_to)) || + ParseBoolFlag(arg, kThrowOnFailureFlag, + &GTEST_FLAG(throw_on_failure)); +} + +#if GTEST_USE_OWN_FLAGFILE_FLAG_ +static void LoadFlagsFromFile(const std::string& path) { + FILE* flagfile = posix::FOpen(path.c_str(), "r"); + if (!flagfile) { + GTEST_LOG_(FATAL) << "Unable to open file \"" << GTEST_FLAG(flagfile) + << "\""; + } + std::string contents(ReadEntireFile(flagfile)); + posix::FClose(flagfile); + std::vector<std::string> lines; + SplitString(contents, '\n', &lines); + for (size_t i = 0; i < lines.size(); ++i) { + if (lines[i].empty()) + continue; + if (!ParseGoogleTestFlag(lines[i].c_str())) + g_help_flag = true; + } +} +#endif // GTEST_USE_OWN_FLAGFILE_FLAG_ + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. The type parameter CharType can be +// instantiated to either char or wchar_t. +template <typename CharType> +void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) { + for (int i = 1; i < *argc; i++) { + const std::string arg_string = StreamableToString(argv[i]); + const char* const arg = arg_string.c_str(); + + using internal::ParseBoolFlag; + using internal::ParseInt32Flag; + using internal::ParseStringFlag; + + bool remove_flag = false; + if (ParseGoogleTestFlag(arg)) { + remove_flag = true; +#if GTEST_USE_OWN_FLAGFILE_FLAG_ + } else if (ParseStringFlag(arg, kFlagfileFlag, &GTEST_FLAG(flagfile))) { + LoadFlagsFromFile(GTEST_FLAG(flagfile)); + remove_flag = true; +#endif // GTEST_USE_OWN_FLAGFILE_FLAG_ + } else if (arg_string == "--help" || arg_string == "-h" || + arg_string == "-?" || arg_string == "/?" || + HasGoogleTestFlagPrefix(arg)) { + // Both help flag and unrecognized Google Test flags (excluding + // internal ones) trigger help display. + g_help_flag = true; + } + + if (remove_flag) { + // Shift the remainder of the argv list left by one. Note + // that argv has (*argc + 1) elements, the last one always being + // NULL. The following loop moves the trailing NULL element as + // well. + for (int j = i; j != *argc; j++) { + argv[j] = argv[j + 1]; + } + + // Decrements the argument count.
+ (*argc)--; + + // We also need to decrement the iterator as we just removed + // an element. + i--; + } + } + + if (g_help_flag) { + // We print the help here instead of in RUN_ALL_TESTS(), as the + // latter may not be called at all if the user is using Google + // Test with another testing framework. + PrintColorEncoded(kColorEncodedHelpMessage); + } +} + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. +void ParseGoogleTestFlagsOnly(int* argc, char** argv) { + ParseGoogleTestFlagsOnlyImpl(argc, argv); + + // Fix the value of *_NSGetArgc() on macOS, but if and only if + // *_NSGetArgv() == argv + // Only applicable to char** version of argv +#if GTEST_OS_MAC +#ifndef GTEST_OS_IOS + if (*_NSGetArgv() == argv) { + *_NSGetArgc() = *argc; + } +#endif +#endif +} +void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) { + ParseGoogleTestFlagsOnlyImpl(argc, argv); +} + +// The internal implementation of InitGoogleTest(). +// +// The type parameter CharType can be instantiated to either char or +// wchar_t. +template +void InitGoogleTestImpl(int* argc, CharType** argv) { + // We don't want to run the initialization code twice. + if (GTestIsInitialized()) return; + + if (*argc <= 0) return; + + g_argvs.clear(); + for (int i = 0; i != *argc; i++) { + g_argvs.push_back(StreamableToString(argv[i])); + } + +#if GTEST_HAS_ABSL + absl::InitializeSymbolizer(g_argvs[0].c_str()); +#endif // GTEST_HAS_ABSL + + ParseGoogleTestFlagsOnly(argc, argv); + GetUnitTestImpl()->PostFlagParsingInit(); +} + +} // namespace internal + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +void InitGoogleTest(int* argc, char** argv) { +#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) + GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv); +#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) + internal::InitGoogleTestImpl(argc, argv); +#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) +} + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +void InitGoogleTest(int* argc, wchar_t** argv) { +#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) + GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv); +#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) + internal::InitGoogleTestImpl(argc, argv); +#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) +} + +// This overloaded version can be used on Arduino/embedded platforms where +// there is no argc/argv. 
+void InitGoogleTest() { + // Since Arduino doesn't have a command line, fake out the argc/argv arguments + int argc = 1; + const auto arg0 = "dummy"; + char* argv0 = const_cast(arg0); + char** argv = &argv0; + +#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) + GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(&argc, argv); +#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) + internal::InitGoogleTestImpl(&argc, argv); +#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) +} + +std::string TempDir() { +#if defined(GTEST_CUSTOM_TEMPDIR_FUNCTION_) + return GTEST_CUSTOM_TEMPDIR_FUNCTION_(); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + return "\\temp\\"; +#elif GTEST_OS_WINDOWS + const char* temp_dir = internal::posix::GetEnv("TEMP"); + if (temp_dir == nullptr || temp_dir[0] == '\0') + return "\\temp\\"; + else if (temp_dir[strlen(temp_dir) - 1] == '\\') + return temp_dir; + else + return std::string(temp_dir) + "\\"; +#elif GTEST_OS_LINUX_ANDROID + const char* temp_dir = internal::posix::GetEnv("TEST_TMPDIR"); + if (temp_dir == nullptr || temp_dir[0] == '\0') + return "/data/local/tmp/"; + else + return temp_dir; +#else + return "/tmp/"; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Class ScopedTrace + +// Pushes the given source file location and message onto a per-thread +// trace stack maintained by Google Test. +void ScopedTrace::PushTrace(const char* file, int line, std::string message) { + internal::TraceInfo trace; + trace.file = file; + trace.line = line; + trace.message.swap(message); + + UnitTest::GetInstance()->PushGTestTrace(trace); +} + +// Pops the info pushed by the c'tor. +ScopedTrace::~ScopedTrace() + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { + UnitTest::GetInstance()->PopGTestTrace(); +} + +} // namespace testing diff --git a/source/3rdparty/gtest/src/gtest_main.cc b/source/3rdparty/gtest/src/gtest_main.cc new file mode 100644 index 0000000..46b27c3 --- /dev/null +++ b/source/3rdparty/gtest/src/gtest_main.cc @@ -0,0 +1,54 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include +#include "gtest/gtest.h" + +#if GTEST_OS_ESP8266 || GTEST_OS_ESP32 +#if GTEST_OS_ESP8266 +extern "C" { +#endif +void setup() { + testing::InitGoogleTest(); +} + +void loop() { RUN_ALL_TESTS(); } + +#if GTEST_OS_ESP8266 +} +#endif + +#else + +GTEST_API_ int main(int argc, char **argv) { + printf("Running main() from %s\n", __FILE__); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} +#endif diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index 4f65c00..28fd5d3 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -82,11 +82,13 @@ if (KYTY_LINKER STREQUAL LD) set(KYTY_LD_OPTIONS "-Wl,--image-base=0x100000000000") endif() -project(Kyty${KYTY_PROJECT_NAME}${CMAKE_BUILD_TYPE}${KYTY_COMPILER} VERSION 0.0.13) +project(Kyty${KYTY_PROJECT_NAME}${CMAKE_BUILD_TYPE}${KYTY_COMPILER} VERSION 0.0.14) include(src_script.cmake) include_directories( + 3rdparty/gtest/include + 3rdparty/gtest 3rdparty/lua/include 3rdparty/rijndael/source 3rdparty/miniz @@ -121,15 +123,16 @@ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0.0) #scripts_obj #sys_obj launcher + unit_test ) list(APPEND KYTY_CLANG_TYDY emulator_obj core_obj - #core #math_obj #scripts_obj #sys_obj launcher + unit_test ) endif() @@ -139,6 +142,7 @@ config_compiler_and_linker() add_subdirectory(3rdparty) add_subdirectory(emulator) +add_subdirectory(unit_test) if(NOT ${KYTY_PROJECT_NAME} MATCHES "Build_Tools") add_subdirectory(launcher) endif() @@ -146,6 +150,15 @@ add_subdirectory(lib) add_executable(fc_script ${KYTY_SCRIPT_SRC}) +if(MINGW) + SET(UNIT_TEST_LIB -Wl,--whole-archive unit_test -Wl,--no-whole-archive) +endif() + +if(MSVC) + SET(UNIT_TEST_LIB unit_test) +endif() + +target_link_libraries(fc_script ${UNIT_TEST_LIB}) target_link_libraries(fc_script core) target_link_libraries(fc_script sys) target_link_libraries(fc_script math) @@ -163,6 +176,7 @@ target_link_libraries(fc_script zstd) target_link_libraries(fc_script easy_profiler) target_link_libraries(fc_script ws2_32) target_link_libraries(fc_script psapi) +target_link_libraries(fc_script cpuinfo) if (CLANG AND NOT MSVC) target_link_libraries(fc_script pthread) endif() diff --git a/source/KytyScripts.cpp b/source/KytyScripts.cpp index 08c7284..25e1a21 100644 --- a/source/KytyScripts.cpp +++ b/source/KytyScripts.cpp @@ -11,6 +11,7 @@ #include "Kyty/Scripts/Scripts.h" #include "Kyty/Sys/SysDbg.h" #include "Kyty/Sys/SysSync.h" +#include "Kyty/UnitTest.h" #include "Emulator/Emulator.h" @@ -22,6 +23,7 @@ using namespace Math; using namespace Scripts; using namespace Emulator; using namespace BuildTools; +using namespace UnitTest; #if KYTY_PLATFORM == KYTY_PLATFORM_ANDROID #error "can't compile for android" @@ -100,8 +102,9 @@ int main(int argc, char* argv[]) slist.SetArgs(argc, argv); - auto Scripts = ScriptsSubsystem::Instance(); - auto Core = CoreSubsystem::Instance(); + auto Scripts = ScriptsSubsystem::Instance(); + auto Core = CoreSubsystem::Instance(); + auto UnitTest = UnitTestSubsystem::Instance(); #if KYTY_PROJECT != KYTY_PROJECT_BUILD_TOOLS auto Math = MathSubsystem::Instance(); auto SDL = SDLSubsystem::Instance(); @@ -119,6 +122,7 @@ int main(int argc, char* argv[]) slist.Add(Emulator, {Core, Scripts}); #endif slist.Add(BuildTools, {Core, Scripts}); + slist.Add(UnitTest, {Core}); if (!slist.InitAll(false)) { diff --git a/source/emulator/CMakeLists.txt b/source/emulator/CMakeLists.txt index d884480..b60e74d 100644 --- a/source/emulator/CMakeLists.txt +++ b/source/emulator/CMakeLists.txt @@ -15,7 +15,7 @@ endif() 
add_library(emulator_obj OBJECT ${emulator_src}) add_library(emulator STATIC $) -target_link_libraries(emulator core math scripts lua vulkan-1 spirv-tools-opt spirv-tools easy_profiler) +target_link_libraries(emulator core math scripts lua unit_test vulkan-1 spirv-tools-opt spirv-tools easy_profiler) target_include_directories(emulator PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") @@ -26,6 +26,7 @@ list(APPEND inc_headers ${CMAKE_SOURCE_DIR}/3rdparty/vulkan/include ${CMAKE_SOURCE_DIR}/3rdparty/easy_profiler/include ${CMAKE_SOURCE_DIR}/3rdparty/xxhash/include + ${CMAKE_SOURCE_DIR}/3rdparty/cpuinfo/include ) if (MSVC AND CLANG) diff --git a/source/emulator/include/Emulator/Audio.h b/source/emulator/include/Emulator/Audio.h index a10dbc7..11997e1 100644 --- a/source/emulator/include/Emulator/Audio.h +++ b/source/emulator/include/Emulator/Audio.h @@ -12,6 +12,17 @@ namespace Kyty::Libs::Audio { KYTY_SUBSYSTEM_DEFINE(Audio); +namespace AudioOut { + +struct AudioOutOutputParam; + +int KYTY_SYSV_ABI AudioOutInit(); +int KYTY_SYSV_ABI AudioOutOpen(int user_id, int type, int index, uint32_t len, uint32_t freq, uint32_t param); +int KYTY_SYSV_ABI AudioOutSetVolume(int handle, uint32_t flag, int* vol); +int KYTY_SYSV_ABI AudioOutOutputs(AudioOutOutputParam* param, uint32_t num); + +} // namespace AudioOut + namespace VoiceQoS { int KYTY_SYSV_ABI VoiceQoSInit(void* mem_block, uint32_t mem_size, int32_t app_type); diff --git a/source/emulator/include/Emulator/Libs/Errno.h b/source/emulator/include/Emulator/Libs/Errno.h index 4c1352e..e2aae23 100644 --- a/source/emulator/include/Emulator/Libs/Errno.h +++ b/source/emulator/include/Emulator/Libs/Errno.h @@ -14,7 +14,7 @@ constexpr int OK = 0; auto result = func; \ if (result != OK) \ { \ - *GetErrorAddr() = LibKernel::KernelToPosix(result); \ + *Posix::GetErrorAddr() = LibKernel::KernelToPosix(result); \ return -1; \ } \ return 0; \ @@ -39,7 +39,7 @@ constexpr int OK = 0; auto result = func; \ if (result != OK) \ { \ - *GetErrorAddr() = Network::NetToPosix(result); \ + *Posix::GetErrorAddr() = Network::NetToPosix(result); \ return -1; \ } \ return 0; \ @@ -322,6 +322,32 @@ constexpr int VIDEO_OUT_ERROR_ENOMEM = -2144792564; /* } // namespace VideoOut +namespace Audio { + +constexpr int AUDIO_OUT_ERROR_NOT_OPENED = -2144993279; /* 0x80260001 */ +constexpr int AUDIO_OUT_ERROR_BUSY = -2144993278; /* 0x80260002 */ +constexpr int AUDIO_OUT_ERROR_INVALID_PORT = -2144993277; /* 0x80260003 */ +constexpr int AUDIO_OUT_ERROR_INVALID_POINTER = -2144993276; /* 0x80260004 */ +constexpr int AUDIO_OUT_ERROR_PORT_FULL = -2144993275; /* 0x80260005 */ +constexpr int AUDIO_OUT_ERROR_INVALID_SIZE = -2144993274; /* 0x80260006 */ +constexpr int AUDIO_OUT_ERROR_INVALID_FORMAT = -2144993273; /* 0x80260007 */ +constexpr int AUDIO_OUT_ERROR_INVALID_SAMPLE_FREQ = -2144993272; /* 0x80260008 */ +constexpr int AUDIO_OUT_ERROR_INVALID_VOLUME = -2144993271; /* 0x80260009 */ +constexpr int AUDIO_OUT_ERROR_INVALID_PORT_TYPE = -2144993270; /* 0x8026000A */ +constexpr int AUDIO_OUT_ERROR_INVALID_CONF_TYPE = -2144993268; /* 0x8026000C */ +constexpr int AUDIO_OUT_ERROR_OUT_OF_MEMORY = -2144993267; /* 0x8026000D */ +constexpr int AUDIO_OUT_ERROR_ALREADY_INIT = -2144993266; /* 0x8026000E */ +constexpr int AUDIO_OUT_ERROR_NOT_INIT = -2144993265; /* 0x8026000F */ +constexpr int AUDIO_OUT_ERROR_MEMORY = -2144993264; /* 0x80260010 */ +constexpr int AUDIO_OUT_ERROR_SYSTEM_RESOURCE = -2144993263; /* 0x80260011 */ +constexpr int AUDIO_OUT_ERROR_TRANS_EVENT = -2144993262; /* 0x80260012 */ +constexpr 
int AUDIO_OUT_ERROR_INVALID_FLAG = -2144993261; /* 0x80260013 */ +constexpr int AUDIO_OUT_ERROR_INVALID_MIXLEVEL = -2144993260; /* 0x80260014 */ +constexpr int AUDIO_OUT_ERROR_INVALID_ARG = -2144993259; /* 0x80260015 */ +constexpr int AUDIO_OUT_ERROR_INVALID_PARAM = -2144993258; /* 0x80260016 */ + +} // namespace Audio + namespace SystemService { constexpr int SYSTEM_SERVICE_ERROR_INTERNAL = -2136932351; /* 0x80A10001 */ @@ -588,6 +614,34 @@ constexpr int USER_SERVICE_ERROR_BUFFER_TOO_SHORT = -2137653238; /* 0x809 } // namespace UserService +namespace SaveData { + +constexpr int SAVE_DATA_ERROR_PARAMETER = -2137063424; /* 0x809F0000 */ +constexpr int SAVE_DATA_ERROR_NOT_INITIALIZED = -2137063423; /* 0x809F0001 */ +constexpr int SAVE_DATA_ERROR_OUT_OF_MEMORY = -2137063422; /* 0x809F0002 */ +constexpr int SAVE_DATA_ERROR_BUSY = -2137063421; /* 0x809F0003 */ +constexpr int SAVE_DATA_ERROR_NOT_MOUNTED = -2137063420; /* 0x809F0004 */ +constexpr int SAVE_DATA_ERROR_NO_PERMISSION = -2137063419; /* 0x809F0005 */ +constexpr int SAVE_DATA_ERROR_FINGERPRINT_MISMATCH = -2137063418; /* 0x809F0006 */ +constexpr int SAVE_DATA_ERROR_EXISTS = -2137063417; /* 0x809F0007 */ +constexpr int SAVE_DATA_ERROR_NOT_FOUND = -2137063416; /* 0x809F0008 */ +constexpr int SAVE_DATA_ERROR_NO_SPACE_FS = -2137063414; /* 0x809F000A */ +constexpr int SAVE_DATA_ERROR_INTERNAL = -2137063413; /* 0x809F000B */ +constexpr int SAVE_DATA_ERROR_MOUNT_FULL = -2137063412; /* 0x809F000C */ +constexpr int SAVE_DATA_ERROR_BAD_MOUNTED = -2137063411; /* 0x809F000D */ +constexpr int SAVE_DATA_ERROR_FILE_NOT_FOUND = -2137063410; /* 0x809F000E */ +constexpr int SAVE_DATA_ERROR_BROKEN = -2137063409; /* 0x809F000F */ +constexpr int SAVE_DATA_ERROR_INVALID_LOGIN_USER = -2137063407; /* 0x809F0011 */ +constexpr int SAVE_DATA_ERROR_MEMORY_NOT_READY = -2137063406; /* 0x809F0012 */ +constexpr int SAVE_DATA_ERROR_BACKUP_BUSY = -2137063405; /* 0x809F0013 */ +constexpr int SAVE_DATA_ERROR_NOT_REGIST_CALLBACK = -2137063403; /* 0x809F0015 */ +constexpr int SAVE_DATA_ERROR_BUSY_FOR_SAVING = -2137063402; /* 0x809F0016 */ +constexpr int SAVE_DATA_ERROR_LIMITATION_OVER = -2137063401; /* 0x809F0017 */ +constexpr int SAVE_DATA_ERROR_EVENT_BUSY = -2137063400; /* 0x809F0018 */ +constexpr int SAVE_DATA_ERROR_PARAMSFO_TRANSFER_TITLE_ID_NOT_FOUND = -2137063399; /* 0x809F0019 */ + +} // namespace SaveData + } // namespace Kyty::Libs #endif // KYTY_EMU_ENABLED diff --git a/source/emulator/include/Emulator/Libs/Printf.h b/source/emulator/include/Emulator/Libs/Printf.h index 0ad2709..24b604e 100644 --- a/source/emulator/include/Emulator/Libs/Printf.h +++ b/source/emulator/include/Emulator/Libs/Printf.h @@ -12,13 +12,15 @@ namespace Kyty::Libs { struct VaContext; struct VaList; -using libc_print_func_t = KYTY_FORMAT_PRINTF(1, 2) KYTY_SYSV_ABI int (*)(const char* str, ...); -using libc_print_v_func_t = int (*)(VaContext* c); -using libc_vprint_func_t = int (*)(const char* str, VaList* c); +using libc_printf_std_func_t = KYTY_FORMAT_PRINTF(1, 2) KYTY_SYSV_ABI int (*)(const char* str, ...); +using libc_printf_ctx_func_t = int (*)(VaContext* c); +using libc_snprintf_ctx_func_t = int (*)(VaContext* c); +using libc_vprintf_func_t = int (*)(const char* str, VaList* c); -libc_print_func_t GetPrintFunc(); -libc_print_v_func_t GetPrintFuncV(); -libc_vprint_func_t GetVPrintFunc(); +libc_printf_std_func_t GetPrintfStdFunc(); +libc_printf_ctx_func_t GetPrintfCtxFunc(); +libc_snprintf_ctx_func_t GetSnrintfCtxFunc(); +libc_vprintf_func_t GetVprintfFunc(); } // namespace Kyty::Libs diff 
--git a/source/emulator/include/Emulator/Network.h b/source/emulator/include/Emulator/Network.h index 39e4941..9229c9d 100644 --- a/source/emulator/include/Emulator/Network.h +++ b/source/emulator/include/Emulator/Network.h @@ -74,6 +74,12 @@ int KYTY_SYSV_ABI NpRegisterPlusEventCallback(void* callback, void* userdata); } // namespace NpManager +namespace NpManagerForToolkit { + +int KYTY_SYSV_ABI NpRegisterStateCallbackForToolkit(void* callback, void* userdata); + +} // namespace NpManagerForToolkit + namespace NpTrophy { int KYTY_SYSV_ABI NpTrophyCreateHandle(int* handle); diff --git a/source/emulator/include/Emulator/RuntimeLinker.h b/source/emulator/include/Emulator/RuntimeLinker.h index 9f140df..8ba5b06 100644 --- a/source/emulator/include/Emulator/RuntimeLinker.h +++ b/source/emulator/include/Emulator/RuntimeLinker.h @@ -135,6 +135,7 @@ public: Program* LoadProgram(const String& elf_name); void SaveMainProgram(const String& elf_name); + void SaveProgram(Program* program, const String& elf_name); void UnloadProgram(Program* program); [[nodiscard]] uint64_t GetEntry(); diff --git a/source/emulator/include/Emulator/VirtualMemory.h b/source/emulator/include/Emulator/VirtualMemory.h index 058bfa0..bb14ec4 100644 --- a/source/emulator/include/Emulator/VirtualMemory.h +++ b/source/emulator/include/Emulator/VirtualMemory.h @@ -2,6 +2,7 @@ #define EMULATOR_INCLUDE_EMULATOR_VIRTUALMEMORY_H_ #include "Kyty/Core/Common.h" +#include "Kyty/Core/String.h" #include "Emulator/Common.h" @@ -26,6 +27,7 @@ struct SystemInfo uint32_t AllocationGranularity; uint16_t ProcessorLevel; uint16_t ProcessorRevision; + String ProcessorName; }; SystemInfo GetSystemInfo(); @@ -94,6 +96,8 @@ inline bool IsExecute(Mode mode) return (mode == Mode::Execute || mode == Mode::ExecuteRead || mode == Mode::ExecuteWrite || mode == Mode::ExecuteReadWrite); } +void Init(); + uint64_t Alloc(uint64_t address, uint64_t size, Mode mode); uint64_t AllocAligned(uint64_t address, uint64_t size, Mode mode, uint64_t alignment); bool Free(uint64_t address); diff --git a/source/emulator/src/Audio.cpp b/source/emulator/src/Audio.cpp index 1344861..c5daa45 100644 --- a/source/emulator/src/Audio.cpp +++ b/source/emulator/src/Audio.cpp @@ -2,9 +2,11 @@ #include "Kyty/Core/Common.h" #include "Kyty/Core/DbgAssert.h" +#include "Kyty/Core/MagicEnum.h" #include "Kyty/Core/String.h" #include "Kyty/Core/Threads.h" +#include "Emulator/Kernel/Pthread.h" #include "Emulator/Libs/Errno.h" #include "Emulator/Libs/Libs.h" @@ -15,13 +17,75 @@ namespace Kyty::Libs::Audio { class Audio { public: + enum class Format + { + Unknown, + Signed16bitMono, + Signed16bitStereo, + Signed16bit8Ch, + FloatMono, + FloatStereo, + Float8Ch, + Signed16bit8ChStd, + Float8ChStd, + }; + + class Id + { + public: + explicit Id(int id): m_id(id - 1) {} + [[nodiscard]] int ToInt() const { return m_id + 1; } + [[nodiscard]] bool IsValid() const { return m_id >= 0; } + + friend class Audio; + + private: + Id() = default; + static Id Invalid() { return Id(); } + static Id Create(int audio_id) + { + Id r; + r.m_id = audio_id; + return r; + } + [[nodiscard]] int GetId() const { return m_id; } + + int m_id = -1; + }; + + struct OutputParam + { + Id handle; + const void* data = nullptr; + }; + Audio() = default; virtual ~Audio() = default; KYTY_CLASS_NO_COPY(Audio); + Id AudioOutOpen(int type, uint32_t samples_num, uint32_t freq, Format format); + bool AudioOutValid(Id handle); + bool AudioOutSetVolume(Id handle, uint32_t bitflag, const int* volume); + uint32_t 
AudioOutOutputs(OutputParam* params, uint32_t num); + + static constexpr int PORTS_MAX = 32; + private: + struct Port + { + bool used = false; + int type = 0; + uint32_t samples_num = 0; + uint32_t freq = 0; + Format format = Format::Unknown; + uint64_t last_output_time = 0; + int channels_num = 0; + int volume[8] = {}; + }; + Core::Mutex m_mutex; + Port m_ports[PORTS_MAX]; }; static Audio* g_audio = nullptr; @@ -37,6 +101,230 @@ KYTY_SUBSYSTEM_UNEXPECTED_SHUTDOWN(Audio) {} KYTY_SUBSYSTEM_DESTROY(Audio) {} +Audio::Id Audio::AudioOutOpen(int type, uint32_t samples_num, uint32_t freq, Format format) +{ + Core::LockGuard lock(m_mutex); + + for (int id = 0; id < PORTS_MAX; id++) + { + if (!m_ports[id].used) + { + auto& port = m_ports[id]; + + port.used = true; + port.type = type; + port.samples_num = samples_num; + port.freq = freq; + port.format = format; + port.last_output_time = 0; + + switch (format) + { + case Format::Signed16bitMono: + case Format::FloatMono: port.channels_num = 1; break; + case Format::Signed16bitStereo: + case Format::FloatStereo: port.channels_num = 2; break; + case Format::Signed16bit8Ch: + case Format::Float8Ch: + case Format::Signed16bit8ChStd: + case Format::Float8ChStd: port.channels_num = 8; break; + default: EXIT("unknown format"); + } + + for (int i = 0; i < port.channels_num; i++) + { + port.volume[i] = 32768; + } + + return Id::Create(id); + } + } + + return Id::Invalid(); +} + +bool Audio::AudioOutValid(Id handle) +{ + Core::LockGuard lock(m_mutex); + + return (handle.GetId() >= 0 && handle.GetId() < PORTS_MAX && m_ports[handle.GetId()].used); +} + +bool Audio::AudioOutSetVolume(Id handle, uint32_t bitflag, const int* volume) +{ + Core::LockGuard lock(m_mutex); + + if (AudioOutValid(handle)) + { + auto& port = m_ports[handle.GetId()]; + + for (int i = 0; i < port.channels_num; i++, bitflag >>= 1u) + { + auto bit = bitflag & 0x1u; + + if (bit == 1) + { + int src_index = i; + if (port.format == Format::Float8ChStd || port.format == Format::Signed16bit8ChStd) + { + switch (i) + { + case 4: src_index = 6; break; + case 5: src_index = 7; break; + case 6: src_index = 4; break; + case 7: src_index = 5; break; + default:; + } + } + port.volume[i] = volume[src_index]; + + printf("\t port.volume[%d] = volume[%d] (%d)\n", i, src_index, volume[src_index]); + } + } + + return true; + } + + return false; +} + +uint32_t Audio::AudioOutOutputs(OutputParam* params, uint32_t num) +{ + EXIT_NOT_IMPLEMENTED(num == 0); + EXIT_NOT_IMPLEMENTED(!AudioOutValid(params[0].handle)); + + const auto& first_port = m_ports[params[0].handle.GetId()]; + + uint64_t block_time = (1000000 * first_port.samples_num) / first_port.freq; + uint64_t current_time = LibKernel::KernelGetProcessTime(); + + uint64_t max_wait_time = 0; + + for (uint32_t i = 0; i < num; i++) + { + uint64_t next_time = m_ports[params[i].handle.GetId()].last_output_time + block_time; + uint64_t wait_time = (next_time > current_time ? next_time - current_time : 0); + max_wait_time = (wait_time > max_wait_time ? 
wait_time : max_wait_time); + } + + // Audio output is not yet implemented, so simulate audio delay + Core::Thread::SleepMicro(max_wait_time); + + for (uint32_t i = 0; i < num; i++) + { + m_ports[params[i].handle.GetId()].last_output_time = LibKernel::KernelGetProcessTime(); + } + + return first_port.samples_num; +} + +namespace AudioOut { + +LIB_NAME("AudioOut", "AudioOut"); + +struct AudioOutOutputParam +{ + int handle; + const void* ptr; +}; + +int KYTY_SYSV_ABI AudioOutInit() +{ + PRINT_NAME(); + + return OK; +} + +int KYTY_SYSV_ABI AudioOutOpen(int user_id, int type, int index, uint32_t len, uint32_t freq, uint32_t param) +{ + PRINT_NAME(); + + printf("\t user_id = %d\n", user_id); + printf("\t type = %d\n", type); + printf("\t index = %d\n", index); + printf("\t len = %u\n", len); + printf("\t freq = %u\n", freq); + + EXIT_NOT_IMPLEMENTED(user_id != 255); + EXIT_NOT_IMPLEMENTED(type != 0); + EXIT_NOT_IMPLEMENTED(index != 0); + + Audio::Format format = Audio::Format::Unknown; + + switch (param) + { + case 0: format = Audio::Format::Signed16bitMono; break; + case 1: format = Audio::Format::Signed16bitStereo; break; + case 2: format = Audio::Format::Signed16bit8Ch; break; + case 3: format = Audio::Format::FloatMono; break; + case 4: format = Audio::Format::FloatStereo; break; + case 5: format = Audio::Format::Float8Ch; break; + case 6: format = Audio::Format::Signed16bit8ChStd; break; + case 7: format = Audio::Format::Float8ChStd; break; + default:; + } + + printf("\t param = %u (%s)\n", param, Core::EnumName(format).C_Str()); + + EXIT_NOT_IMPLEMENTED(format == Audio::Format::Unknown); + + EXIT_IF(g_audio == nullptr); + + auto id = g_audio->AudioOutOpen(type, len, freq, format); + + if (!id.IsValid()) + { + return AUDIO_OUT_ERROR_PORT_FULL; + } + + return id.ToInt(); +} + +int KYTY_SYSV_ABI AudioOutSetVolume(int handle, uint32_t flag, int* vol) +{ + PRINT_NAME(); + + printf("\t handle = %d\n", handle); + printf("\t flag = %u\n", flag); + + EXIT_IF(g_audio == nullptr); + EXIT_NOT_IMPLEMENTED(vol == nullptr); + + if (!g_audio->AudioOutSetVolume(Audio::Id(handle), flag, vol)) + { + return AUDIO_OUT_ERROR_INVALID_PORT; + } + + return OK; +} + +int KYTY_SYSV_ABI AudioOutOutputs(AudioOutOutputParam* param, uint32_t num) +{ + PRINT_NAME(); + + EXIT_NOT_IMPLEMENTED(param == nullptr); + EXIT_NOT_IMPLEMENTED(num != 1); + + Audio::OutputParam params[Audio::PORTS_MAX]; + + EXIT_IF(g_audio == nullptr); + + for (uint32_t i = 0; i < num; i++) + { + params[i].handle = Audio::Id(param[i].handle); + params[i].data = param[i].ptr; + + if (!g_audio->AudioOutValid(params[i].handle)) + { + return AUDIO_OUT_ERROR_INVALID_PORT; + } + } + + return static_cast(g_audio->AudioOutOutputs(params, num)); +} + +} // namespace AudioOut + namespace VoiceQoS { LIB_NAME("VoiceQoS", "VoiceQoS"); diff --git a/source/emulator/src/Graphics/Tile.cpp b/source/emulator/src/Graphics/Tile.cpp index 33de751..640657e 100644 --- a/source/emulator/src/Graphics/Tile.cpp +++ b/source/emulator/src/Graphics/Tile.cpp @@ -476,22 +476,32 @@ void TileGetDepthSize(uint32_t width, uint32_t height, uint32_t z_format, uint32 }; static const DepthInfo infos_base[] = { + {3840, 2160, 3, 0, true, false, 3840, {0, 0}, {655360, 2048}, {33423360, 32768}}, + {3840, 2160, 3, 0, false, false, 3840, {0, 0}, {0, 0}, {33423360, 32768}}, {1920, 1080, 3, 0, true, false, 2048, {0, 0}, {196608, 2048}, {9437184, 32768}}, {1920, 1080, 3, 0, false, false, 2048, {0, 0}, {0, 0}, {9437184, 32768}}, {1280, 720, 3, 0, true, false, 1280, {0, 0}, {98304, 2048}, {3932160, 
32768}}, {1280, 720, 3, 0, false, false, 1280, {0, 0}, {0, 0}, {3932160, 32768}}, + {3840, 2160, 1, 0, true, false, 3840, {0, 0}, {655360, 2048}, {16711680, 32768}}, + {3840, 2160, 1, 0, false, false, 3840, {0, 0}, {0, 0}, {16711680, 32768}}, {1920, 1080, 1, 0, true, false, 2048, {0, 0}, {196608, 2048}, {4718592, 32768}}, {1920, 1080, 1, 0, false, false, 2048, {0, 0}, {0, 0}, {4718592, 32768}}, {1280, 720, 1, 0, true, false, 1280, {0, 0}, {98304, 2048}, {1966080, 32768}}, {1280, 720, 1, 0, false, false, 1280, {0, 0}, {0, 0}, {1966080, 32768}}, + {3840, 2160, 0, 1, true, false, 3840, {8355840, 32768}, {655360, 2048}, {0, 0}}, + {3840, 2160, 0, 1, false, false, 3840, {8355840, 32768}, {0, 0}, {0, 0}}, {1920, 1080, 0, 1, true, false, 2048, {2359296, 32768}, {196608, 2048}, {0, 0}}, {1920, 1080, 0, 1, false, false, 2048, {2359296, 32768}, {0, 0}, {0, 0}}, {1280, 720, 0, 1, true, false, 1280, {983040, 32768}, {98304, 2048}, {0, 0}}, {1280, 720, 0, 1, false, false, 1280, {983040, 32768}, {0, 0}, {0, 0}}, + {3840, 2160, 3, 1, true, false, 3840, {8355840, 32768}, {655360, 2048}, {33423360, 32768}}, + {3840, 2160, 3, 1, false, false, 3840, {8355840, 32768}, {0, 0}, {33423360, 32768}}, {1920, 1080, 3, 1, true, false, 2048, {2359296, 32768}, {196608, 2048}, {9437184, 32768}}, {1920, 1080, 3, 1, false, false, 2048, {2359296, 32768}, {0, 0}, {9437184, 32768}}, {1280, 720, 3, 1, true, false, 1280, {983040, 32768}, {98304, 2048}, {3932160, 32768}}, {1280, 720, 3, 1, false, false, 1280, {983040, 32768}, {0, 0}, {3932160, 32768}}, + {3840, 2160, 1, 1, true, false, 3840, {8355840, 32768}, {655360, 2048}, {16711680, 32768}}, + {3840, 2160, 1, 1, false, false, 3840, {8355840, 32768}, {0, 0}, {16711680, 32768}}, {1920, 1080, 1, 1, true, false, 2048, {2359296, 32768}, {196608, 2048}, {4718592, 32768}}, {1920, 1080, 1, 1, false, false, 2048, {2359296, 32768}, {0, 0}, {4718592, 32768}}, {1280, 720, 1, 1, true, false, 1280, {983040, 32768}, {98304, 2048}, {1966080, 32768}}, @@ -499,22 +509,32 @@ void TileGetDepthSize(uint32_t width, uint32_t height, uint32_t z_format, uint32 }; static const DepthInfo infos_neo[] = { + {3840, 2160, 3, 0, true, true, 3840, {0, 0}, {655360, 4096}, {33423360, 65536}}, + {3840, 2160, 3, 0, false, true, 3840, {0, 0}, {0, 0}, {33423360, 65536}}, {1920, 1080, 3, 0, true, true, 1920, {0, 0}, {196608, 4096}, {8847360, 65536}}, {1920, 1080, 3, 0, false, true, 1920, {0, 0}, {0, 0}, {8847360, 65536}}, {1280, 720, 3, 0, true, true, 1280, {0, 0}, {131072, 4096}, {3932160, 65536}}, {1280, 720, 3, 0, false, true, 1280, {0, 0}, {0, 0}, {3932160, 65536}}, + {3840, 2160, 1, 0, true, true, 3840, {0, 0}, {655360, 4096}, {16711680, 65536}}, + {3840, 2160, 1, 0, false, true, 3840, {0, 0}, {0, 0}, {16711680, 65536}}, {1920, 1080, 1, 0, true, true, 2048, {0, 0}, {196608, 4096}, {4718592, 65536}}, {1920, 1080, 1, 0, false, true, 2048, {0, 0}, {0, 0}, {4718592, 65536}}, {1280, 720, 1, 0, true, true, 1280, {0, 0}, {131072, 4096}, {1966080, 65536}}, {1280, 720, 1, 0, false, true, 1280, {0, 0}, {0, 0}, {1966080, 65536}}, + {3840, 2160, 0, 1, true, true, 3840, {8355840, 32768}, {655360, 4096}, {0, 0}}, + {3840, 2160, 0, 1, false, true, 3840, {8355840, 32768}, {0, 0}, {0, 0}}, {1920, 1080, 0, 1, true, true, 2048, {2359296, 32768}, {196608, 4096}, {0, 0}}, {1920, 1080, 0, 1, false, true, 2048, {2359296, 32768}, {0, 0}, {0, 0}}, {1280, 720, 0, 1, true, true, 1280, {983040, 32768}, {131072, 4096}, {0, 0}}, {1280, 720, 0, 1, false, true, 1280, {983040, 32768}, {0, 0}, {0, 0}}, + {3840, 2160, 3, 1, true, true, 3840, 
{8355840, 32768}, {655360, 4096}, {33423360, 65536}}, + {3840, 2160, 3, 1, false, true, 3840, {8355840, 32768}, {0, 0}, {33423360, 65536}}, {1920, 1080, 3, 1, true, true, 2048, {2359296, 32768}, {196608, 4096}, {9437184, 65536}}, {1920, 1080, 3, 1, false, true, 2048, {2359296, 32768}, {0, 0}, {9437184, 65536}}, {1280, 720, 3, 1, true, true, 1280, {983040, 32768}, {131072, 4096}, {3932160, 65536}}, {1280, 720, 3, 1, false, true, 1280, {983040, 32768}, {0, 0}, {3932160, 65536}}, + {3840, 2160, 1, 1, true, true, 3840, {8355840, 32768}, {655360, 4096}, {16711680, 65536}}, + {3840, 2160, 1, 1, false, true, 3840, {8355840, 32768}, {0, 0}, {16711680, 65536}}, {1920, 1080, 1, 1, true, true, 2048, {2359296, 32768}, {196608, 4096}, {4718592, 65536}}, {1920, 1080, 1, 1, false, true, 2048, {2359296, 32768}, {0, 0}, {4718592, 65536}}, {1280, 720, 1, 1, true, true, 1280, {983040, 32768}, {131072, 4096}, {1966080, 65536}}, @@ -566,6 +586,27 @@ void TileGetVideoOutSize(uint32_t width, uint32_t height, bool tile, bool neo, u uint32_t ret_size = 0; uint32_t ret_pitch = 0; + if (width == 3840 && height == 2160 && tile && !neo) + { + ret_size = 33423360; + ret_pitch = 3840; + } + if (width == 3840 && height == 2160 && tile && neo) + { + ret_size = 33423360; + ret_pitch = 3840; + } + if (width == 3840 && height == 2160 && !tile && !neo) + { + ret_size = 33177600; + ret_pitch = 3840; + } + if (width == 3840 && height == 2160 && !tile && neo) + { + ret_size = 33177600; + ret_pitch = 3840; + } + if (width == 1920 && height == 1080 && tile && !neo) { ret_size = 8355840; @@ -586,6 +627,7 @@ void TileGetVideoOutSize(uint32_t width, uint32_t height, bool tile, bool neo, u ret_size = 8294400; ret_pitch = 1920; } + if (width == 1280 && height == 720 && tile && !neo) { ret_size = 3932160; diff --git a/source/emulator/src/Graphics/Window.cpp b/source/emulator/src/Graphics/Window.cpp index 1ba8191..5047306 100644 --- a/source/emulator/src/Graphics/Window.cpp +++ b/source/emulator/src/Graphics/Window.cpp @@ -16,6 +16,7 @@ #include "Emulator/Graphics/Utils.h" #include "Emulator/Graphics/VideoOut.h" #include "Emulator/Profiler.h" +#include "Emulator/VirtualMemory.h" #include "SDL.h" #include "SDL_error.h" @@ -230,6 +231,7 @@ struct WindowContext GameApi* game = nullptr; char device_name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE] = {0}; + char processor_name[64] = {0}; Core::Mutex mutex; bool graphic_initialized = false; @@ -2016,6 +2018,7 @@ static void VulkanCreate(WindowContext* ctx) printf("Select device: %s\n", device_properties.deviceName); memcpy(ctx->device_name, device_properties.deviceName, sizeof(ctx->device_name)); + memcpy(ctx->processor_name, Loader::GetSystemInfo().ProcessorName.C_Str(), sizeof(ctx->processor_name)); ctx->graphic_ctx.device = VulkanCreateDevice(ctx->graphic_ctx.physical_device, ctx->surface, &r, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, @@ -2109,8 +2112,8 @@ void WindowShowFps() EXIT_IF(g_window_ctx == nullptr); EXIT_IF(g_window_ctx->game == nullptr); - auto fps = String::FromPrintf("[%s], frame: %d, fps: %f", g_window_ctx->device_name, g_window_ctx->game->m_frame_num, - g_window_ctx->game->m_current_fps); + auto fps = String::FromPrintf("[%s] [%s], frame: %d, fps: %f", g_window_ctx->device_name, g_window_ctx->processor_name, + g_window_ctx->game->m_frame_num, g_window_ctx->game->m_current_fps); SDL_SetWindowTitle(g_window_ctx->window, fps.C_Str()); } diff --git a/source/emulator/src/Kernel/Memory.cpp b/source/emulator/src/Kernel/Memory.cpp index d822cfb..820ca3e 100644 --- 
a/source/emulator/src/Kernel/Memory.cpp +++ b/source/emulator/src/Kernel/Memory.cpp @@ -90,6 +90,8 @@ KYTY_SUBSYSTEM_INIT(Memory) { g_physical_memory = new PhysicalMemory; g_flexible_memory = new FlexibleMemory; + + VirtualMemory::Init(); } KYTY_SUBSYSTEM_UNEXPECTED_SHUTDOWN(Memory) {} diff --git a/source/emulator/src/Kernel/Pthread.cpp b/source/emulator/src/Kernel/Pthread.cpp index 9c0f39b..5386894 100644 --- a/source/emulator/src/Kernel/Pthread.cpp +++ b/source/emulator/src/Kernel/Pthread.cpp @@ -2310,6 +2310,7 @@ int KYTY_SYSV_ABI KernelClockGettime(KernelClockid clock_id, KernelTimespec* tp) switch (clock_id) { case 0: pclock_id = CLOCK_REALTIME; break; + case 13: case 4: pclock_id = CLOCK_MONOTONIC; break; default: EXIT("unknown clock_id: %d", clock_id); } @@ -2419,7 +2420,7 @@ int KYTY_SYSV_ABI KernelNanosleep(const KernelTimespec* rqtp, KernelTimespec* rm { PRINT_NAME(); - if (rqtp == nullptr || rmtp == nullptr) + if (rqtp == nullptr) { return KERNEL_ERROR_EFAULT; } @@ -2438,7 +2439,11 @@ int KYTY_SYSV_ABI KernelNanosleep(const KernelTimespec* rqtp, KernelTimespec* rm Core::Thread::SleepNano(nanos); double ts = t.GetTimeS(); printf("\tactual: %g nanoseconds\n", ts * 1000000000.0); - sec_to_timespec(rmtp, ts); + + if (rmtp != nullptr) + { + sec_to_timespec(rmtp, ts); + } return OK; } @@ -2546,14 +2551,14 @@ int KYTY_SYSV_ABI pthread_cond_wait(LibKernel::PthreadCond* cond, LibKernel::Pth int KYTY_SYSV_ABI pthread_mutex_lock(LibKernel::PthreadMutex* mutex) { - PRINT_NAME(); + // PRINT_NAME(); return POSIX_PTHREAD_CALL(LibKernel::PthreadMutexLock(mutex)); } int KYTY_SYSV_ABI pthread_mutex_unlock(LibKernel::PthreadMutex* mutex) { - PRINT_NAME(); + // PRINT_NAME(); return POSIX_PTHREAD_CALL(LibKernel::PthreadMutexUnlock(mutex)); } diff --git a/source/emulator/src/Kyty.cpp b/source/emulator/src/Kyty.cpp index 7aa4d3e..2474272 100644 --- a/source/emulator/src/Kyty.cpp +++ b/source/emulator/src/Kyty.cpp @@ -1,4 +1,3 @@ -#include "Kyty/Core/Common.h" #include "Kyty/Core/Core.h" #include "Kyty/Core/DbgAssert.h" #include "Kyty/Core/MagicEnum.h" @@ -8,6 +7,7 @@ #include "Kyty/Core/Threads.h" #include "Kyty/Core/Vector.h" #include "Kyty/Scripts/Scripts.h" +#include "Kyty/UnitTest.h" #include "Emulator/Audio.h" #include "Emulator/Common.h" @@ -58,6 +58,7 @@ static void print_system_info() printf("AllocationGranularity = %" PRIu32 "\n", info.AllocationGranularity); printf("ProcessorLevel = %" PRIu16 "\n", info.ProcessorLevel); printf("ProcessorRevision = 0x%04" PRIx16 "\n", info.ProcessorRevision); + printf("ProcessorName = %s\n", info.ProcessorName.C_Str()); } static void kyty_close() @@ -144,7 +145,7 @@ KYTY_SCRIPT_FUNC(kyty_init_func) KYTY_SCRIPT_FUNC(kyty_load_elf_func) { - if (Scripts::ArgGetVarCount() != 1 && Scripts::ArgGetVarCount() != 2) + if (Scripts::ArgGetVarCount() != 1 && Scripts::ArgGetVarCount() != 2 && Scripts::ArgGetVarCount() != 3) { EXIT("invalid args\n"); } @@ -155,7 +156,7 @@ KYTY_SCRIPT_FUNC(kyty_load_elf_func) auto* program = rt->LoadProgram(Libs::LibKernel::FileSystem::GetRealFilename(elf.ToString())); - if (Scripts::ArgGetVarCount() == 2) + if (Scripts::ArgGetVarCount() >= 2) { if (Scripts::ArgGetVar(1).ToInteger() == 1) { @@ -163,6 +164,13 @@ KYTY_SCRIPT_FUNC(kyty_load_elf_func) } } + if (Scripts::ArgGetVarCount() >= 3) + { + auto save_name = Scripts::ArgGetVar(2).ToString(); + + rt->SaveProgram(program, Libs::LibKernel::FileSystem::GetRealFilename(save_name)); + } + return 0; } @@ -355,6 +363,16 @@ KYTY_SCRIPT_FUNC(kyty_shader_printf) return 0; } 
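+// Script hook: runs every registered unit test via UnitTest::unit_test_all() and +// terminates with EXIT() if any test fails; registered below as "kyty_run_tests".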
+KYTY_SCRIPT_FUNC(kyty_run_tests) +{ + if (!UnitTest::unit_test_all()) + { + EXIT("test failed\n"); + } + + return 0; +} + void kyty_help() {} } // namespace LuaFunc @@ -371,6 +389,7 @@ void kyty_reg() Scripts::RegisterFunc("kyty_mount", LuaFunc::kyty_mount_func, LuaFunc::kyty_help); Scripts::RegisterFunc("kyty_shader_disable", LuaFunc::kyty_shader_disable, LuaFunc::kyty_help); Scripts::RegisterFunc("kyty_shader_printf", LuaFunc::kyty_shader_printf, LuaFunc::kyty_help); + Scripts::RegisterFunc("kyty_run_tests", LuaFunc::kyty_run_tests, LuaFunc::kyty_help); } #else diff --git a/source/emulator/src/Libs/LibAudio.cpp b/source/emulator/src/Libs/LibAudio.cpp index 2dcbaa6..5d3c440 100644 --- a/source/emulator/src/Libs/LibAudio.cpp +++ b/source/emulator/src/Libs/LibAudio.cpp @@ -7,6 +7,22 @@ namespace Kyty::Libs { +namespace LibAudioOut { + +LIB_VERSION("AudioOut", 1, "AudioOut", 1, 1); + +namespace AudioOut = Audio::AudioOut; + +LIB_DEFINE(InitAudio_1_AudioOut) +{ + LIB_FUNC("JfEPXVxhFqA", AudioOut::AudioOutInit); + LIB_FUNC("ekNvsT22rsY", AudioOut::AudioOutOpen); + LIB_FUNC("b+uAV89IlxE", AudioOut::AudioOutSetVolume); + LIB_FUNC("w3PdaSTSwGE", AudioOut::AudioOutOutputs); +} + +} // namespace LibAudioOut + namespace LibVoiceQoS { LIB_VERSION("VoiceQoS", 1, "VoiceQoS", 0, 0); @@ -22,6 +38,7 @@ LIB_DEFINE(InitAudio_1_VoiceQoS) LIB_DEFINE(InitAudio_1) { + LibAudioOut::InitAudio_1_AudioOut(s); LibVoiceQoS::InitAudio_1_VoiceQoS(s); } diff --git a/source/emulator/src/Libs/LibC.cpp b/source/emulator/src/Libs/LibC.cpp index 9202723..3042fd3 100644 --- a/source/emulator/src/Libs/LibC.cpp +++ b/source/emulator/src/Libs/LibC.cpp @@ -1,6 +1,7 @@ #include "Kyty/Core/Common.h" #include "Kyty/Core/DbgAssert.h" #include "Kyty/Core/LinkList.h" +#include "Kyty/Core/MSpace.h" #include "Kyty/Core/Singleton.h" #include "Kyty/Core/String.h" @@ -59,21 +60,20 @@ static KYTY_SYSV_ABI int atexit(void (*func)()) return 0; } -static KYTY_SYSV_ABI int printf(VA_ARGS) +static KYTY_SYSV_ABI int libc_printf(VA_ARGS) { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,hicpp-member-init) - VA_CONTEXT(ctx); + VA_CONTEXT(ctx); // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init) PRINT_NAME(); - return GetPrintFuncV()(&ctx); + return GetPrintfCtxFunc()(&ctx); } static KYTY_SYSV_ABI int puts(const char* s) { PRINT_NAME(); - return GetPrintFunc()("%s\n", s); + return GetPrintfStdFunc()("%s\n", s); } static KYTY_SYSV_ABI void catchReturnFromMain(int status) @@ -161,7 +161,16 @@ int KYTY_SYSV_ABI vprintf(const char* str, VaList* c) { PRINT_NAME(); - return GetVPrintFunc()(str, c); + return GetVprintfFunc()(str, c); +} + +static KYTY_SYSV_ABI int snprintf(VA_ARGS) +{ + VA_CONTEXT(ctx); // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init) + + PRINT_NAME(); + + return GetSnrintfCtxFunc()(&ctx); } int KYTY_SYSV_ABI fflush(FILE* stream) @@ -180,6 +189,49 @@ void* KYTY_SYSV_ABI memset(void* s, int c, size_t n) return ::memset(s, c, n); } +void* KYTY_SYSV_ABI LibcMspaceCreate(const char* name, void* base, size_t capacity, uint32_t flag) +{ + PRINT_NAME(); + + printf("\t name = %s\n", name); + printf("\t base = %016" PRIx64 "\n", reinterpret_cast(base)); + printf("\t capacity = %016" PRIx64 "\n", capacity); + printf("\t flag = %u\n", flag); + + EXIT_NOT_IMPLEMENTED(flag != 0 && flag != 1); + EXIT_NOT_IMPLEMENTED(name == nullptr); + EXIT_NOT_IMPLEMENTED(base == nullptr); + EXIT_NOT_IMPLEMENTED(capacity == 0); + + bool thread_safe = true; + + if (flag == 1) + { + thread_safe = false; + } + + auto* msp = 
Core::MSpaceCreate(name, base, capacity, thread_safe, nullptr); + + EXIT_NOT_IMPLEMENTED(msp == nullptr); + + return msp; +} + +void* KYTY_SYSV_ABI LibcMspaceMalloc(void* msp, size_t size) +{ + PRINT_NAME(); + + printf("\t size = %016" PRIx64 "\n", size); + + auto* buf = Core::MSpaceMalloc(msp, size); + + printf("\t buf = %016" PRIx64 "\n", reinterpret_cast(buf)); + + EXIT_NOT_IMPLEMENTED(buf == nullptr); + + return buf; +} + LIB_DEFINE(InitLibcInternal_1) { LibcInternalExt::InitLibcInternalExt_1(s); @@ -190,8 +242,13 @@ LIB_DEFINE(InitLibcInternal_1) LIB_FUNC("GMpvxPFW924", LibcInternal::vprintf); LIB_FUNC("MUjC4lbHrK4", LibcInternal::fflush); LIB_FUNC("8zTFvBIAIN8", LibcInternal::memset); + LIB_FUNC("eLdDw6l0-bU", LibcInternal::snprintf); + LIB_FUNC("tsvEmnenz48", LibC::cxa_atexit); LIB_FUNC("H2e8t5ScQGc", LibC::cxa_finalize); + + LIB_FUNC("-hn1tcVHq5Q", LibcInternal::LibcMspaceCreate); + LIB_FUNC("OJjm-QOIHlI", LibcInternal::LibcMspaceMalloc); } } // namespace LibcInternal @@ -200,6 +257,8 @@ LIB_USING(LibC); LIB_DEFINE(InitLibC_1) { + EXIT("deprecated\n"); + LibcInternal::InitLibcInternal_1(s); LIB_OBJECT("P330P3dFF68", &LibC::g_need_flag); @@ -207,7 +266,7 @@ LIB_DEFINE(InitLibC_1) LIB_FUNC("uMei1W9uyNo", LibC::exit); LIB_FUNC("bzQExy189ZI", LibC::init_env); LIB_FUNC("8G2LB+A3rzg", LibC::atexit); - LIB_FUNC("hcuQgD53UxM", LibC::printf); + LIB_FUNC("hcuQgD53UxM", LibC::libc_printf); LIB_FUNC("YQ0navp+YIc", LibC::puts); LIB_FUNC("XKRegsFpEpk", LibC::catchReturnFromMain); LIB_FUNC("tsvEmnenz48", LibC::cxa_atexit); diff --git a/source/emulator/src/Libs/LibKernel.cpp b/source/emulator/src/Libs/LibKernel.cpp index c1938d1..fbfeb1c 100644 --- a/source/emulator/src/Libs/LibKernel.cpp +++ b/source/emulator/src/Libs/LibKernel.cpp @@ -410,6 +410,13 @@ int KYTY_SYSV_ABI KernelIsNeoMode() return (Config::IsNeo() ? 
1 : 0); } +int KYTY_SYSV_ABI clock_gettime(int clock_id, LibKernel::KernelTimespec* time) +{ + PRINT_NAME(); + + return POSIX_CALL(LibKernel::KernelClockGettime(clock_id, time)); +} + } // namespace LibKernel namespace Posix { @@ -423,9 +430,17 @@ int KYTY_SYSV_ABI clock_gettime(int clock_id, LibKernel::KernelTimespec* time) return POSIX_CALL(LibKernel::KernelClockGettime(clock_id, time)); } +int KYTY_SYSV_ABI nanosleep(const LibKernel::KernelTimespec* rqtp, LibKernel::KernelTimespec* rmtp) +{ + PRINT_NAME(); + + return POSIX_CALL(LibKernel::KernelNanosleep(rqtp, rmtp)); +} + LIB_DEFINE(InitLibKernel_1_Posix) { LIB_FUNC("lLMT9vJAck0", clock_gettime); + LIB_FUNC("yS8U2TGCe1A", nanosleep); LIB_FUNC("7H0iTOciTLo", Posix::pthread_mutex_lock); LIB_FUNC("2Z+PpY6CaJg", Posix::pthread_mutex_unlock); @@ -543,6 +558,7 @@ LIB_DEFINE(InitLibKernel_1_Pthread) LIB_FUNC("WKAXJ4XBPQ4", LibKernel::PthreadCondWait); LIB_FUNC("JGgj7Uvrl+A", LibKernel::PthreadCondBroadcast); LIB_FUNC("BmMjYxmew1w", LibKernel::PthreadCondTimedwait); + LIB_FUNC("m5-2bsNfv7s", LibKernel::PthreadCondattrInit); LIB_FUNC("QBi7HCK03hw", LibKernel::KernelClockGettime); LIB_FUNC("ejekcaNQNq0", LibKernel::KernelGettimeofday); @@ -588,6 +604,7 @@ LIB_DEFINE(InitLibKernel_1) LIB_FUNC("WslcK1FQcGI", LibKernel::KernelIsNeoMode); LIB_FUNC("9BcDykPmo1I", LibKernel::get_error_addr); LIB_FUNC("6xVpy0Fdq+I", LibKernel::sigprocmask); + LIB_FUNC("lLMT9vJAck0", LibKernel::clock_gettime); LIB_FUNC("1jfXLRVzisc", LibKernel::KernelUsleep); LIB_FUNC("rNhWz+lvOMU", LibKernel::KernelSetThreadDtors); diff --git a/source/emulator/src/Libs/LibNet.cpp b/source/emulator/src/Libs/LibNet.cpp index db10ac9..e16d112 100644 --- a/source/emulator/src/Libs/LibNet.cpp +++ b/source/emulator/src/Libs/LibNet.cpp @@ -137,6 +137,19 @@ LIB_DEFINE(InitNet_1_NpManager) } // namespace LibNpManager +namespace LibNpManagerForToolkit { + +LIB_VERSION("NpManagerForToolkit", 1, "NpManager", 1, 1); + +namespace NpManagerForToolkit = Network::NpManagerForToolkit; + +LIB_DEFINE(InitNet_1_NpManagerForToolkit) +{ + LIB_FUNC("0c7HbXRKUt4", NpManagerForToolkit::NpRegisterStateCallbackForToolkit); +} + +} // namespace LibNpManagerForToolkit + namespace LibNpTrophy { LIB_VERSION("NpTrophy", 1, "NpTrophy", 1, 1); @@ -170,6 +183,7 @@ LIB_DEFINE(InitNet_1) LibHttp::InitNet_1_Http(s); LibNetCtl::InitNet_1_NetCtl(s); LibNpManager::InitNet_1_NpManager(s); + LibNpManagerForToolkit::InitNet_1_NpManagerForToolkit(s); LibNpTrophy::InitNet_1_NpTrophy(s); LibNpWebApi::InitNet_1_NpWebApi(s); } diff --git a/source/emulator/src/Libs/LibSaveData.cpp b/source/emulator/src/Libs/LibSaveData.cpp index 0454297..7ba94d8 100644 --- a/source/emulator/src/Libs/LibSaveData.cpp +++ b/source/emulator/src/Libs/LibSaveData.cpp @@ -1,3 +1,5 @@ +#include "Kyty/Core/Common.h" +#include "Kyty/Core/DbgAssert.h" #include "Kyty/Core/String.h" #include "Emulator/Common.h" @@ -13,6 +15,37 @@ LIB_VERSION("SaveData", 1, "SaveData", 1, 1); namespace SaveData { +struct SceSaveDataDirName +{ + char data[32]; +}; + +struct SaveDataMountPoint +{ + char data[16]; +}; + +struct SaveDataMount2 +{ + int user_id; + int pad; + const SceSaveDataDirName* dir_name; + uint64_t blocks; + uint32_t mount_mode; + uint8_t reserved[32]; + int pad2; +}; + +struct SaveDataMountResult +{ + SaveDataMountPoint mount_point; + uint64_t required_blocks; + uint32_t unused; + uint32_t mount_status; + uint8_t reserved[28]; + int pad; +}; + int KYTY_SYSV_ABI SaveDataInitialize(const void* /*init*/) { PRINT_NAME(); @@ -40,6 +73,34 @@ int KYTY_SYSV_ABI 
SaveDataInitialize3(const void* /*init*/) return OK; } +int KYTY_SYSV_ABI SaveDataMount2(const SaveDataMount2* mount, SaveDataMountResult* mount_result) +{ + PRINT_NAME(); + + EXIT_NOT_IMPLEMENTED(mount == nullptr); + EXIT_NOT_IMPLEMENTED(mount_result == nullptr); + + printf("\t user_id = %d\n", mount->user_id); + printf("\t dir_name = %s\n", mount->dir_name->data); + printf("\t blocks = %" PRIu64 "\n", mount->blocks); + printf("\t mount_mode = %" PRIu32 "\n", mount->mount_mode); + + if (mount->mount_mode == 1) + { + return SAVE_DATA_ERROR_NOT_FOUND; + } else // NOLINT + { + EXIT("unknown mount mode: %u", mount->mount_mode); + } + + strcpy(mount_result->mount_point.data, "/savedata0"); + + mount_result->required_blocks = 0; + mount_result->mount_status = 1; + + return OK; +} + } // namespace SaveData LIB_DEFINE(InitSaveData_1) @@ -47,6 +108,7 @@ LIB_DEFINE(InitSaveData_1) LIB_FUNC("ZkZhskCPXFw", SaveData::SaveDataInitialize); LIB_FUNC("l1NmDeDpNGU", SaveData::SaveDataInitialize2); LIB_FUNC("TywrFKCoLGY", SaveData::SaveDataInitialize3); + LIB_FUNC("0z45PIH+SNI", SaveData::SaveDataMount2); } } // namespace Kyty::Libs diff --git a/source/emulator/src/Libs/LibSysmodule.cpp b/source/emulator/src/Libs/LibSysmodule.cpp index 43e8487..12f5e15 100644 --- a/source/emulator/src/Libs/LibSysmodule.cpp +++ b/source/emulator/src/Libs/LibSysmodule.cpp @@ -18,7 +18,7 @@ static KYTY_SYSV_ABI int SysmoduleLoadModule(uint16_t id) { PRINT_NAME(); - printf("\tid = %d\n", static_cast<int>(id)); + printf("\t id = %d\n", static_cast<int>(id)); return 0; } @@ -27,7 +27,7 @@ static KYTY_SYSV_ABI int SysmoduleUnloadModule(uint16_t id) { PRINT_NAME(); - printf("\tid = %d\n", static_cast<int>(id)); + printf("\t id = %d\n", static_cast<int>(id)); return 0; } @@ -36,7 +36,7 @@ static KYTY_SYSV_ABI int SysmoduleLoadModuleInternalWithArg(uint16_t id, int arg { PRINT_NAME(); - printf("\tid = %d\n", static_cast<int>(id)); + printf("\t id = %d\n", static_cast<int>(id)); EXIT_IF(arg1 != 0); EXIT_IF(arg2 != 0); @@ -48,6 +48,15 @@ static KYTY_SYSV_ABI int SysmoduleLoadModuleInternalWithArg(uint16_t id, int arg return 0; } +static KYTY_SYSV_ABI int SysmoduleIsLoaded(uint16_t id) +{ + PRINT_NAME(); + + printf("\t id = %d\n", static_cast<int>(id)); + + return 0; +} + } // namespace Sysmodule LIB_DEFINE(InitSysmodule_1) @@ -55,6 +64,7 @@ LIB_DEFINE(InitSysmodule_1) LIB_FUNC("eR2bZFAAU0Q", Sysmodule::SysmoduleUnloadModule); LIB_FUNC("hHrGoGoNf+s", Sysmodule::SysmoduleLoadModuleInternalWithArg); LIB_FUNC("g8cM39EUZ6o", Sysmodule::SysmoduleLoadModule); + LIB_FUNC("fMP5NHUOaMk", Sysmodule::SysmoduleIsLoaded); } } // namespace Kyty::Libs diff --git a/source/emulator/src/Libs/LibUserService.cpp b/source/emulator/src/Libs/LibUserService.cpp index 8f38991..d29337c 100644 --- a/source/emulator/src/Libs/LibUserService.cpp +++ b/source/emulator/src/Libs/LibUserService.cpp @@ -14,6 +14,11 @@ LIB_VERSION("UserService", 1, "UserService", 1, 1); namespace UserService { +struct UserServiceLoginUserIdList +{ + int user_id[4]; +}; + static KYTY_SYSV_ABI int UserServiceInitialize(const void* /*params*/) { PRINT_NAME(); @@ -41,6 +46,20 @@ static KYTY_SYSV_ABI int UserServiceGetEvent(void* event) return USER_SERVICE_ERROR_NO_EVENT; } +static KYTY_SYSV_ABI int UserServiceGetLoginUserIdList(UserServiceLoginUserIdList* user_id_list) +{ + PRINT_NAME(); + + EXIT_NOT_IMPLEMENTED(user_id_list == nullptr); + + user_id_list->user_id[0] = 1; + user_id_list->user_id[1] = -1; + user_id_list->user_id[2] = -1; + user_id_list->user_id[3] = -1; + + return OK; +} + } // namespace UserService
LIB_DEFINE(InitUserService_1) @@ -48,6 +67,7 @@ LIB_DEFINE(InitUserService_1) LIB_FUNC("j3YMu1MVNNo", UserService::UserServiceInitialize); LIB_FUNC("CdWp0oHWGr0", UserService::UserServiceGetInitialUser); LIB_FUNC("yH17Q6NWtVg", UserService::UserServiceGetEvent); + LIB_FUNC("fPhymKNvK-A", UserService::UserServiceGetLoginUserIdList); } } // namespace Kyty::Libs diff --git a/source/emulator/src/Libs/Libs.cpp b/source/emulator/src/Libs/Libs.cpp index 0a99d24..590dc07 100644 --- a/source/emulator/src/Libs/Libs.cpp +++ b/source/emulator/src/Libs/Libs.cpp @@ -13,7 +13,7 @@ LIB_DEFINE(InitDebug_1); LIB_DEFINE(InitDialog_1); LIB_DEFINE(InitDiscMap_1); LIB_DEFINE(InitGraphicsDriver_1); -LIB_DEFINE(InitLibC_1); +// LIB_DEFINE(InitLibC_1); LIB_DEFINE(InitLibKernel_1); LIB_DEFINE(InitNet_1); LIB_DEFINE(InitPad_1); @@ -27,7 +27,7 @@ LIB_DEFINE(InitVideoOut_1); bool Init(const String& id, Loader::SymbolDatabase* s) { LIB_CHECK(U"libAudio_1", InitAudio_1); - LIB_CHECK(U"libc_1", InitLibC_1); + // LIB_CHECK(U"libc_1", InitLibC_1); LIB_CHECK(U"libc_internal_1", LibcInternal::InitLibcInternal_1); LIB_CHECK(U"libDebug_1", InitDebug_1); LIB_CHECK(U"libDialog_1", InitDialog_1); diff --git a/source/emulator/src/Libs/Printf.cpp b/source/emulator/src/Libs/Printf.cpp index a1c76e8..a83d91c 100644 --- a/source/emulator/src/Libs/Printf.cpp +++ b/source/emulator/src/Libs/Printf.cpp @@ -506,7 +506,7 @@ static inline unsigned int _strnlen_s(const char* str, size_t maxsize) } // NOLINTNEXTLINE(readability-function-cognitive-complexity) -int my_vprint(const char* format, VaList* va_list) +static int kyty_printf_internal(bool sn, char* sn_s, size_t sn_n, const char* format, VaList* va_list) { Vector buffer; @@ -854,40 +854,64 @@ int my_vprint(const char* format, VaList* va_list) // termination out(static_cast(0), &buffer, idx < maxlen ? 
idx : maxlen - 1U, maxlen); - printf(FG_BRIGHT_MAGENTA "%s" DEFAULT, buffer.GetDataConst()); + if (sn) + { + snprintf(sn_s, sn_n, "%s", buffer.GetDataConst()); + } else + { + printf(FG_BRIGHT_MAGENTA "%s" DEFAULT, buffer.GetDataConst()); + } // return written chars without terminating \0 return static_cast(idx); } -int my_print_v(VaContext* ctx) +static int kyty_vprintf(const char* format, VaList* va_list) +{ + return kyty_printf_internal(false, nullptr, 0, format, va_list); +} + +static int kyty_printf_ctx(VaContext* ctx) { const char* format = VaArg_ptr(&ctx->va_list); - return my_vprint(format, &ctx->va_list); + return kyty_printf_internal(false, nullptr, 0, format, &ctx->va_list); } -int KYTY_SYSV_ABI my_print2(VA_ARGS) +static int kyty_snprintf_ctx(VaContext* ctx) { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,hicpp-member-init) - VA_CONTEXT(ctx); + char* s = VaArg_ptr(&ctx->va_list); + size_t n = VaArg_size_t(&ctx->va_list); + const char* format = VaArg_ptr(&ctx->va_list); - return my_print_v(&ctx); + return kyty_printf_internal(true, s, n, format, &ctx->va_list); } -libc_print_func_t GetPrintFunc() +static int KYTY_SYSV_ABI kyty_printf_std(VA_ARGS) { - return reinterpret_cast(my_print2); + VA_CONTEXT(ctx); // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init) + + return kyty_printf_ctx(&ctx); } -libc_print_v_func_t GetPrintFuncV() +libc_printf_std_func_t GetPrintfStdFunc() { - return my_print_v; + return reinterpret_cast(kyty_printf_std); } -libc_vprint_func_t GetVPrintFunc() +libc_printf_ctx_func_t GetPrintfCtxFunc() { - return my_vprint; + return kyty_printf_ctx; +} + +libc_snprintf_ctx_func_t GetSnrintfCtxFunc() +{ + return kyty_snprintf_ctx; +} + +libc_vprintf_func_t GetVprintfFunc() +{ + return kyty_vprintf; } } // namespace Kyty::Libs diff --git a/source/emulator/src/Network.cpp b/source/emulator/src/Network.cpp index c5db239..9c93181 100644 --- a/source/emulator/src/Network.cpp +++ b/source/emulator/src/Network.cpp @@ -17,6 +17,29 @@ namespace Kyty::Libs::Network { class Network { public: + class Id + { + public: + explicit Id(int id): m_id(id - 1) {} + [[nodiscard]] int ToInt() const { return m_id + 1; } + [[nodiscard]] bool IsValid() const { return m_id >= 0; } + + friend class Network; + + private: + Id() = default; + static Id Invalid() { return Id(); } + static Id Create(int net_id) + { + Id r; + r.m_id = net_id; + return r; + } + [[nodiscard]] int GetId() const { return m_id; } + + int m_id = -1; + }; + Network() = default; virtual ~Network() = default; @@ -25,14 +48,14 @@ public: int PoolCreate(const char* name, int size); bool PoolDestroy(int memid); - int SslInit(uint64_t pool_size); - bool SslTerm(int ssl_ctx_id); + Id SslInit(uint64_t pool_size); + bool SslTerm(Id ssl_ctx_id); - int HttpInit(int memid, int ssl_ctx_id, uint64_t pool_size); - bool HttpTerm(int http_ctx_id); - int HttpCreateTemplate(int http_ctx_id, const char* user_agent, int http_ver, bool is_auto_proxy_conf); - bool HttpDeleteTemplate(int tmpl_id); - bool HttpValid(int http_ctx_id); + Id HttpInit(int memid, Id ssl_ctx_id, uint64_t pool_size); + bool HttpTerm(Id http_ctx_id); + Id HttpCreateTemplate(Id http_ctx_id, const char* user_agent, int http_ver, bool is_auto_proxy_conf); + bool HttpDeleteTemplate(Id tmpl_id); + bool HttpValid(Id http_ctx_id); private: struct Pool @@ -122,7 +145,7 @@ bool Network::PoolDestroy(int memid) return false; } -int Network::SslInit(uint64_t pool_size) +Network::Id Network::SslInit(uint64_t pool_size) { Core::LockGuard lock(m_mutex); @@ 
-133,20 +156,20 @@ int Network::SslInit(uint64_t pool_size) m_ssl[id].used = true; m_ssl[id].size = pool_size; - return id; + return Id::Create(id); } } - return -1; + return Id::Invalid(); } -bool Network::SslTerm(int ssl_ctx_id) +bool Network::SslTerm(Id ssl_ctx_id) { Core::LockGuard lock(m_mutex); - if (ssl_ctx_id >= 0 && ssl_ctx_id < SSL_MAX && m_ssl[ssl_ctx_id].used) + if (ssl_ctx_id.GetId() >= 0 && ssl_ctx_id.GetId() < SSL_MAX && m_ssl[ssl_ctx_id.GetId()].used) { - m_ssl[ssl_ctx_id].used = false; + m_ssl[ssl_ctx_id.GetId()].used = false; return true; } @@ -154,11 +177,12 @@ bool Network::SslTerm(int ssl_ctx_id) return false; } -int Network::HttpInit(int memid, int ssl_ctx_id, uint64_t pool_size) +Network::Id Network::HttpInit(int memid, Id ssl_ctx_id, uint64_t pool_size) { Core::LockGuard lock(m_mutex); - if (ssl_ctx_id >= 0 && ssl_ctx_id < SSL_MAX && m_ssl[ssl_ctx_id].used && memid >= 0 && memid < POOLS_MAX && m_pools[memid].used) + if (ssl_ctx_id.GetId() >= 0 && ssl_ctx_id.GetId() < SSL_MAX && m_ssl[ssl_ctx_id.GetId()].used && memid >= 0 && memid < POOLS_MAX && + m_pools[memid].used) { for (int id = 0; id < HTTP_MAX; id++) { @@ -166,31 +190,31 @@ int Network::HttpInit(int memid, int ssl_ctx_id, uint64_t pool_size) { m_http[id].used = true; m_http[id].size = pool_size; - m_http[id].ssl_ctx_id = ssl_ctx_id; + m_http[id].ssl_ctx_id = ssl_ctx_id.GetId(); m_http[id].memid = memid; - return id; + return Id::Create(id); } } } - return -1; + return Id::Invalid(); } -bool Network::HttpValid(int http_ctx_id) +bool Network::HttpValid(Id http_ctx_id) { Core::LockGuard lock(m_mutex); - return (http_ctx_id >= 0 && http_ctx_id < HTTP_MAX && m_http[http_ctx_id].used); + return (http_ctx_id.GetId() >= 0 && http_ctx_id.GetId() < HTTP_MAX && m_http[http_ctx_id.GetId()].used); } -bool Network::HttpTerm(int http_ctx_id) +bool Network::HttpTerm(Id http_ctx_id) { Core::LockGuard lock(m_mutex); if (HttpValid(http_ctx_id)) { - m_http[http_ctx_id].used = false; + m_http[http_ctx_id.GetId()].used = false; return true; } @@ -198,18 +222,18 @@ bool Network::HttpTerm(int http_ctx_id) return false; } -int Network::HttpCreateTemplate(int http_ctx_id, const char* user_agent, int http_ver, bool is_auto_proxy_conf) +Network::Id Network::HttpCreateTemplate(Id http_ctx_id, const char* user_agent, int http_ver, bool is_auto_proxy_conf) { Core::LockGuard lock(m_mutex); - if (http_ctx_id >= 0 && http_ctx_id < HTTP_MAX && m_http[http_ctx_id].used) + if (http_ctx_id.GetId() >= 0 && http_ctx_id.GetId() < HTTP_MAX && m_http[http_ctx_id.GetId()].used) { HttpTemplate tn {}; tn.used = true; tn.http_ver = http_ver; tn.user_agent = String::FromUtf8(user_agent); tn.is_auto_proxy_conf = is_auto_proxy_conf; - tn.http_ctx_id = http_ctx_id; + tn.http_ctx_id = http_ctx_id.GetId(); int index = 0; for (auto& t: m_templates) @@ -217,26 +241,26 @@ int Network::HttpCreateTemplate(int http_ctx_id, const char* user_agent, int htt if (!t.used) { t = tn; - return index; + return Id::Create(index); } index++; } m_templates.Add(tn); - return index; + return Id::Create(index); } - return -1; + return Id::Invalid(); } -bool Network::HttpDeleteTemplate(int tmpl_id) +bool Network::HttpDeleteTemplate(Id tmpl_id) { Core::LockGuard lock(m_mutex); - if (m_templates.IndexValid(tmpl_id) && m_templates.At(tmpl_id).used) + if (m_templates.IndexValid(tmpl_id.GetId()) && m_templates.At(tmpl_id.GetId()).used) { - m_templates[tmpl_id].used = false; + m_templates[tmpl_id.GetId()].used = false; return true; } @@ -364,14 +388,14 @@ int KYTY_SYSV_ABI 
SslInit(uint64_t pool_size) EXIT_NOT_IMPLEMENTED(pool_size == 0); - int id = g_net->SslInit(pool_size); + auto id = g_net->SslInit(pool_size); - if (id < 0) + if (!id.IsValid()) { return SSL_ERROR_OUT_OF_SIZE; } - return id + 1; + return id.ToInt(); } int KYTY_SYSV_ABI SslTerm(int ssl_ctx_id) @@ -380,7 +404,7 @@ int KYTY_SYSV_ABI SslTerm(int ssl_ctx_id) EXIT_IF(g_net == nullptr); - if (!g_net->SslTerm(ssl_ctx_id - 1)) + if (!g_net->SslTerm(Network::Id(ssl_ctx_id))) { return SSL_ERROR_INVALID_ID; } @@ -406,14 +430,14 @@ int KYTY_SYSV_ABI HttpInit(int memid, int ssl_ctx_id, uint64_t pool_size) EXIT_NOT_IMPLEMENTED(pool_size == 0); - int id = g_net->HttpInit(memid, ssl_ctx_id - 1, pool_size); + auto id = g_net->HttpInit(memid, Network::Id(ssl_ctx_id), pool_size); - if (id < 0) + if (!id.IsValid()) { return HTTP_ERROR_OUT_OF_MEMORY; } - return id + 1; + return id.ToInt(); } int KYTY_SYSV_ABI HttpTerm(int http_ctx_id) @@ -422,7 +446,7 @@ int KYTY_SYSV_ABI HttpTerm(int http_ctx_id) EXIT_IF(g_net == nullptr); - if (!g_net->HttpTerm(http_ctx_id - 1)) + if (!g_net->HttpTerm(Network::Id(http_ctx_id))) { return HTTP_ERROR_INVALID_ID; } @@ -441,14 +465,14 @@ int KYTY_SYSV_ABI HttpCreateTemplate(int http_ctx_id, const char* user_agent, in EXIT_IF(g_net == nullptr); - int id = g_net->HttpCreateTemplate(http_ctx_id, user_agent, http_ver, is_auto_proxy_conf != 0); + auto id = g_net->HttpCreateTemplate(Network::Id(http_ctx_id), user_agent, http_ver, is_auto_proxy_conf != 0); - if (id < 0) + if (!id.IsValid()) { return HTTP_ERROR_OUT_OF_MEMORY; } - return id + 1; + return id.ToInt(); } int KYTY_SYSV_ABI HttpDeleteTemplate(int tmpl_id) @@ -457,7 +481,7 @@ int KYTY_SYSV_ABI HttpDeleteTemplate(int tmpl_id) EXIT_IF(g_net == nullptr); - if (!g_net->HttpDeleteTemplate(tmpl_id - 1)) + if (!g_net->HttpDeleteTemplate(Network::Id(tmpl_id))) { return HTTP_ERROR_INVALID_ID; } @@ -689,6 +713,19 @@ int KYTY_SYSV_ABI NpRegisterPlusEventCallback(void* /*callback*/, void* /*userda } // namespace NpManager +namespace NpManagerForToolkit { + +LIB_NAME("NpManagerForToolkit", "NpManager"); + +int KYTY_SYSV_ABI NpRegisterStateCallbackForToolkit(void* /*callback*/, void* /*userdata*/) +{ + PRINT_NAME(); + + return OK; +} + +} // namespace NpManagerForToolkit + namespace NpTrophy { LIB_NAME("NpTrophy", "NpTrophy"); @@ -719,7 +756,7 @@ int KYTY_SYSV_ABI NpWebApiInitialize(int http_ctx_id, size_t pool_size) printf("\t http_ctx_id = %d\n", http_ctx_id); printf("\t pool_size = %" PRIu64 "\n", pool_size); - EXIT_NOT_IMPLEMENTED(!g_net->HttpValid(http_ctx_id - 1)); + EXIT_NOT_IMPLEMENTED(!g_net->HttpValid(Network::Id(http_ctx_id))); static int id = 0; diff --git a/source/emulator/src/RuntimeLinker.cpp b/source/emulator/src/RuntimeLinker.cpp index a229d25..ea0c417 100644 --- a/source/emulator/src/RuntimeLinker.cpp +++ b/source/emulator/src/RuntimeLinker.cpp @@ -675,7 +675,8 @@ Program* RuntimeLinker::LoadProgram(const String& elf_name) } if (elf_name.FilenameWithoutExtension().EndsWith(U"libc") || elf_name.FilenameWithoutExtension().EndsWith(U"Fios2") || - elf_name.FilenameWithoutExtension().EndsWith(U"Fios2_debug")) + elf_name.FilenameWithoutExtension().EndsWith(U"Fios2_debug") || elf_name.FilenameWithoutExtension().EndsWith(U"NpToolkit") || + elf_name.FilenameWithoutExtension().EndsWith(U"NpToolkit2")) { program->fail_if_global_not_resolved = false; } @@ -701,6 +702,23 @@ void RuntimeLinker::SaveMainProgram(const String& elf_name) } } +void RuntimeLinker::SaveProgram(Program* program, const String& elf_name) +{ + 
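+	// Find the already-loaded program and write its ELF image back out to elf_name.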
EXIT_NOT_IMPLEMENTED(!Core::Thread::IsMainThread()); + + Core::LockGuard lock(m_mutex); + + if (auto index = m_programs.Find(program); m_programs.IndexValid(index)) + { + EXIT_IF(m_programs.At(index)->elf == nullptr); + + m_programs.At(index)->elf->Save(elf_name); + } else + { + EXIT("program not found"); + } +} + void RuntimeLinker::Clear() { // EXIT_NOT_IMPLEMENTED(!Core::Thread::IsMainThread()); diff --git a/source/emulator/src/VirtualMemory.cpp b/source/emulator/src/VirtualMemory.cpp index a4fee1a..7c815cd 100644 --- a/source/emulator/src/VirtualMemory.cpp +++ b/source/emulator/src/VirtualMemory.cpp @@ -6,7 +6,10 @@ #include "Emulator/Jit.h" #include "Emulator/Profiler.h" -#include +#include "cpuinfo.h" + +//#include +//#include // NOLINTNEXTLINE //#define NTDDI_VERSION 0x0A000005 @@ -52,6 +55,12 @@ SystemInfo GetSystemInfo() ret.ProcessorLevel = system_info.wProcessorLevel; ret.ProcessorRevision = system_info.wProcessorRevision; + const auto* p = cpuinfo_get_package(0); + + EXIT_IF(p == nullptr); + + ret.ProcessorName = String::FromUtf8(p->name); + return ret; } @@ -280,6 +289,11 @@ static VirtualMemory::Mode get_protection_flag(DWORD mode) } } +void Init() +{ + cpuinfo_initialize(); +} + uint64_t Alloc(uint64_t address, uint64_t size, Mode mode) { auto ptr = reinterpret_cast(VirtualAlloc(reinterpret_cast(static_cast(address)), size, diff --git a/source/include/Kyty/Core/MSpace.h b/source/include/Kyty/Core/MSpace.h new file mode 100644 index 0000000..354cdad --- /dev/null +++ b/source/include/Kyty/Core/MSpace.h @@ -0,0 +1,37 @@ +#ifndef INCLUDE_KYTY_CORE_MSPACE_H_ +#define INCLUDE_KYTY_CORE_MSPACE_H_ + +#include "Kyty/Core/Common.h" + +namespace Kyty::Core { + +using mspace_t = void*; +using mspace_dbg_callback_t = void (*)(mspace_t, size_t, size_t); + +struct MSpaceSize +{ + size_t max_system_size; + size_t current_system_size; + size_t max_inuse_size; + size_t current_inuse_size; +}; + +mspace_t MSpaceCreate(const char* name, void* base, size_t capacity, bool thread_safe, mspace_dbg_callback_t dbg_callback); +bool MSpaceDestroy(mspace_t msp); +void* MSpaceMalloc(mspace_t msp, size_t size); +bool MSpaceFree(mspace_t msp, void* ptr); +void* MSpaceRealloc(mspace_t msp, void* ptr, size_t size); + +void* MSpaceCalloc(mspace_t msp, size_t nelem, size_t size); +void* MSpaceAlignedAlloc(mspace_t msp, size_t alignment, size_t size); +void* MSpaceMemalign(mspace_t msp, size_t boundary, size_t size); +void* MSpaceReallocalign(mspace_t msp, void* ptr, size_t boundary, size_t size); +bool MSpacePosixMemalign(mspace_t msp, void** ptr, size_t boundary, size_t size); +size_t MSpaceMallocUsableSize(void* ptr); +bool MSpaceMallocStats(mspace_t msp, MSpaceSize* mmsize); +bool MSpaceMallocStatsFast(mspace_t msp, MSpaceSize* mmsize); +bool MSpaceIsHeapEmpty(mspace_t msp); + +} // namespace Kyty::Core + +#endif /* INCLUDE_KYTY_CORE_MSPACE_H_ */ diff --git a/source/include/Kyty/UnitTest.h b/source/include/Kyty/UnitTest.h new file mode 100644 index 0000000..35fd440 --- /dev/null +++ b/source/include/Kyty/UnitTest.h @@ -0,0 +1,54 @@ +#ifndef UNIT_TEST_INCLUDE_UNITTEST_H_ +#define UNIT_TEST_INCLUDE_UNITTEST_H_ + +#include "Kyty/Core/Common.h" // IWYU pragma: export +#include "Kyty/Core/MemoryAlloc.h" // IWYU pragma: keep +#include "Kyty/Core/Subsystems.h" + +#include "gtest/gtest-message.h" // IWYU pragma: export +#include "gtest/gtest-test-part.h" // IWYU pragma: export +#include "gtest/gtest.h" // IWYU pragma: export + +#include // IWYU pragma: export + +namespace Kyty::UnitTest { + +bool unit_test_all(); 
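+// Runs every test case registered with gtest (via RUN_ALL_TESTS) and returns true when all of them pass.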
+ +KYTY_SUBSYSTEM_DEFINE(UnitTest); + +#define UT_MEM_CHECK_INIT() int test_ms = Core::mem_new_state(); + +#define UT_MEM_CHECK() \ + if (!HasFailure()) \ + { \ + Core::MemStats test_mem_stat = {test_ms, 0, 0}; \ + Core::mem_get_stat(&test_mem_stat); \ + size_t ut_total_allocated = test_mem_stat.total_allocated; \ + uint32_t ut_blocks_num = test_mem_stat.blocks_num; \ + if (ut_total_allocated != 0U || ut_blocks_num != 0U) Core::mem_print(test_ms); \ + EXPECT_EQ(ut_total_allocated, 0U); \ + EXPECT_EQ(ut_blocks_num, 0U); \ + } + +#define UT_NAMESPACE(name) \ + namespace Kyty { \ + namespace UnitTest { \ + namespace name { + +#define UT_BEGIN(name) \ + namespace Kyty { \ + namespace UnitTest { \ + KYTY_FORCE_LINK_THIS(UnitTest##name); \ + namespace name { + +#define UT_END() \ + } \ + } \ + } + +#define UT_LINK(test) KYTY_FORCE_LINK_THAT(UnitTest##test); + +} // namespace Kyty::UnitTest + +#endif /* UNIT_TEST_INCLUDE_UNITTEST_H_ */ diff --git a/source/lib/Core/src/MSpace.cpp b/source/lib/Core/src/MSpace.cpp new file mode 100644 index 0000000..9550722 --- /dev/null +++ b/source/lib/Core/src/MSpace.cpp @@ -0,0 +1,555 @@ +// +// Original algorithm is from: +// https://sqlite.org/src/file?name=src/mem3.c +// SQLite source code is in the public-domain and is free to everyone to use for any purpose. + +#include "Kyty/Core/MSpace.h" + +#include "Kyty/Core/Common.h" +#include "Kyty/Core/String.h" +#include "Kyty/Core/Threads.h" + +namespace Kyty::Core { + +static constexpr size_t MSPACE_HEADER_SIZE = 1440; + +static uint64_t align(uint64_t v, uint64_t a) +{ + return (v + (a - 1)) & ~(a - 1); +} + +static constexpr uint32_t MSPACE_ARRAY_SMALL = 10; +static constexpr uint32_t MSPACE_ARRAY_HASH = 61; + +struct MSpaceBlock +{ + union + { + struct + { + uint32_t prev_size; + uint32_t size_4x; + } hdr; + struct + { + uint32_t next; + uint32_t prev; + } list; + } u; +}; + +struct MSpaceContext +{ + uint32_t capacity = 0; + MSpaceBlock* base = nullptr; + bool in_callback = false; + Core::Mutex* mutex = nullptr; + mspace_dbg_callback_t dbg_callback = nullptr; + char name[32] = {0}; + uint32_t index_of_key_chunk = 0; + uint32_t size_of_key_chunk = 0; + uint32_t array_small[MSPACE_ARRAY_SMALL - 1] = {}; + uint32_t array_hash[MSPACE_ARRAY_HASH] = {}; +}; + +static void MSpaceInternalUnlinkFromList(MSpaceContext& ctx, uint32_t i, uint32_t* root) +{ + uint32_t next = ctx.base[i].u.list.next; + uint32_t prev = ctx.base[i].u.list.prev; + + if (prev == 0) + { + *root = next; + } else + { + ctx.base[prev].u.list.next = next; + } + if (next != 0) + { + ctx.base[next].u.list.prev = prev; + } + ctx.base[i].u.list.next = 0; + ctx.base[i].u.list.prev = 0; +} + +static void MSpaceInternalUnlink(MSpaceContext& ctx, uint32_t i) +{ + uint32_t size = ctx.base[i - 1].u.hdr.size_4x / 4; + if (size <= MSPACE_ARRAY_SMALL) + { + MSpaceInternalUnlinkFromList(ctx, i, &ctx.array_small[size - 2]); + } else + { + uint32_t hash = size % MSPACE_ARRAY_HASH; + MSpaceInternalUnlinkFromList(ctx, i, &ctx.array_hash[hash]); + } +} + +static void MSpaceInternalLinkIntoList(MSpaceContext& ctx, uint32_t i, uint32_t* root) +{ + ctx.base[i].u.list.next = *root; + ctx.base[i].u.list.prev = 0; + if (*root != 0u) + { + ctx.base[*root].u.list.prev = i; + } + *root = i; +} + +static void MSpaceInternalLink(MSpaceContext& ctx, uint32_t i) +{ + uint32_t size = ctx.base[i - 1].u.hdr.size_4x / 4; + + if (size <= MSPACE_ARRAY_SMALL) + { + MSpaceInternalLinkIntoList(ctx, i, &ctx.array_small[size - 2]); + } else + { + uint32_t hash = size % MSPACE_ARRAY_HASH; + 
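+		// Chunks larger than MSPACE_ARRAY_SMALL blocks are linked into one of MSPACE_ARRAY_HASH buckets keyed by block count; smaller chunks use exact-size free lists.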
MSpaceInternalLinkIntoList(ctx, i, &ctx.array_hash[hash]); + } +} + +static void MSpaceInternalEnter(MSpaceContext& ctx) +{ + if (ctx.mutex != nullptr) + { + ctx.mutex->Lock(); + } +} + +static void MSpaceInternalLeave(MSpaceContext& ctx) +{ + if (ctx.mutex != nullptr) + { + ctx.mutex->Unlock(); + } +} + +static void MSpaceInternalOutOfMemory(MSpaceContext& ctx, uint32_t free_bytes, uint32_t bytes) +{ + if (!ctx.in_callback) + { + ctx.in_callback = true; + + if (ctx.dbg_callback != nullptr) + { + MSpaceInternalLeave(ctx); + + ctx.dbg_callback(&ctx, free_bytes, bytes); + + MSpaceInternalEnter(ctx); + } + + ctx.in_callback = false; + } +} + +static void* MSpaceInternalCheckout(MSpaceContext& ctx, uint32_t i, uint32_t num_blocks) +{ + uint32_t x = ctx.base[i - 1].u.hdr.size_4x; + ctx.base[i - 1].u.hdr.size_4x = num_blocks * 4 | 1u | (x & 2u); + ctx.base[i + num_blocks - 1].u.hdr.prev_size = num_blocks; + ctx.base[i + num_blocks - 1].u.hdr.size_4x |= 2u; + return &ctx.base[i]; +} + +static void* MSpaceInternalFromKeyBlk(MSpaceContext& ctx, uint32_t num_blocks) +{ + if (num_blocks >= ctx.size_of_key_chunk - 1) + { + void* p = MSpaceInternalCheckout(ctx, ctx.index_of_key_chunk, ctx.size_of_key_chunk); + ctx.index_of_key_chunk = 0; + ctx.size_of_key_chunk = 0; + return p; + } + + uint32_t newi = ctx.index_of_key_chunk + ctx.size_of_key_chunk - num_blocks; + + ctx.base[ctx.index_of_key_chunk + ctx.size_of_key_chunk - 1].u.hdr.prev_size = num_blocks; + ctx.base[ctx.index_of_key_chunk + ctx.size_of_key_chunk - 1].u.hdr.size_4x |= 2u; + ctx.base[newi - 1].u.hdr.size_4x = num_blocks * 4 + 1; + ctx.size_of_key_chunk -= num_blocks; + ctx.base[newi - 1].u.hdr.prev_size = ctx.size_of_key_chunk; + uint32_t x = ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x & 2u; + ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x = ctx.size_of_key_chunk * 4 | x; + return static_cast(&ctx.base[newi]); +} + +static void MSpaceInternalMerge(MSpaceContext& ctx, uint32_t* root) +{ + uint32_t next = 0; + + for (uint32_t i = *root; i > 0; i = next) + { + next = ctx.base[i].u.list.next; + uint32_t size = ctx.base[i - 1].u.hdr.size_4x; + if ((size & 2u) == 0) + { + MSpaceInternalUnlinkFromList(ctx, i, root); + uint32_t prev = i - ctx.base[i - 1].u.hdr.prev_size; + if (prev == next) + { + next = ctx.base[prev].u.list.next; + } + MSpaceInternalUnlink(ctx, prev); + size = i + size / 4 - prev; + uint32_t x = ctx.base[prev - 1].u.hdr.size_4x & 2u; + ctx.base[prev - 1].u.hdr.size_4x = size * 4 | x; + ctx.base[prev + size - 1].u.hdr.prev_size = size; + MSpaceInternalLink(ctx, prev); + i = prev; + } else + { + size /= 4; + } + if (size > ctx.size_of_key_chunk) + { + ctx.index_of_key_chunk = i; + ctx.size_of_key_chunk = size; + } + } +} + +static void* MSpaceInternalMallocUnsafe(MSpaceContext& ctx, uint32_t size, uint32_t report_size) +{ + uint32_t num_blocks = 0; + + if (size <= 12) + { + num_blocks = 2; + } else + { + num_blocks = (size + 11) / 8; + } + + if (num_blocks <= MSPACE_ARRAY_SMALL) + { + uint32_t i = ctx.array_small[num_blocks - 2]; + if (i > 0) + { + MSpaceInternalUnlinkFromList(ctx, i, &ctx.array_small[num_blocks - 2]); + return MSpaceInternalCheckout(ctx, i, num_blocks); + } + } else + { + uint32_t hash = num_blocks % MSPACE_ARRAY_HASH; + for (uint32_t i = ctx.array_hash[hash]; i > 0; i = ctx.base[i].u.list.next) + { + if (ctx.base[i - 1].u.hdr.size_4x / 4 == num_blocks) + { + MSpaceInternalUnlinkFromList(ctx, i, &ctx.array_hash[hash]); + return MSpaceInternalCheckout(ctx, i, num_blocks); + } + } + } + + if 
(ctx.size_of_key_chunk >= num_blocks) + { + return MSpaceInternalFromKeyBlk(ctx, num_blocks); + } + + for (uint32_t to_free = num_blocks * 16; to_free < (ctx.capacity * 16); to_free *= 2) + { + MSpaceInternalOutOfMemory(ctx, to_free, report_size); + if (ctx.index_of_key_chunk != 0u) + { + MSpaceInternalLink(ctx, ctx.index_of_key_chunk); + ctx.index_of_key_chunk = 0; + ctx.size_of_key_chunk = 0; + } + for (auto& hash: ctx.array_hash) + { + MSpaceInternalMerge(ctx, &hash); + } + for (auto& small: ctx.array_small) + { + MSpaceInternalMerge(ctx, &small); + } + if (ctx.size_of_key_chunk != 0u) + { + MSpaceInternalUnlink(ctx, ctx.index_of_key_chunk); + if (ctx.size_of_key_chunk >= num_blocks) + { + return MSpaceInternalFromKeyBlk(ctx, num_blocks); + } + } + } + + return nullptr; +} + +static void MSpaceInternalFreeUnsafe(MSpaceContext& ctx, void* old) +{ + auto* p = static_cast(old); + + uint32_t index = p - ctx.base; + uint32_t size = ctx.base[index - 1].u.hdr.size_4x / 4; + ctx.base[index - 1].u.hdr.size_4x &= ~1u; + ctx.base[index + size - 1].u.hdr.prev_size = size; + ctx.base[index + size - 1].u.hdr.size_4x &= ~2u; + MSpaceInternalLink(ctx, index); + + if (ctx.index_of_key_chunk != 0u) + { + while ((ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x & 2u) == 0) + { + size = ctx.base[ctx.index_of_key_chunk - 1].u.hdr.prev_size; + ctx.index_of_key_chunk -= size; + ctx.size_of_key_chunk += size; + MSpaceInternalUnlink(ctx, ctx.index_of_key_chunk); + uint32_t x = ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x & 2u; + ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x = ctx.size_of_key_chunk * 4 | x; + ctx.base[ctx.index_of_key_chunk + ctx.size_of_key_chunk - 1].u.hdr.prev_size = ctx.size_of_key_chunk; + } + uint32_t x = ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x & 2u; + while ((ctx.base[ctx.index_of_key_chunk + ctx.size_of_key_chunk - 1].u.hdr.size_4x & 1u) == 0) + { + MSpaceInternalUnlink(ctx, ctx.index_of_key_chunk + ctx.size_of_key_chunk); + ctx.size_of_key_chunk += ctx.base[ctx.index_of_key_chunk + ctx.size_of_key_chunk - 1].u.hdr.size_4x / 4; + ctx.base[ctx.index_of_key_chunk - 1].u.hdr.size_4x = ctx.size_of_key_chunk * 4 | x; + ctx.base[ctx.index_of_key_chunk + ctx.size_of_key_chunk - 1].u.hdr.prev_size = ctx.size_of_key_chunk; + } + } +} + +static uint32_t MSpaceInternalSize(void* p) +{ + auto* block = static_cast(p); + return (block[-1].u.hdr.size_4x & ~3u) * 2 - 4; +} + +static void* MSpaceInternalMalloc(MSpaceContext& ctx, uint32_t size, uint32_t report_size) +{ + MSpaceInternalEnter(ctx); + auto* p = MSpaceInternalMallocUnsafe(ctx, size, report_size); + MSpaceInternalLeave(ctx); + return p; +} + +static void* MSpaceInternalMalloc_align(MSpaceContext& ctx, uint32_t size, uint64_t boundary) +{ + auto addr = reinterpret_cast(MSpaceInternalMalloc(ctx, size + boundary + 8, size)); + if (addr != 0) + { + uint64_t aligned_addr = align(addr + 8, boundary); + auto* buf = reinterpret_cast(aligned_addr); + buf[-1] = addr; + return buf; + } + return nullptr; +} + +static void MSpaceInternalFree(MSpaceContext& ctx, void* prior) +{ + MSpaceInternalEnter(ctx); + MSpaceInternalFreeUnsafe(ctx, prior); + MSpaceInternalLeave(ctx); +} + +static void MSpaceInternalFree_align(MSpaceContext& ctx, void* buf) +{ + auto* buf64 = static_cast(buf); + uint64_t real_addr = buf64[-1]; + MSpaceInternalFree(ctx, reinterpret_cast(real_addr)); +} + +static void* MSpaceInternalRealloc(MSpaceContext& ctx, void* prior, uint32_t size, uint32_t report_size) +{ + if (prior == nullptr) + { + return 
MSpaceInternalMalloc(ctx, size, report_size); + } + if (size <= 0) + { + MSpaceInternalFree(ctx, prior); + return nullptr; + } + auto old = MSpaceInternalSize(prior); + if (size <= old && size >= old - 128) + { + return prior; + } + MSpaceInternalEnter(ctx); + auto* p = MSpaceInternalMallocUnsafe(ctx, size, report_size); + if (p != nullptr) + { + if (old < size) + { + memcpy(p, prior, old); + } else + { + memcpy(p, prior, size); + } + MSpaceInternalFreeUnsafe(ctx, prior); + } + MSpaceInternalLeave(ctx); + return p; +} + +static void* MSpaceInternalRealloc_align(MSpaceContext& ctx, void* buf, uint32_t size, uint64_t boundary) +{ + auto* buf64 = static_cast(buf); + uint64_t real_addr = (buf64 != nullptr ? buf64[-1] : 0); + auto addr = reinterpret_cast(MSpaceInternalRealloc(ctx, reinterpret_cast(real_addr), size + boundary + 8, size)); + if (addr != 0) + { + uint64_t aligned_addr = align(addr + 8, boundary); + if (addr != real_addr) + { + auto* dst = reinterpret_cast(aligned_addr); + auto* src = reinterpret_cast(addr + reinterpret_cast(buf) - real_addr); + if (dst != src) + { + memmove(dst, src, size); + } + } + auto* buf = reinterpret_cast(aligned_addr); + buf[-1] = addr; + return buf; + } + return nullptr; +} + +static bool MSpaceInternalInit(MSpaceContext& ctx, const char* name, void* base, size_t capacity, bool thread_safe, + mspace_dbg_callback_t dbg_callback) +{ + if (sizeof(MSpaceBlock) != 8) + { + return false; + } + + if ((capacity / sizeof(MSpaceBlock)) < 3) + { + return false; + } + + ctx = MSpaceContext(); + + ctx.base = static_cast(base); + ctx.capacity = (capacity / sizeof(MSpaceBlock)) - 2; + + ctx.size_of_key_chunk = ctx.capacity; + ctx.index_of_key_chunk = 1; + ctx.base[0].u.hdr.size_4x = (ctx.size_of_key_chunk << 2u) + 2; + ctx.base[ctx.capacity].u.hdr.prev_size = ctx.capacity; + ctx.base[ctx.capacity].u.hdr.size_4x = 1; + + ctx.mutex = (thread_safe ? 
new Core::Mutex : nullptr); + ctx.dbg_callback = dbg_callback; + + snprintf(ctx.name, sizeof(ctx.name), "%s", name); + + return true; +} + +static void MSpaceInternalShutdown(MSpaceContext& ctx) +{ + delete ctx.mutex; +} + +mspace_t MSpaceCreate(const char* name, void* base, size_t capacity, bool thread_safe, mspace_dbg_callback_t dbg_callback) +{ + auto addr = reinterpret_cast(base); + + if ((addr & 0x7u) != 0) + { + return nullptr; + } + + if ((capacity & 0x7u) != 0) + { + return nullptr; + } + + if (sizeof(MSpaceContext) > MSPACE_HEADER_SIZE) + { + return nullptr; + } + + if (base == nullptr || capacity < MSPACE_HEADER_SIZE) + { + return nullptr; + } + + if (name == nullptr) + { + return nullptr; + } + + uint64_t aligned_buf_addr = align(addr + MSPACE_HEADER_SIZE, 1); + + auto* ctx = reinterpret_cast(addr); + auto* buf = reinterpret_cast(aligned_buf_addr); + + if (!MSpaceInternalInit(*ctx, name, buf, (capacity - (aligned_buf_addr - addr)), thread_safe, dbg_callback)) + { + return nullptr; + } + + return ctx; +} + +bool MSpaceDestroy(mspace_t msp) +{ + if (msp == nullptr) + { + return false; + } + + auto* ctx = static_cast(msp); + + MSpaceInternalShutdown(*ctx); + + return true; +} + +void* MSpaceMalloc(mspace_t msp, size_t size) +{ + if (msp == nullptr) + { + return nullptr; + } + + if ((size >> 32u) != 0) + { + return nullptr; + } + + auto* ctx = static_cast(msp); + + return MSpaceInternalMalloc_align(*ctx, size, 32); +} + +bool MSpaceFree(mspace_t msp, void* ptr) +{ + if (msp == nullptr) + { + return false; + } + + auto* ctx = static_cast(msp); + + MSpaceInternalFree_align(*ctx, ptr); + + return true; +} + +void* MSpaceRealloc(mspace_t msp, void* ptr, size_t size) +{ + if (msp == nullptr) + { + return nullptr; + } + + if ((size >> 32u) != 0) + { + return nullptr; + } + + auto* ctx = static_cast(msp); + + return MSpaceInternalRealloc_align(*ctx, ptr, size, 32); +} + +} // namespace Kyty::Core diff --git a/source/src_script.cmake b/source/src_script.cmake index ccdf12d..23038b4 100644 --- a/source/src_script.cmake +++ b/source/src_script.cmake @@ -1,4 +1,5 @@ set(KYTY_SCRIPT_SRC +3rdparty/gtest/src/gtest-all.cc KytyScripts.cpp ) diff --git a/source/unit_test/.clang-tidy b/source/unit_test/.clang-tidy new file mode 100644 index 0000000..868fe3a --- /dev/null +++ b/source/unit_test/.clang-tidy @@ -0,0 +1,50 @@ +Checks: '-*,bugprone-*,cert-*,clang-analyzer-*,google-*,llvm-*,-llvm-header-guard,misc-*,modernize-*,-modernize-use-trailing-return-type,-modernize-avoid-c-arrays,performance-*,portability-*,readability-*,-readability-magic-numbers,-readability-uppercase-literal-suffix,cppcoreguidelines-*,-cppcoreguidelines-pro-type-vararg,-cppcoreguidelines-avoid-magic-numbers,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-cppcoreguidelines-owning-memory,-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-cppcoreguidelines-pro-type-reinterpret-cast,-cppcoreguidelines-avoid-c-arrays,hicpp-*,-hicpp-vararg,-hicpp-no-array-decay,-hicpp-avoid-c-arrays,-hicpp-uppercase-literal-suffix,-cppcoreguidelines-pro-bounds-constant-array-index,-cppcoreguidelines-pro-type-union-access,-cppcoreguidelines-pro-type-static-cast-downcast,-modernize-pass-by-value,-cert-dcl50-cpp,-cert-err58-cpp,-bugprone-undefined-memory-manipulation,-*-function-size,-misc-no-recursion,-cppcoreguidelines-avoid-non-const-global-variables,-readability-function-cognitive-complexity,-performance-no-int-to-ptr' +CheckOptions: + - key: readability-redundant-access-specifiers.CheckFirstDeclaration + value: 1 + - key: 
misc-non-private-member-variables-in-classes.IgnorePublicMemberVariables + value: 1 + - key: cppcoreguidelines-pro-type-member-init.IgnoreArrays + value: 1 + - key: cppcoreguidelines-macro-usage.AllowedRegexp + value: (KYTY_*)|(EXIT*)|(ASSERT*)|(UT_*) + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.MemberCase + value: lower_case + - key: readability-identifier-naming.PrivateMemberPrefix + value: 'm_' + - key: readability-identifier-naming.ProtectedMemberPrefix + value: 'm_' + - key: readability-identifier-naming.PublicMethodCase + value: CamelCase + - key: readability-identifier-naming.PublicMemberCase + value: aNy_CasE + - key: readability-identifier-naming.StructCase + value: CamelCase + - key: readability-identifier-naming.ConstantCase + value: UPPER_CASE + - key: readability-identifier-naming.EnumConstantCase + value: CamelCase + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.TemplateParameterCase + value: CamelCase + - key: readability-identifier-naming.ValueTemplateParameterCase + value: CamelCase + - key: readability-identifier-naming.LocalConstantCase + value: lower_case + - key: readability-identifier-naming.GlobalConstantCase + value: lower_case + - key: readability-identifier-naming.GlobalConstantPrefix + value: 'g_' + - key: readability-identifier-naming.GlobalVariableCase + value: lower_case + - key: readability-identifier-naming.GlobalVariablePrefix + value: 'g_' + - key: readability-identifier-naming.VariableCase + value: lower_case + - key: readability-identifier-naming.ParameterCase + value: lower_case + - key: readability-identifier-naming.ConstexprVariableCase + value: UPPER_CASE diff --git a/source/unit_test/CMakeLists.txt b/source/unit_test/CMakeLists.txt new file mode 100644 index 0000000..dc77bfc --- /dev/null +++ b/source/unit_test/CMakeLists.txt @@ -0,0 +1,39 @@ +file(GLOB unit_test_src + "include/*.h" + "src/*.cpp" + "src/core/*.cpp" +) + +if (MSVC AND CLANG) + set_source_files_properties(${unit_test_src} PROPERTIES COMPILE_FLAGS "-Wno-pragma-pack") +endif() + +add_library(unit_test STATIC ${unit_test_src}) + +target_link_libraries(unit_test core) +target_link_libraries(unit_test math) + +target_include_directories(unit_test PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") + +list(APPEND inc_headers + ${PROJECT_BINARY_DIR} + ${PROJECT_SOURCE_DIR} + ${CMAKE_SOURCE_DIR}/include +# ${CMAKE_SOURCE_DIR}/3rdparty/sdl2/include +# ${CMAKE_SOURCE_DIR}/3rdparty/lua/include + ${CMAKE_SOURCE_DIR}/3rdparty/gtest/include + ${CMAKE_SOURCE_DIR}/3rdparty/gtest +# ${CMAKE_SOURCE_DIR}/3rdparty/rijndael/source +# ${CMAKE_SOURCE_DIR}/3rdparty/miniz +# ${CMAKE_SOURCE_DIR}/3rdparty/magic_enum/include +) + +list(APPEND check_headers + ${CMAKE_SOURCE_DIR}/include +) + +clang_tidy_check(unit_test "" "${check_headers}" "${inc_headers}") + +include_what_you_use(unit_test "${inc_headers}") + + diff --git a/source/unit_test/src/UnitTest.cpp b/source/unit_test/src/UnitTest.cpp new file mode 100644 index 0000000..2c75ba0 --- /dev/null +++ b/source/unit_test/src/UnitTest.cpp @@ -0,0 +1,22 @@ +#include "Kyty/UnitTest.h" + +namespace Kyty::UnitTest { + +UT_LINK(CoreCharString); +UT_LINK(CoreMSpace); + +KYTY_SUBSYSTEM_INIT(UnitTest) +{ + testing::InitGoogleTest(KYTY_SUBSYSTEM_ARGC, KYTY_SUBSYSTEM_ARGV); +} + +KYTY_SUBSYSTEM_UNEXPECTED_SHUTDOWN(UnitTest) {} + +KYTY_SUBSYSTEM_DESTROY(UnitTest) {} + +bool unit_test_all() +{ + return RUN_ALL_TESTS() == 0; +} + +} // namespace Kyty::UnitTest 
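Before the allocator tests below, a minimal usage sketch of the MSpace API declared in Kyty/Core/MSpace.h. The buffer size, alignment and allocation sizes here are arbitrary, and the out-of-memory callback is simply disabled by passing nullptr:

	#include "Kyty/Core/MSpace.h"

	#include <cstdint>

	void MSpaceExample()
	{
		// The allocator lives entirely inside a caller-provided buffer;
		// the internal context is stored at the start of that buffer.
		alignas(16) static uint8_t storage[16 * 1024];

		Kyty::Core::mspace_t msp = Kyty::Core::MSpaceCreate("example", storage, sizeof(storage),
		                                                    /*thread_safe=*/true, /*dbg_callback=*/nullptr);
		if (msp == nullptr)
		{
			return; // base not 8-byte aligned, capacity not a multiple of 8, or buffer too small
		}

		void* p = Kyty::Core::MSpaceMalloc(msp, 128); // returned pointers are 32-byte aligned
		p       = Kyty::Core::MSpaceRealloc(msp, p, 256);
		Kyty::Core::MSpaceFree(msp, p);

		Kyty::Core::MSpaceDestroy(msp);
	}
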
diff --git a/source/unit_test/src/core/UnitTestCoreMSpace.cpp b/source/unit_test/src/core/UnitTestCoreMSpace.cpp new file mode 100644 index 0000000..007ccc6 --- /dev/null +++ b/source/unit_test/src/core/UnitTestCoreMSpace.cpp @@ -0,0 +1,230 @@ +#include "Kyty/Core/MSpace.h" +#include "Kyty/Core/Vector.h" +#include "Kyty/Math/Rand.h" +#include "Kyty/UnitTest.h" + +UT_BEGIN(CoreMSpace); + +using Core::MSpaceCreate; +using Core::MSpaceDestroy; +using Core::MSpaceFree; +using Core::MSpaceMalloc; +using Core::MSpaceRealloc; +using Math::Rand; + +static void test_fail() +{ + size_t s = 1000; + auto* buf = new uint8_t[s]; + + auto* m = MSpaceCreate("test", buf, s, true, nullptr); + + EXPECT_EQ(m, nullptr); + + void* ptr = MSpaceMalloc(m, 56); + + EXPECT_EQ(ptr, nullptr); + + EXPECT_FALSE(MSpaceFree(m, ptr)); + + EXPECT_FALSE(MSpaceDestroy(m)); + + delete[] buf; +} + +static size_t g_size = 0; +static uint8_t* g_ptr = nullptr; + +static void test_callback(Core::mspace_t m, size_t /*free_size*/, size_t size) +{ + g_size = size; + if (g_ptr != nullptr) + { + EXPECT_TRUE(MSpaceFree(m, g_ptr)); + } +} + +static void test_ok() +{ + size_t s = 2000; + auto* buf = new uint8_t[s]; + + auto* m = MSpaceCreate("test", buf, s, true, test_callback); + + EXPECT_NE(m, nullptr); + + auto* ptr = static_cast(MSpaceMalloc(m, 56)); + EXPECT_NE(ptr, nullptr); + EXPECT_TRUE(ptr > buf && ptr < buf + s); + + g_size = 0; + g_ptr = nullptr; + auto* ptr2 = static_cast(MSpaceMalloc(m, 460)); + EXPECT_EQ(ptr2, nullptr); + EXPECT_EQ(g_size, 460u); + + g_size = 0; + g_ptr = ptr; + ptr2 = static_cast(MSpaceMalloc(m, 460)); + EXPECT_NE(ptr2, nullptr); + EXPECT_TRUE(ptr2 > buf && ptr2 < buf + s); + + EXPECT_TRUE(MSpaceFree(m, ptr2)); + + g_size = 0; + g_ptr = nullptr; + ptr2 = static_cast(MSpaceRealloc(m, nullptr, 60)); + EXPECT_NE(ptr2, nullptr); + EXPECT_TRUE(ptr2 > buf && ptr2 < buf + s); + ptr2 = static_cast(MSpaceRealloc(m, ptr2, 160)); + EXPECT_NE(ptr2, nullptr); + EXPECT_TRUE(ptr2 > buf && ptr2 < buf + s); + + EXPECT_TRUE(MSpaceDestroy(m)); + + delete[] buf; +} + +static void test_align() +{ + uint32_t s = Rand::UintInclusiveRange(5000, 20000) & ~0x7u; + auto* buf = new uint8_t[s]; + + auto* m = MSpaceCreate("test", buf, s, true, nullptr); + + EXPECT_NE(m, nullptr); + + // printf("buf = %016" PRIx64 ", %u\n", reinterpret_cast(buf), s); + + int iter_num = Rand::IntInclusiveRange(10, 20); + + for (int i = 0; i < iter_num; i++) + { + uint32_t size = Rand::UintInclusiveRange(1, 64); + uint32_t size_r = Rand::UintInclusiveRange(32, 128); + auto* buf32 = MSpaceMalloc(m, size); + auto* buf32_r = MSpaceRealloc(m, buf32, size_r); + auto addr32 = reinterpret_cast(buf32); + auto addr32_r = reinterpret_cast(buf32_r); + // printf("%016" PRIx64 ", %u => %016" PRIx64 ", %u\n", addr32, size, addr32_r, size_r); + EXPECT_NE(addr32, 0u); + EXPECT_EQ(addr32 & 0x1fu, 0u); + EXPECT_NE(addr32_r, 0u); + EXPECT_EQ(addr32_r & 0x1fu, 0u); + } + + EXPECT_TRUE(MSpaceDestroy(m)); + + delete[] buf; +} + +struct TestRecord +{ + uint8_t* buf = nullptr; + uint8_t pattern = 0; + uint32_t size = 0; +}; + +static void test_fill() +{ + uint32_t s = Rand::UintInclusiveRange(2000, 10000) & ~0x7u; + auto* buf = new uint8_t[s]; + + auto* m = MSpaceCreate("test", buf, s, true, nullptr); + EXPECT_NE(m, nullptr); + + Vector rs; + + for (int step = 0; step < 5; step++) + { + int add = 0; + int del = 0; + int rea = 0; + for (;;) + { + uint32_t size = Rand::UintInclusiveRange(1, 200); + auto* buf32 = static_cast(MSpaceMalloc(m, size)); + if (buf32 == nullptr) + { + 
break; + } + uint8_t pattern = Rand::UintInclusiveRange(0, 255); + memset(buf32, pattern, size); + + TestRecord r {}; + r.buf = buf32; + r.pattern = pattern; + r.size = size; + + rs.Add(r); + add++; + } + + for (auto& r: rs) + { + if (r.buf != nullptr && (Rand::Uint() % 8) == 0) + { + MSpaceFree(m, r.buf); + r.buf = nullptr; + del++; + } + } + + for (auto& r: rs) + { + if (r.buf != nullptr && (Rand::Uint() % 4) == 0) + { + auto* n = static_cast(MSpaceRealloc(m, r.buf, r.size + Rand::UintInclusiveRange(1, 200))); + if (n != nullptr) + { + r.buf = n; + rea++; + } + } + } + + // printf("add = %d, del = %d, rea = %d\n", add, del, rea); + } + + bool ok = true; + for (const auto& r: rs) + { + if (r.buf != nullptr) + { + for (uint32_t i = 0; i < r.size; i++) + { + if (r.pattern != r.buf[i]) + { + ok = false; + break; + } + } + if (!ok) + { + break; + } + } + } + EXPECT_TRUE(ok); + + EXPECT_TRUE(MSpaceDestroy(m)); + delete[] buf; +} + +TEST(Core, MSpace) +{ + UT_MEM_CHECK_INIT(); + + test_fail(); + test_ok(); + + for (int i = 0; i < 5; i++) + { + test_align(); + test_fill(); + } + + UT_MEM_CHECK(); +} + +UT_END(); diff --git a/source/unit_test/src/core/UnitTestCoreString.cpp b/source/unit_test/src/core/UnitTestCoreString.cpp new file mode 100644 index 0000000..3769559 --- /dev/null +++ b/source/unit_test/src/core/UnitTestCoreString.cpp @@ -0,0 +1,1022 @@ +#include "Kyty/Core/String.h" +#include "Kyty/Core/Vector.h" +#include "Kyty/UnitTest.h" + +UT_BEGIN(CoreCharString); + +using Core::Char; +using Core::CharProperty; +using Core::StringList; + +void test_char() +{ + EXPECT_EQ(sizeof(char32_t), 4U); + EXPECT_EQ(sizeof(CharProperty), 4U); + + EXPECT_TRUE(Char::IsDecimal(U'1')); + EXPECT_TRUE(Char::IsDecimal(U'9')); + EXPECT_TRUE(!Char::IsDecimal(U'g')); + EXPECT_TRUE(!Char::IsDecimal(U'У')); + + EXPECT_TRUE(!Char::IsAlpha(U'4')); + EXPECT_TRUE(!Char::IsAlpha(U'0')); + EXPECT_TRUE(Char::IsAlpha(U'P')); + EXPECT_TRUE(Char::IsAlpha(U'ы')); + + EXPECT_TRUE(!Char::IsAlphaNum(U'!')); + EXPECT_TRUE(!Char::IsAlphaNum(U'#')); + EXPECT_TRUE(Char::IsAlphaNum(U'8')); + EXPECT_TRUE(Char::IsAlphaNum(U'Ё')); + + EXPECT_TRUE(Char::IsLower(U'r')); + EXPECT_TRUE(Char::IsLower(U'д')); + EXPECT_TRUE(!Char::IsLower(U'W')); + EXPECT_TRUE(!Char::IsLower(U'И')); + + EXPECT_TRUE(Char::IsUpper(U'Q')); + EXPECT_TRUE(Char::IsUpper(U'Б')); + EXPECT_TRUE(!Char::IsUpper(U'f')); + EXPECT_TRUE(!Char::IsUpper(U'ж')); + + EXPECT_TRUE(Char::IsSpace(U' ')); + EXPECT_TRUE(Char::IsSpace(U'\t')); + EXPECT_TRUE(Char::IsSpace(U'\r')); + EXPECT_TRUE(Char::IsSpace(U'\n')); + EXPECT_TRUE(!Char::IsSpace(U'.')); + EXPECT_TRUE(!Char::IsSpace(U'_')); + EXPECT_TRUE(!Char::IsSpace(U'5')); + EXPECT_TRUE(!Char::IsSpace(U'Y')); + + EXPECT_EQ(Char::HexDigit(U'0'), 0); + EXPECT_EQ(Char::HexDigit(U'9'), 9); + EXPECT_EQ(Char::HexDigit(U'B'), 11); + EXPECT_EQ(Char::HexDigit(U'f'), 15); + EXPECT_EQ(Char::HexDigit(U'g'), -1); + EXPECT_EQ(Char::HexDigit(U'x'), -1); + + EXPECT_EQ(Char::ToUpper(U'a'), U'A'); + EXPECT_EQ(Char::ToUpper(U'ю'), U'Ю'); + EXPECT_EQ(Char::ToUpper(U';'), U';'); + EXPECT_EQ(Char::ToUpper(U'P'), U'P'); + + EXPECT_EQ(Char::ToLower(U'a'), U'a'); + EXPECT_EQ(Char::ToLower(U'T'), U't'); + EXPECT_EQ(Char::ToLower(U'Ц'), U'ц'); + EXPECT_EQ(Char::ToLower(U'?'), U'?'); +} + +void test() +{ + String e; + String s = String::FromUtf8("abcdABCDАБВГДабвгд"); + + EXPECT_EQ(s.Size(), 18U); + EXPECT_TRUE(!s.IsEmpty()); + EXPECT_EQ(e.Size(), 0U); + EXPECT_TRUE(e.IsEmpty()); + + e = s; + s.Clear(); + + EXPECT_EQ(s.Size(), 0U); + EXPECT_TRUE(s.IsEmpty()); 
+ EXPECT_EQ(e.Size(), 18U); + EXPECT_TRUE(!e.IsEmpty()); + + EXPECT_EQ(e[1], U'b'); + EXPECT_EQ(e[9], U'Б'); + + s = String::FromUtf8("acdABCАВГДабвгд"); + s = String::FromUtf8("abcdABCDАБВГДабвгд"); + + EXPECT_EQ(s[3], U'd'); + EXPECT_EQ(s[11], U'Г'); + + s = '0'; + + EXPECT_EQ(s.Size(), 1U); + + String::Utf8 utf = String::FromUtf8("abcdABCDАБВГДабвгд").utf8_str(); + EXPECT_EQ(strcmp(utf.GetData(), "abcdABCDАБВГДабвгд"), 0); + + EXPECT_EQ(utf.Size(), 29U); + EXPECT_EQ(utf.At(0), 0x61); + EXPECT_EQ(utf.At(4), 0x41); + EXPECT_EQ(utf.At(16), (char)0xD0); + EXPECT_EQ(utf.At(17), (char)0x94); + + String::Utf8 cp866 = String::FromUtf8("abcABCАБВабвЭЮЯэюя╫▓").cp866_str(); + + EXPECT_EQ(cp866.Size(), 21U); + EXPECT_EQ(cp866.At(0), 0x61); + EXPECT_EQ(cp866.At(3), 0x41); + EXPECT_EQ(cp866.At(9), (char)0xA0); + EXPECT_EQ(cp866.At(19), (char)0xB2); + + String ss = String::FromUtf8("abcd"); + ss += String::FromUtf8("efgh"); + EXPECT_EQ(strcmp(ss.utf8_str().GetData(), "abcdefgh"), 0); + + String sum1 = String::FromUtf8("ab1") + String::FromUtf8("cd2"); + String sum2 = sum1 + U"cd2" + U"апр"; + + EXPECT_TRUE(sum2 == "ab1cd2cd2апр"); + EXPECT_TRUE(sum2.EqualNoCase("AB1CD2CD2АПР")); + EXPECT_TRUE(sum2 != "ab1cd2cd2апР"); + EXPECT_TRUE(sum2 == U"ab1cd2cd2апр"); + EXPECT_TRUE(sum2.EqualNoCase(U"AB1CD2CD2АПР")); + EXPECT_TRUE(sum2 != U"ab1cd2cd2апР"); + + String mt = String::FromUtf8("0123456789"); + + EXPECT_TRUE(String::FromUtf8("").IsEmpty()); + + EXPECT_EQ(mt.Mid(0, 4), "0123"); + EXPECT_EQ(mt.Mid(6, 4), "6789"); + EXPECT_EQ(mt.Mid(6, 5), "6789"); + EXPECT_EQ(mt.Mid(10, 1), ""); + EXPECT_EQ(mt.Mid(0, 0), ""); + + EXPECT_EQ(mt.Left(2), "01"); + EXPECT_EQ(mt.Left(0), ""); + EXPECT_EQ(mt.Left(12), "0123456789"); + + EXPECT_EQ(mt.Right(2), "89"); + EXPECT_EQ(mt.Right(0), ""); + EXPECT_EQ(mt.Right(12), "0123456789"); + + EXPECT_EQ(String::FromUtf8("abcdабвг%#123").ToUpper(), "ABCDАБВГ%#123"); + EXPECT_EQ(String::FromUtf8("ABCDАБВГ%#123").ToLower(), "abcdабвг%#123"); + EXPECT_EQ(String::FromUtf8("abcdабвг%#123").ToUpper(), U"ABCDАБВГ%#123"); + EXPECT_EQ(String::FromUtf8("ABCDАБВГ%#123").ToLower(), U"abcdабвг%#123"); +} + +void test_3() +{ + String tt = String::FromUtf8(" 123 abc\r\n"); + String tt2 = String::FromUtf8(" \t \r\n "); + String tt3 = String::FromUtf8("no_space"); + EXPECT_EQ(tt.TrimLeft(), "123 abc\r\n"); + EXPECT_EQ(tt.TrimRight(), " 123 abc"); + EXPECT_EQ(tt.Trim(), "123 abc"); + EXPECT_EQ(tt.Simplify(), "123 abc"); + EXPECT_EQ(tt2.TrimLeft(), ""); + EXPECT_EQ(tt2.TrimRight(), ""); + EXPECT_EQ(tt2.Trim(), ""); + EXPECT_EQ(tt2.Simplify(), ""); + EXPECT_EQ(tt3.TrimLeft(), tt3); + EXPECT_EQ(tt3.TrimRight(), tt3); + EXPECT_EQ(tt3.Trim(), tt3); + EXPECT_EQ(tt3.Simplify(), tt3); + + EXPECT_EQ(String::FromUtf8("й ц у к е н").ReplaceChar(U' ', U'_'), "й_ц_у_к_е_н"); + EXPECT_EQ(String::FromUtf8("й ц у к е н").RemoveChar(U' '), "йцукен"); + EXPECT_EQ(String::FromUtf8("йцУКен").RemoveChar(U'у', String::Case::Insensitive), "йцКен"); + EXPECT_EQ(String::FromUtf8("йцУКен").ReplaceChar(U'к', U'q', String::Case::Insensitive), "йцУqен"); + EXPECT_EQ(String::FromUtf8("й ц у к е н").RemoveAt(1).RemoveAt(2).RemoveAt(3), "йцук е н"); + EXPECT_EQ(String::FromUtf8("йцукен").RemoveAt(0, 0), "йцукен"); + EXPECT_EQ(String::FromUtf8("йцукен").RemoveAt(10, 1), "йцукен"); + EXPECT_EQ(String::FromUtf8("йцукен").RemoveAt(0, 10), ""); + EXPECT_EQ(String::FromUtf8("йцукен").RemoveAt(1, 10), "й"); + EXPECT_EQ(String::FromUtf8("йцукен").RemoveAt(1, 2), "йкен"); + + EXPECT_EQ(String::FromUtf8("\nй\nц\n\nу к е 
н\n").ReplaceStr(U"\n", U"\r\n"), "\r\nй\r\nц\r\n\r\nу к е н\r\n"); + EXPECT_EQ(String::FromUtf8("\nй\nц\n\nу к е н\n").RemoveStr(U"\n"), "йцу к е н"); + EXPECT_EQ(String::FromUtf8("abcabcabc").RemoveStr(U"bc"), "aaa"); + + String it = String::FromUtf8("abcЙЦУ123"); + EXPECT_EQ(it.InsertAt(0, U'q'), "qabcЙЦУ123"); + EXPECT_EQ(it.InsertAt(3, U"_d_"), "abc_d_ЙЦУ123"); + EXPECT_EQ(it.InsertAt(10, U"000"), "abcЙЦУ123000"); + + String ft = String::FromUtf8("ab ab ab"); + EXPECT_EQ(ft.FindIndex(U"ab", 0), 0U); + EXPECT_EQ(ft.FindIndex(U"ab", 1), 3U); + EXPECT_EQ(ft.FindIndex(U"AB", 0), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"ab", 7), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"ab", 15), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"abc", 0), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"", 5), 5U); + + EXPECT_EQ(ft.FindLastIndex(U"ab"), 6U); + EXPECT_EQ(ft.FindLastIndex(U"ab", 6), 6U); + EXPECT_EQ(ft.FindLastIndex(U"ab", 5), 3U); + EXPECT_EQ(ft.FindLastIndex(U"AB"), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindLastIndex(U""), 7U); + + EXPECT_EQ(ft.FindIndex(U'a', 0), 0U); + EXPECT_EQ(ft.FindIndex(U'a', 1), 3U); + EXPECT_EQ(ft.FindIndex(U'A', 0), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U'a', 7), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U'a', 15), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U'c', 0), Core::STRING_INVALID_INDEX); + + EXPECT_EQ(ft.FindLastIndex(U'a'), 6U); + EXPECT_EQ(ft.FindLastIndex(U'a', 6), 6U); + EXPECT_EQ(ft.FindLastIndex(U'a', 5), 3U); + EXPECT_EQ(ft.FindLastIndex(U'A'), Core::STRING_INVALID_INDEX); + + String ft2 = String::FromUtf8("ab AB ab"); + EXPECT_EQ(ft2.FindIndex(U"aB", 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindIndex(U"Ab", 1, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U"AB", 3, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U"ab", 4, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U"aB", Core::STRING_INVALID_INDEX, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U"Ab", 6, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U"AB", 5, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindLastIndex(U"ab", 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindLastIndex(U"abc", Core::STRING_INVALID_INDEX, String::Case::Insensitive), Core::STRING_INVALID_INDEX); + + EXPECT_EQ(ft2.FindIndex(U'a', 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindIndex(U'A', 1, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U'A', 3, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U'a', 4, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U'a', Core::STRING_INVALID_INDEX, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U'A', 6, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U'A', 5, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindLastIndex(U'a', 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindLastIndex(U'c', Core::STRING_INVALID_INDEX, String::Case::Insensitive), Core::STRING_INVALID_INDEX); +} + +void test_2() +{ + String pf; + EXPECT_TRUE(pf.Printf("s%s_d%d\n", "abcdАБВГ", 4)); + EXPECT_EQ(pf, "sabcdАБВГ_d4\n"); + + EXPECT_TRUE(String::FromUtf8("abcdabcd").StartsWith(U"abc")); + EXPECT_TRUE(String::FromUtf8("abcdabcd").StartsWith(U"")); + EXPECT_TRUE(String::FromUtf8("").StartsWith(U"")); + EXPECT_TRUE(!String::FromUtf8("").StartsWith(U"abc")); + EXPECT_TRUE(!String::FromUtf8("abcdabcd").StartsWith(U"aBc")); + 
EXPECT_TRUE(String::FromUtf8("abcdabcd").StartsWith(U"aBc", String::Case::Insensitive)); + EXPECT_TRUE(String::FromUtf8("abcdabcd").StartsWith(U'a')); + EXPECT_TRUE(!String::FromUtf8("").StartsWith(U'a')); + EXPECT_TRUE(!String::FromUtf8("abcdabcd").StartsWith(U'A')); + EXPECT_TRUE(String::FromUtf8("abcdabcd").StartsWith(U'A', String::Case::Insensitive)); + EXPECT_TRUE(String::FromUtf8("abcdabcd").EndsWith(U"cd")); + EXPECT_TRUE(!String::FromUtf8("abcdabcd").EndsWith(U"bc")); + EXPECT_TRUE(String::FromUtf8("abcdabcd").EndsWith(U"cD", String::Case::Insensitive)); + EXPECT_TRUE(!String::FromUtf8("").EndsWith(U"cd")); + EXPECT_TRUE(String::FromUtf8("abcdabcd").EndsWith(U"")); + EXPECT_TRUE(String::FromUtf8("").EndsWith(U"")); + EXPECT_TRUE(String::FromUtf8("abcdabcd").EndsWith(U'd')); + EXPECT_TRUE(!String::FromUtf8("abcdabcd").EndsWith(U'D')); + EXPECT_TRUE(String::FromUtf8("abcdabcd").EndsWith(U'D', String::Case::Insensitive)); + EXPECT_TRUE(!String::FromUtf8("").EndsWith(U'd')); + + EXPECT_TRUE(String::FromUtf8("abcdabcd").ContainsStr(U"cda")); + EXPECT_TRUE(String::FromUtf8("abcdabcd").ContainsStr(U"")); + EXPECT_TRUE(!String::FromUtf8("").ContainsStr(U"cda")); + EXPECT_TRUE(String::FromUtf8("").ContainsStr(U"")); + EXPECT_TRUE(!String::FromUtf8("abcdabcd").ContainsStr(U"cDa")); + EXPECT_TRUE(String::FromUtf8("abcdabcd").ContainsStr(U"cDa", String::Case::Insensitive)); + + EXPECT_TRUE(String::FromUtf8("abcd").ContainsChar(U'b')); + EXPECT_TRUE(!String::FromUtf8("").ContainsChar(U'c')); + EXPECT_TRUE(!String::FromUtf8("abcdabcd").ContainsChar(U'D')); + EXPECT_TRUE(String::FromUtf8("abcdabcd").ContainsChar(U'D', String::Case::Insensitive)); + + EXPECT_EQ(String::FromUtf8("123456789012345678900").ToInt32(), INT32_MAX); + EXPECT_EQ(String::FromUtf8("-123456789012345678900").ToInt32(), INT32_MIN); + EXPECT_EQ(String::FromUtf8("123456789012345678900").ToUint32(), UINT32_MAX); +#if FC_PLATFORM == FC_PLATFORM_ANDROID || FC_COMPILER == FC_COMPILER_MSVC + // EXPECT_EQ(String::FromUtf8("-123456789012345678900").ToUint32(), UINT32_MAX); +#else + // EXPECT_EQ(String::FromUtf8("-123456789012345678900").ToUint32(), (~UINT32_MAX) + 1); +#endif + EXPECT_EQ(String::FromUtf8("123456789012345678900").ToInt64(), INT64_MAX); + EXPECT_EQ(String::FromUtf8("-123456789012345678900").ToInt64(), INT64_MIN); + EXPECT_EQ(String::FromUtf8("123456789012345678900").ToUint64(), UINT64_MAX); +#if FC_PLATFORM == FC_PLATFORM_ANDROID || FC_COMPILER == FC_COMPILER_MSVC + // EXPECT_EQ(String::FromUtf8("-123456789012345678900").ToUint64(), UINT64_MAX); +#else + // EXPECT_EQ(String::FromUtf8("-123456789012345678900").ToUint64(), (~UINT64_MAX) + 1); +#endif + EXPECT_EQ(String::FromUtf8("0xabcd").ToUint32(16), 0xabcdU); + EXPECT_EQ(String::FromUtf8("-0.345").ToDouble(), -0.345); + + EXPECT_EQ(String::FromUtf8("abcd/dede/dcdc").DirectoryWithoutFilename(), String::FromUtf8("abcd/dede/")); + EXPECT_EQ(String::FromUtf8("abcd/dede/").DirectoryWithoutFilename(), String::FromUtf8("abcd/dede/")); + EXPECT_EQ(String::FromUtf8("/abcd/dede/dcdc").DirectoryWithoutFilename(), String::FromUtf8("/abcd/dede/")); + EXPECT_EQ(String::FromUtf8("abcddededcdc").DirectoryWithoutFilename(), String::FromUtf8("")); + EXPECT_EQ(String::FromUtf8("abcd/dede/dcdc").FilenameWithoutDirectory(), String::FromUtf8("dcdc")); + EXPECT_EQ(String::FromUtf8("abcd/dede/").FilenameWithoutDirectory(), String::FromUtf8("")); + EXPECT_EQ(String::FromUtf8("/abcd/dede/dcdc").FilenameWithoutDirectory(), String::FromUtf8("dcdc")); + 
EXPECT_EQ(String::FromUtf8("abcddededcdc").FilenameWithoutDirectory(), String::FromUtf8("abcddededcdc")); + + EXPECT_EQ(String::FromUtf8("abcdef").RemoveLast(0), String::FromUtf8("abcdef")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveLast(1), String::FromUtf8("abcde")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveLast(6), String::FromUtf8("")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveLast(7), String::FromUtf8("")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveFirst(0), String::FromUtf8("abcdef")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveFirst(1), String::FromUtf8("bcdef")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveFirst(6), String::FromUtf8("")); + EXPECT_EQ(String::FromUtf8("abcdef").RemoveFirst(7), String::FromUtf8("")); + + EXPECT_EQ(String::FromUtf8("abcd.ext").FilenameWithoutExtension(), String::FromUtf8("abcd")); + EXPECT_EQ(String::FromUtf8("abcd").FilenameWithoutExtension(), String::FromUtf8("abcd")); + EXPECT_EQ(String::FromUtf8("abcd.").FilenameWithoutExtension(), String::FromUtf8("abcd")); + EXPECT_EQ(String::FromUtf8(".ext").FilenameWithoutExtension(), String::FromUtf8("")); + + EXPECT_EQ(String::FromUtf8("abcd.ext").ExtensionWithoutFilename(), String::FromUtf8(".ext")); + EXPECT_EQ(String::FromUtf8("abcd").ExtensionWithoutFilename(), String::FromUtf8("")); + EXPECT_EQ(String::FromUtf8("abcd.").ExtensionWithoutFilename(), String::FromUtf8(".")); + EXPECT_EQ(String::FromUtf8(".ext").ExtensionWithoutFilename(), String::FromUtf8(".ext")); + + EXPECT_TRUE(String::FromUtf8("abcd").EqualAscii("abcd")); + EXPECT_TRUE(!String::FromUtf8("abc").EqualAscii("abcd")); + EXPECT_TRUE(!String::FromUtf8("abcd").EqualAscii("abc")); + + EXPECT_TRUE(Char::EqualAscii(String::FromUtf8("abcd").GetDataConst(), "abcd")); + EXPECT_TRUE(!Char::EqualAscii(String::FromUtf8("abc").GetDataConst(), "abcd")); + EXPECT_TRUE(!Char::EqualAscii(String::FromUtf8("abcd").GetDataConst(), "abc")); + EXPECT_TRUE(Char::EqualAsciiN(String::FromUtf8("abcd").GetDataConst(), "abcd", 4)); + EXPECT_TRUE(Char::EqualAsciiN(String::FromUtf8("abcd").GetDataConst(), "abcd", 3)); + EXPECT_TRUE(!Char::EqualAsciiN(String::FromUtf8("abc").GetDataConst(), "abcd", 4)); + EXPECT_TRUE(Char::EqualAsciiN(String::FromUtf8("abc").GetDataConst(), "abcd", 3)); + EXPECT_TRUE(Char::EqualAsciiN(String::FromUtf8("abcd").GetDataConst(), "abc", 3)); + + String s = String::FromUtf8("test"); + String s2(s.GetDataConst() + 2); + EXPECT_EQ(s2, "st"); + + String word = U"ebadc"; + EXPECT_EQ(word.SortChars(), U"abcde"); + + EXPECT_EQ(String::FromUtf8("abc123abcd", 0), U""); + EXPECT_EQ(String::FromUtf8("abc123abcd", 1), U"a"); + EXPECT_EQ(String::FromUtf8("abc123abcd", 4), U"abc1"); +} + +void test_U() +{ + String e; + String s = U"abcdABCDАБВГДабвгд"; + + EXPECT_EQ(s.Size(), 18U); + EXPECT_TRUE(!s.IsEmpty()); + EXPECT_EQ(e.Size(), 0U); + EXPECT_TRUE(e.IsEmpty()); + + e = s; + s.Clear(); + + EXPECT_EQ(s.Size(), 0U); + EXPECT_TRUE(s.IsEmpty()); + EXPECT_EQ(e.Size(), 18U); + EXPECT_TRUE(!e.IsEmpty()); + + EXPECT_EQ(e[1], U'b'); + EXPECT_EQ(e[9], U'Б'); + + s = U"acdABCАВГДабвгд"; + s = U"abcdABCDАБВГДабвгд"; + + EXPECT_EQ(s[3], U'd'); + EXPECT_EQ(s[11], U'Г'); + + s = U'0'; + + EXPECT_EQ(s.Size(), 1U); + + String::Utf8 utf = String(U"abcdABCDАБВГДабвгд").utf8_str(); + EXPECT_EQ(strcmp(utf.GetData(), "abcdABCDАБВГДабвгд"), 0); + + EXPECT_EQ(utf.Size(), 29U); + EXPECT_EQ(utf.At(0), 0x61); + EXPECT_EQ(utf.At(4), 0x41); + EXPECT_EQ(utf.At(16), (char)0xD0); + EXPECT_EQ(utf.At(17), (char)0x94); + + String::Utf8 cp866 = 
String(U"abcABCАБВабвЭЮЯэюя╫▓").cp866_str(); + + EXPECT_EQ(cp866.Size(), 21U); + EXPECT_EQ(cp866.At(0), 0x61); + EXPECT_EQ(cp866.At(3), 0x41); + EXPECT_EQ(cp866.At(9), (char)0xA0); + EXPECT_EQ(cp866.At(19), (char)0xB2); + + String ss = U"abcd"; + ss += U"efgh"; + EXPECT_EQ(strcmp(ss.utf8_str().GetData(), "abcdefgh"), 0); + + ss = "abcd"; + ss += "efgh2"; + EXPECT_EQ(strcmp(ss.utf8_str().GetData(), "abcdefgh2"), 0); + + String sum1 = U"ab1" + String(U"cd2"); + String sum2 = sum1 + U"cd2" + U"апр"; + + EXPECT_TRUE(sum2 == "ab1cd2cd2апр"); + EXPECT_TRUE(sum2.EqualNoCase("AB1CD2CD2АПР")); + EXPECT_TRUE(sum2 != "ab1cd2cd2апР"); + EXPECT_TRUE(sum2 == U"ab1cd2cd2апр"); + EXPECT_TRUE(sum2.EqualNoCase(U"AB1CD2CD2АПР")); + EXPECT_TRUE(sum2 != U"ab1cd2cd2апР"); + + sum1 = "_ab1" + String(U"cd2"); + sum2 = sum1 + "cd2" + "апр"; + + EXPECT_TRUE(sum2 == "_ab1cd2cd2апр"); + EXPECT_TRUE(sum2.EqualNoCase("_AB1CD2CD2АПР")); + EXPECT_TRUE(sum2 != "_ab1cd2cd2апР"); + EXPECT_TRUE(sum2 == U"_ab1cd2cd2апр"); + EXPECT_TRUE(sum2.EqualNoCase(U"_AB1CD2CD2АПР")); + EXPECT_TRUE(sum2 != U"_ab1cd2cd2апР"); + + String mt = U"0123456789"; + + EXPECT_TRUE(String(U"").IsEmpty()); + + EXPECT_EQ(mt.Mid(0, 4), "0123"); + EXPECT_EQ(mt.Mid(6, 4), "6789"); + EXPECT_EQ(mt.Mid(6, 5), "6789"); + EXPECT_EQ(mt.Mid(10, 1), ""); + EXPECT_EQ(mt.Mid(0, 0), ""); + + EXPECT_EQ(mt.Left(2), "01"); + EXPECT_EQ(mt.Left(0), ""); + EXPECT_EQ(mt.Left(12), "0123456789"); + + EXPECT_EQ(mt.Right(2), "89"); + EXPECT_EQ(mt.Right(0), ""); + EXPECT_EQ(mt.Right(12), "0123456789"); + + EXPECT_EQ(String(U"abcdабвг%#123").ToUpper(), "ABCDАБВГ%#123"); + EXPECT_EQ(String(U"ABCDАБВГ%#123").ToLower(), "abcdабвг%#123"); +} + +void test_U_3() +{ + String tt = U" 123 abc\r\n"; + String tt2 = U" \t \r\n "; + String tt3 = U"no_space"; + EXPECT_EQ(tt.TrimLeft(), "123 abc\r\n"); + EXPECT_EQ(tt.TrimRight(), " 123 abc"); + EXPECT_EQ(tt.Trim(), "123 abc"); + EXPECT_EQ(tt.Simplify(), "123 abc"); + EXPECT_EQ(tt2.TrimLeft(), ""); + EXPECT_EQ(tt2.TrimRight(), ""); + EXPECT_EQ(tt2.Trim(), ""); + EXPECT_EQ(tt2.Simplify(), ""); + EXPECT_EQ(tt3.TrimLeft(), tt3); + EXPECT_EQ(tt3.TrimRight(), tt3); + EXPECT_EQ(tt3.Trim(), tt3); + EXPECT_EQ(tt3.Simplify(), tt3); + + EXPECT_EQ(String(U"й ц у к е н").ReplaceChar(U' ', U'_'), "й_ц_у_к_е_н"); + EXPECT_EQ(String(U"й ц у к е н").RemoveChar(U' '), "йцукен"); + EXPECT_EQ(String(U"йцУКен").RemoveChar(U'у', String::Case::Insensitive), "йцКен"); + EXPECT_EQ(String(U"йцУКен").ReplaceChar(U'к', U'q', String::Case::Insensitive), "йцУqен"); + EXPECT_EQ(String(U"й ц у к е н").RemoveAt(1).RemoveAt(2).RemoveAt(3), "йцук е н"); + EXPECT_EQ(String(U"йцукен").RemoveAt(0, 0), "йцукен"); + EXPECT_EQ(String(U"йцукен").RemoveAt(10, 1), "йцукен"); + EXPECT_EQ(String(U"йцукен").RemoveAt(0, 10), ""); + EXPECT_EQ(String(U"йцукен").RemoveAt(1, 10), "й"); + EXPECT_EQ(String(U"йцукен").RemoveAt(1, 2), "йкен"); + + EXPECT_EQ(String(U"\nй\nц\n\nу к е н\n").ReplaceStr(U"\n", U"\r\n"), "\r\nй\r\nц\r\n\r\nу к е н\r\n"); + EXPECT_EQ(String(U"\nй\nц\n\nу к е н\n").RemoveStr(U"\n"), "йцу к е н"); + EXPECT_EQ(String(U"abcabcabc").RemoveStr(U"bc"), "aaa"); + + String it = U"abcЙЦУ123"; + EXPECT_EQ(it.InsertAt(0, U'q'), "qabcЙЦУ123"); + EXPECT_EQ(it.InsertAt(3, U"_d_"), "abc_d_ЙЦУ123"); + EXPECT_EQ(it.InsertAt(10, U"000"), "abcЙЦУ123000"); + + String ft = U"ab ab ab"; + EXPECT_EQ(ft.FindIndex(U"ab", 0), 0U); + EXPECT_EQ(ft.FindIndex(U"ab", 1), 3U); + EXPECT_EQ(ft.FindIndex(U"AB", 0), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"ab", 7), 
Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"ab", 15), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"abc", 0), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U"", 5), 5U); + + EXPECT_EQ(ft.FindLastIndex(U"ab"), 6U); + EXPECT_EQ(ft.FindLastIndex(U"ab", 6), 6U); + EXPECT_EQ(ft.FindLastIndex(U"ab", 5), 3U); + EXPECT_EQ(ft.FindLastIndex(U"AB"), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindLastIndex(U""), 7U); + + EXPECT_EQ(ft.FindIndex(U'a', 0), 0U); + EXPECT_EQ(ft.FindIndex(U'a', 1), 3U); + EXPECT_EQ(ft.FindIndex(U'A', 0), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U'a', 7), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U'a', 15), Core::STRING_INVALID_INDEX); + EXPECT_EQ(ft.FindIndex(U'c', 0), Core::STRING_INVALID_INDEX); + + EXPECT_EQ(ft.FindLastIndex(U'a'), 6U); + EXPECT_EQ(ft.FindLastIndex(U'a', 6), 6U); + EXPECT_EQ(ft.FindLastIndex(U'a', 5), 3U); + EXPECT_EQ(ft.FindLastIndex(U'A'), Core::STRING_INVALID_INDEX); + + String ft2 = U"ab AB ab"; + EXPECT_EQ(ft2.FindIndex(U"aB", 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindIndex(U"Ab", 1, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U"AB", 3, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U"ab", 4, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U"aB", Core::STRING_INVALID_INDEX, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U"Ab", 6, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U"AB", 5, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindLastIndex(U"ab", 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindLastIndex(U"abc", Core::STRING_INVALID_INDEX, String::Case::Insensitive), Core::STRING_INVALID_INDEX); + + EXPECT_EQ(ft2.FindIndex(U'a', 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindIndex(U'A', 1, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U'A', 3, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindIndex(U'a', 4, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U'a', Core::STRING_INVALID_INDEX, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U'A', 6, String::Case::Insensitive), 6U); + EXPECT_EQ(ft2.FindLastIndex(U'A', 5, String::Case::Insensitive), 3U); + EXPECT_EQ(ft2.FindLastIndex(U'a', 0, String::Case::Insensitive), 0U); + EXPECT_EQ(ft2.FindLastIndex(U'c', Core::STRING_INVALID_INDEX, String::Case::Insensitive), Core::STRING_INVALID_INDEX); +} + +void test_U_2() +{ + String pf; + EXPECT_TRUE(pf.Printf("s%s_d%d\n", "abcdАБВГ", 4)); + EXPECT_EQ(pf, "sabcdАБВГ_d4\n"); + + EXPECT_TRUE(String(U"abcdabcd").StartsWith(U"abc")); + EXPECT_TRUE(String(U"abcdabcd").StartsWith(U"")); + EXPECT_TRUE(String(U"").StartsWith(U"")); + EXPECT_TRUE(!String(U"").StartsWith(U"abc")); + EXPECT_TRUE(!String(U"abcdabcd").StartsWith(U"aBc")); + EXPECT_TRUE(String(U"abcdabcd").StartsWith(U"aBc", String::Case::Insensitive)); + EXPECT_TRUE(String(U"abcdabcd").StartsWith(U'a')); + EXPECT_TRUE(!String(U"").StartsWith(U'a')); + EXPECT_TRUE(!String(U"abcdabcd").StartsWith(U'A')); + EXPECT_TRUE(String(U"abcdabcd").StartsWith(U'A', String::Case::Insensitive)); + EXPECT_TRUE(String(U"abcdabcd").EndsWith(U"cd")); + EXPECT_TRUE(!String(U"abcdabcd").EndsWith(U"bc")); + EXPECT_TRUE(String(U"abcdabcd").EndsWith(U"cD", String::Case::Insensitive)); + EXPECT_TRUE(!String(U"").EndsWith(U"cd")); + EXPECT_TRUE(String(U"abcdabcd").EndsWith(U"")); + EXPECT_TRUE(String(U"").EndsWith(U"")); + EXPECT_TRUE(String(U"abcdabcd").EndsWith(U'd')); + EXPECT_TRUE(!String(U"abcdabcd").EndsWith(U'D')); + 
EXPECT_TRUE(String(U"abcdabcd").EndsWith(U'D', String::Case::Insensitive)); + EXPECT_TRUE(!String(U"").EndsWith(U'd')); + + EXPECT_TRUE(String(U"abcdabcd").ContainsStr(U"cda")); + EXPECT_TRUE(String(U"abcdabcd").ContainsStr(U"")); + EXPECT_TRUE(!String(U"").ContainsStr(U"cda")); + EXPECT_TRUE(String(U"").ContainsStr(U"")); + EXPECT_TRUE(!String(U"abcdabcd").ContainsStr(U"cDa")); + EXPECT_TRUE(String(U"abcdabcd").ContainsStr(U"cDa", String::Case::Insensitive)); + + EXPECT_TRUE(String(U"abcd").ContainsChar(U'b')); + EXPECT_TRUE(!String(U"").ContainsChar(U'c')); + EXPECT_TRUE(!String(U"abcdabcd").ContainsChar(U'D')); + EXPECT_TRUE(String(U"abcdabcd").ContainsChar(U'D', String::Case::Insensitive)); + + EXPECT_EQ(String(U"123456789012345678900").ToInt32(), INT32_MAX); + EXPECT_EQ(String(U"-123456789012345678900").ToInt32(), INT32_MIN); + EXPECT_EQ(String(U"123456789012345678900").ToUint32(), UINT32_MAX); +#if FC_PLATFORM == FC_PLATFORM_ANDROID || FC_COMPILER == FC_COMPILER_MSVC + // EXPECT_EQ(String(U"-123456789012345678900").ToUint32(), UINT32_MAX); +#else + // EXPECT_EQ(String(U"-123456789012345678900").ToUint32(), (~UINT32_MAX) + 1); +#endif + EXPECT_EQ(String(U"123456789012345678900").ToInt64(), INT64_MAX); + EXPECT_EQ(String(U"-123456789012345678900").ToInt64(), INT64_MIN); + EXPECT_EQ(String(U"123456789012345678900").ToUint64(), UINT64_MAX); +#if FC_PLATFORM == FC_PLATFORM_ANDROID || FC_COMPILER == FC_COMPILER_MSVC + // EXPECT_EQ(String(U"-123456789012345678900").ToUint64(), UINT64_MAX); +#else + // EXPECT_EQ(String(U"-123456789012345678900").ToUint64(), (~UINT64_MAX) + 1); +#endif + EXPECT_EQ(String(U"0xabcd").ToUint32(16), 0xabcdU); + EXPECT_EQ(String(U"-0.345").ToDouble(), -0.345); + + EXPECT_EQ(String(U"abcd/dede/dcdc").DirectoryWithoutFilename(), String(U"abcd/dede/")); + EXPECT_EQ(String(U"abcd/dede/").DirectoryWithoutFilename(), String(U"abcd/dede/")); + EXPECT_EQ(String(U"/abcd/dede/dcdc").DirectoryWithoutFilename(), String(U"/abcd/dede/")); + EXPECT_EQ(String(U"abcddededcdc").DirectoryWithoutFilename(), String(U"")); + EXPECT_EQ(String(U"abcd/dede/dcdc").FilenameWithoutDirectory(), String(U"dcdc")); + EXPECT_EQ(String(U"abcd/dede/").FilenameWithoutDirectory(), String(U"")); + EXPECT_EQ(String(U"/abcd/dede/dcdc").FilenameWithoutDirectory(), String(U"dcdc")); + EXPECT_EQ(String(U"abcddededcdc").FilenameWithoutDirectory(), String(U"abcddededcdc")); + + EXPECT_EQ(String(U"abcdef").RemoveLast(0), String(U"abcdef")); + EXPECT_EQ(String(U"abcdef").RemoveLast(1), String(U"abcde")); + EXPECT_EQ(String(U"abcdef").RemoveLast(6), String(U"")); + EXPECT_EQ(String(U"abcdef").RemoveLast(7), String(U"")); + EXPECT_EQ(String(U"abcdef").RemoveFirst(0), String(U"abcdef")); + EXPECT_EQ(String(U"abcdef").RemoveFirst(1), String(U"bcdef")); + EXPECT_EQ(String(U"abcdef").RemoveFirst(6), String(U"")); + EXPECT_EQ(String(U"abcdef").RemoveFirst(7), String(U"")); + + EXPECT_EQ(String(U"abcd.ext").FilenameWithoutExtension(), String(U"abcd")); + EXPECT_EQ(String(U"abcd").FilenameWithoutExtension(), String(U"abcd")); + EXPECT_EQ(String(U"abcd.").FilenameWithoutExtension(), String(U"abcd")); + EXPECT_EQ(String(U".ext").FilenameWithoutExtension(), String(U"")); + + EXPECT_EQ(String(U"abcd.ext").ExtensionWithoutFilename(), String(U".ext")); + EXPECT_EQ(String(U"abcd").ExtensionWithoutFilename(), String(U"")); + EXPECT_EQ(String(U"abcd.").ExtensionWithoutFilename(), String(U".")); + EXPECT_EQ(String(U".ext").ExtensionWithoutFilename(), String(U".ext")); + + EXPECT_TRUE(String(U"abcd").EqualAscii("abcd")); + 
EXPECT_TRUE(!String(U"abc").EqualAscii("abcd")); + EXPECT_TRUE(!String(U"abcd").EqualAscii("abc")); + + EXPECT_TRUE(Char::EqualAscii(String(U"abcd").GetDataConst(), "abcd")); + EXPECT_TRUE(!Char::EqualAscii(String(U"abc").GetDataConst(), "abcd")); + EXPECT_TRUE(!Char::EqualAscii(String(U"abcd").GetDataConst(), "abc")); + EXPECT_TRUE(Char::EqualAsciiN(String(U"abcd").GetDataConst(), "abcd", 4)); + EXPECT_TRUE(Char::EqualAsciiN(String(U"abcd").GetDataConst(), "abcd", 3)); + EXPECT_TRUE(!Char::EqualAsciiN(String(U"abc").GetDataConst(), "abcd", 4)); + EXPECT_TRUE(Char::EqualAsciiN(String(U"abc").GetDataConst(), "abcd", 3)); + EXPECT_TRUE(Char::EqualAsciiN(String(U"abcd").GetDataConst(), "abc", 3)); + + String s = U"test"; + String s2(s.GetDataConst() + 2); + EXPECT_EQ(s2, "st"); +} + +void test_list() +{ + StringList list = String(U" a b У р e ").Split(U" "); + + EXPECT_EQ(list[0], "a"); + EXPECT_EQ(list[1], "b"); + EXPECT_EQ(list[2], "У"); + EXPECT_EQ(list[3], "р"); + EXPECT_EQ(list[4], "e"); + + list = String(U",a,b,,У,р,e,").Split(U",", String::SplitType::WithEmptyParts); + + EXPECT_EQ(list[0], ""); + EXPECT_EQ(list[1], "a"); + EXPECT_EQ(list[2], "b"); + EXPECT_EQ(list[3], ""); + EXPECT_EQ(list[4], "У"); + EXPECT_EQ(list[5], "р"); + EXPECT_EQ(list[6], "e"); + EXPECT_EQ(list[7], ""); + + list = String(U"qaQbqqУQрqeQ").Split(U"q", String::SplitType::SplitNoEmptyParts, String::Case::Insensitive); + + EXPECT_EQ(list[0], "a"); + EXPECT_EQ(list[1], "b"); + EXPECT_EQ(list[2], "У"); + EXPECT_EQ(list[3], "р"); + EXPECT_EQ(list[4], "e"); + + list = String(U"qaQbqqУQрqeQ").Split(U"q", String::SplitType::SplitNoEmptyParts, String::Case::Sensitive); + + EXPECT_EQ(list[0], "aQb"); + EXPECT_EQ(list[1], "УQр"); + EXPECT_EQ(list[2], "eQ"); + + StringList list2 = list; + + list[0] = String::FromUtf8("a"); + list[1] = U"b"; + list[2] = U"c"; + + EXPECT_EQ(list2[0], "aQb"); + EXPECT_EQ(list2[1], "УQр"); + EXPECT_EQ(list2[2], "eQ"); + + EXPECT_TRUE(list.Contains(U"b")); + EXPECT_TRUE(!list.Contains(U"B")); + EXPECT_TRUE(list.Contains(U"B", String::Case::Insensitive)); + + EXPECT_TRUE(String(U"fdabn").ContainsAnyStr(list)); + EXPECT_TRUE(!String(U"Cfdabn").ContainsAllStr(list)); + EXPECT_TRUE(String(U"cfdabn").ContainsAllStr(list)); + EXPECT_TRUE(String(U"Cfdabn").ContainsAllStr(list, String::Case::Insensitive)); + + EXPECT_EQ(list.Concat(U", "), U"a, b, c"); + + auto chars = list.Concat(U""); + + EXPECT_TRUE(String(U"fdabn").ContainsAnyChar(chars)); + EXPECT_TRUE(!String(U"Cfdabn").ContainsAllChar(chars)); + EXPECT_TRUE(String(U"cfdabn").ContainsAllChar(chars)); + EXPECT_TRUE(String(U"Cfdabn").ContainsAllChar(chars, String::Case::Insensitive)); + + StringList l1 = String(U"a b c d e f").Split(U" "); + StringList l2 = String(U"a b c d e f").Split(U" "); + EXPECT_TRUE(l1.Equal(l2)); + EXPECT_TRUE(l1 == l2); + l1.Add(U"Q"); + EXPECT_TRUE(!l1.Equal(l2)); + EXPECT_TRUE(l1 != l2); + l2.Add(U"q"); + EXPECT_TRUE(!l1.Equal(l2)); + EXPECT_TRUE(l1 != l2); + EXPECT_TRUE(l1.EqualNoCase(l2)); +} + +void test_list_2() +{ + StringList list = String(U" a b У р e ").Split(U' '); + + EXPECT_EQ(list[0], "a"); + EXPECT_EQ(list[1], "b"); + EXPECT_EQ(list[2], "У"); + EXPECT_EQ(list[3], "р"); + EXPECT_EQ(list[4], "e"); + + list = String(U",a,b,,У,р,e,").Split(U',', String::SplitType::WithEmptyParts); + + EXPECT_EQ(list[0], ""); + EXPECT_EQ(list[1], "a"); + EXPECT_EQ(list[2], "b"); + EXPECT_EQ(list[3], ""); + EXPECT_EQ(list[4], "У"); + EXPECT_EQ(list[5], "р"); + EXPECT_EQ(list[6], "e"); + EXPECT_EQ(list[7], ""); + + list = 
String(U"qaQbqqУQрqeQ").Split(U'q', String::SplitType::SplitNoEmptyParts, String::Case::Insensitive); + + EXPECT_EQ(list[0], "a"); + EXPECT_EQ(list[1], "b"); + EXPECT_EQ(list[2], "У"); + EXPECT_EQ(list[3], "р"); + EXPECT_EQ(list[4], "e"); + + list = String(U"qaQbqqУQрqeQ").Split(U'q', String::SplitType::SplitNoEmptyParts, String::Case::Sensitive); + + EXPECT_EQ(list[0], "aQb"); + EXPECT_EQ(list[1], "УQр"); + EXPECT_EQ(list[2], "eQ"); + + StringList list2 = list; + + list[0] = String::FromUtf8("a"); + list[1] = U"b"; + list[2] = U"c"; + + EXPECT_EQ(list2[0], "aQb"); + EXPECT_EQ(list2[1], "УQр"); + EXPECT_EQ(list2[2], "eQ"); + + EXPECT_TRUE(list.Contains(U"b")); + EXPECT_TRUE(!list.Contains(U"B")); + EXPECT_TRUE(list.Contains(U"B", String::Case::Insensitive)); + + EXPECT_TRUE(String(U"fdabn").ContainsAnyStr(list)); + EXPECT_TRUE(!String(U"Cfdabn").ContainsAllStr(list)); + EXPECT_TRUE(String(U"cfdabn").ContainsAllStr(list)); + EXPECT_TRUE(String(U"Cfdabn").ContainsAllStr(list, String::Case::Insensitive)); + + EXPECT_EQ(list.Concat(U','), U"a,b,c"); + + auto chars = list.Concat(U""); + + EXPECT_TRUE(String(U"fdabn").ContainsAnyChar(chars)); + EXPECT_TRUE(!String(U"Cfdabn").ContainsAllChar(chars)); + EXPECT_TRUE(String(U"cfdabn").ContainsAllChar(chars)); + EXPECT_TRUE(String(U"Cfdabn").ContainsAllChar(chars, String::Case::Insensitive)); + + StringList l1 = String(U"a b c d e f").Split(U' '); + StringList l2 = String(U"a b c d e f").Split(U' '); + EXPECT_TRUE(l1.Equal(l2)); + EXPECT_TRUE(l1 == l2); + l1.Add(U"Q"); + EXPECT_TRUE(!l1.Equal(l2)); + EXPECT_TRUE(l1 != l2); + l2.Add(U"q"); + EXPECT_TRUE(!l1.Equal(l2)); + EXPECT_TRUE(l1 != l2); + EXPECT_TRUE(l1.EqualNoCase(l2)); +} + +static void test_printf() +{ + int8_t i8_1 = -1; + uint8_t u8_1 = i8_1; + int16_t i16_1 = -1; + uint16_t u16_1 = i16_1; + int32_t i32_1 = -1; + uint32_t u32_1 = i32_1; + int64_t i64_1 = -1; + uint64_t u64_1 = i64_1; + + int8_t i8_0 = 0; + uint8_t u8_0 = 0; + int16_t i16_0 = 0; + uint16_t u16_0 = 0; + int32_t i32_0 = 0; + uint32_t u32_0 = 0; + int64_t i64_0 = 0; + uint64_t u64_0 = 0; + + String s; + s.Printf("%" PRIi8 " %" PRIi8 " %" PRIi8 " %" PRIi8, i8_1, i8_0, i8_1, i8_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRId8 " %" PRId8 " %" PRId8 " %" PRId8, i8_1, i8_0, i8_1, i8_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRIu8 " %" PRIu8 " %" PRIu8 " %" PRIu8, u8_1, u8_0, u8_1, u8_0); + EXPECT_EQ(s, "255 0 255 0"); + s.Printf("%02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8, u8_1, u8_0, u8_1, u8_0); + EXPECT_EQ(s, "ff 00 ff 00"); + s.Printf("%02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8, u8_1, u8_0, u8_1, u8_0); + EXPECT_EQ(s, "FF 00 FF 00"); + + s.Printf("%" PRIi16 " %" PRIi16 " %" PRIi16 " %" PRIi16, i16_1, i16_0, i16_1, i16_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRId16 " %" PRId16 " %" PRId16 " %" PRId16, i16_1, i16_0, i16_1, i16_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRIu16 " %" PRIu16 " %" PRIu16 " %" PRIu16, u16_1, u16_0, u16_1, u16_0); + EXPECT_EQ(s, "65535 0 65535 0"); + s.Printf("%04" PRIx16 " %04" PRIx16 " %04" PRIx16 " %04" PRIx16, u16_1, u16_0, u16_1, u16_0); + EXPECT_EQ(s, "ffff 0000 ffff 0000"); + s.Printf("%04" PRIX16 " %04" PRIX16 " %04" PRIX16 " %04" PRIX16, u16_1, u16_0, u16_1, u16_0); + EXPECT_EQ(s, "FFFF 0000 FFFF 0000"); + + s.Printf("%" PRIi32 " %" PRIi32 " %" PRIi32 " %" PRIi32, i32_1, i32_0, i32_1, i32_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRId32 " %" PRId32 " %" PRId32 " %" PRId32, i32_1, i32_0, i32_1, i32_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRIu32 
" %" PRIu32 " %" PRIu32 " %" PRIu32, u32_1, u32_0, u32_1, u32_0); + EXPECT_EQ(s, "4294967295 0 4294967295 0"); + s.Printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32, u32_1, u32_0, u32_1, u32_0); + EXPECT_EQ(s, "ffffffff 00000000 ffffffff 00000000"); + s.Printf("%08" PRIX32 " %08" PRIX32 " %08" PRIX32 " %08" PRIX32, u32_1, u32_0, u32_1, u32_0); + EXPECT_EQ(s, "FFFFFFFF 00000000 FFFFFFFF 00000000"); + + s.Printf("%" PRIi64 " %" PRIi64 " %" PRIi64 " %" PRIi64, i64_1, i64_0, i64_1, i64_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, i64_1, i64_0, i64_1, i64_0); + EXPECT_EQ(s, "-1 0 -1 0"); + s.Printf("%" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, u64_1, u64_0, u64_1, u64_0); + EXPECT_EQ(s, "18446744073709551615 0 18446744073709551615 0"); + s.Printf("%016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64, u64_1, u64_0, u64_1, u64_0); + EXPECT_EQ(s, "ffffffffffffffff 0000000000000000 ffffffffffffffff 0000000000000000"); + s.Printf("%016" PRIX64 " %016" PRIX64 " %016" PRIX64 " %016" PRIX64, u64_1, u64_0, u64_1, u64_0); + EXPECT_EQ(s, "FFFFFFFFFFFFFFFF 0000000000000000 FFFFFFFFFFFFFFFF 0000000000000000"); +} + +void test_hex() +{ + String s = U"00112233445566778899AABBCCDDEEFF"; + + // printf("%s\n", bin_to_hex(hex_to_bin(s)).C_Str()); + + EXPECT_EQ(s, String::HexFromBin(s.HexToBin())); + + s = U"24FC79CCBF0979E9371AC23C6D68DE36"; + + // printf("%s\n", bin_to_hex(hex_to_bin(s)).C_Str()); + + EXPECT_EQ(s, String::HexFromBin(s.HexToBin())); +} + +void text_utf16() +{ + String s(U"abcdЙцукен123"); + + EXPECT_EQ(String::FromUtf16(s.utf16_str().GetData()), s); +} + +void text_cp866() +{ + String s(U"abcdЙцукен123"); + + EXPECT_EQ(String::FromCp866(s.cp866_str().GetData()), s); +} + +void text_cp1251() +{ + String s(U"abcdЙцукен123"); + + EXPECT_EQ(String::FromCp1251(s.cp1251_str().GetData()), s); +} + +void test_cpp14() +{ + StringList s = {U"a", U"b", U"cd"}; + + EXPECT_EQ(s.Size(), 3U); + EXPECT_EQ(s.At(0), "a"); + EXPECT_EQ(s.At(1), "b"); + EXPECT_EQ(s.At(2), "cd"); + + String ss; + for (const auto& str: s) + { + ss += str; + } + + EXPECT_EQ(ss, U"abcd"); + + uint32_t int_c[] = {0xEE, 0xD0B0, 0x20FFF}; + String s1 = String::FromUtf8("î킰𠿿"); + String s2 = U"î킰𠿿"; + String s3 = String::FromUtf16(u"î킰𠿿"); + + int i = 0; + i = 0; + for (auto ch: s1) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (auto& ch: s1) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (const auto& ch: s1) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (auto ch: s2) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (auto& ch: s2) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (const auto& ch: s2) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (auto ch: s3) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (auto& ch: s3) + { + EXPECT_EQ(ch, int_c[i++]); + } + i = 0; + for (const auto& ch: s3) + { + EXPECT_EQ(ch, int_c[i++]); + } + + EXPECT_EQ(s1, s2); + EXPECT_EQ(s2, s3); + + EXPECT_EQ(s1.Hash(), s2.Hash()); + EXPECT_EQ(s2.Hash(), s3.Hash()); + + for (auto& ch: s1) + { + ch++; + } + + EXPECT_NE(s1, s2); + EXPECT_NE(s1.Hash(), s2.Hash()); + + i = 0; + for (const auto& ch: s1) + { + EXPECT_EQ(ch, int_c[i++] + 1); + } +} + +void test_move() +{ + String str = U"123"; + StringList list; + EXPECT_TRUE(!str.IsInvalid()); + list.Add(std::move(str)); + EXPECT_TRUE(str.IsInvalid()); // NOLINT(hicpp-invalid-access-moved,bugprone-use-after-move) + str = U"234"; + EXPECT_TRUE(!str.IsInvalid()); + EXPECT_EQ(str, U"234"); + EXPECT_EQ(list, 
StringList({U"123"}));
+}
+
+TEST(Core, CharString)
+{
+	UT_MEM_CHECK_INIT();
+
+	test_char();
+	test();
+	test_2();
+	test_3();
+	test_U();
+	test_U_2();
+	test_U_3();
+	test_list();
+	test_list_2();
+	test_printf();
+	test_hex();
+	text_utf16();
+	text_cp866();
+	text_cp1251();
+	test_cpp14();
+	test_move();
+
+	UT_MEM_CHECK();
+}
+
+UT_END();
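
Note for reviewers, not part of the diff above: the registered case is bracketed by UT_MEM_CHECK_INIT()/UT_MEM_CHECK() so that the string assertions also double as a leak check. A minimal sketch of adding one more case in the same shape follows; it assumes the same includes as the test file above, and the case name, the sample assertions, and the exact failure semantics of UT_MEM_CHECK() are illustrative assumptions, not part of this patch.

TEST(Core, CharStringSketch)
{
	UT_MEM_CHECK_INIT(); // snapshot the allocation state before the assertions run (assumed semantics)

	// Exercise the String API the same way the cases above do.
	String s = String::FromUtf8("abc") + U"def";
	EXPECT_EQ(s, U"abcdef");
	EXPECT_EQ(strcmp(s.utf8_str().GetData(), "abcdef"), 0);

	UT_MEM_CHECK(); // compare against the snapshot; a mismatch is assumed to fail the test
}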