diff --git a/third-party/libyuv/third_party/libyuv/AUTHORS b/third-party/libyuv/third_party/libyuv/AUTHORS
index 9686ac13eb..28c08956a8 100644
--- a/third-party/libyuv/third_party/libyuv/AUTHORS
+++ b/third-party/libyuv/third_party/libyuv/AUTHORS
@@ -2,3 +2,5 @@
 # Name or Organization
 
 Google Inc.
+
+Ivan Pavlotskiy
diff --git a/third-party/libyuv/third_party/libyuv/Android.bp b/third-party/libyuv/third_party/libyuv/Android.bp
index e2ea47a038..361562870d 100644
--- a/third-party/libyuv/third_party/libyuv/Android.bp
+++ b/third-party/libyuv/third_party/libyuv/Android.bp
@@ -17,7 +17,6 @@ license {
     ],
     license_text: [
         "LICENSE",
-        "LICENSE_THIRD_PARTY",
         "PATENTS",
     ],
 }
@@ -35,7 +34,6 @@ cc_library {
         "source/compare.cc",
         "source/compare_common.cc",
         "source/compare_gcc.cc",
-        "source/compare_mmi.cc",
         "source/compare_msa.cc",
         "source/compare_neon.cc",
         "source/compare_neon64.cc",
@@ -55,14 +53,12 @@ cc_library {
         "source/rotate_argb.cc",
         "source/rotate_common.cc",
         "source/rotate_gcc.cc",
-        "source/rotate_mmi.cc",
         "source/rotate_msa.cc",
         "source/rotate_neon.cc",
         "source/rotate_neon64.cc",
         "source/row_any.cc",
         "source/row_common.cc",
         "source/row_gcc.cc",
-        "source/row_mmi.cc",
         "source/row_msa.cc",
         "source/row_neon.cc",
         "source/row_neon64.cc",
@@ -71,10 +67,10 @@ cc_library {
         "source/scale_argb.cc",
         "source/scale_common.cc",
         "source/scale_gcc.cc",
-        "source/scale_mmi.cc",
         "source/scale_msa.cc",
         "source/scale_neon.cc",
         "source/scale_neon64.cc",
+        "source/scale_rgb.cc",
         "source/scale_uv.cc",
         "source/video_common.cc",
     ],
@@ -134,6 +130,7 @@ cc_test {
         "unit_test/rotate_argb_test.cc",
         "unit_test/rotate_test.cc",
         "unit_test/scale_argb_test.cc",
+        "unit_test/scale_rgb_test.cc",
         "unit_test/scale_test.cc",
         "unit_test/scale_uv_test.cc",
         "unit_test/unit_test.cc",
diff --git a/third-party/libyuv/third_party/libyuv/Android.mk b/third-party/libyuv/third_party/libyuv/Android.mk
index 2ceb49281b..45d9daa8e3 100644
--- a/third-party/libyuv/third_party/libyuv/Android.mk
+++ b/third-party/libyuv/third_party/libyuv/Android.mk
@@ -9,7 +9,6 @@ LOCAL_SRC_FILES := \
     source/compare.cc \
     source/compare_common.cc \
     source/compare_gcc.cc \
-    source/compare_mmi.cc \
     source/compare_msa.cc \
     source/compare_neon.cc \
     source/compare_neon64.cc \
@@ -27,7 +26,6 @@ LOCAL_SRC_FILES := \
     source/rotate_argb.cc \
     source/rotate_common.cc \
     source/rotate_gcc.cc \
-    source/rotate_mmi.cc \
     source/rotate_msa.cc \
     source/rotate_neon.cc \
     source/rotate_neon64.cc \
@@ -35,7 +33,6 @@ LOCAL_SRC_FILES := \
     source/row_any.cc \
     source/row_common.cc \
     source/row_gcc.cc \
-    source/row_mmi.cc \
     source/row_msa.cc \
     source/row_neon.cc \
     source/row_neon64.cc \
@@ -45,10 +42,10 @@ LOCAL_SRC_FILES := \
     source/scale_argb.cc \
     source/scale_common.cc \
     source/scale_gcc.cc \
-    source/scale_mmi.cc \
     source/scale_msa.cc \
     source/scale_neon.cc \
     source/scale_neon64.cc \
+    source/scale_rgb.cc \
    source/scale_uv.cc \
    source/scale_win.cc \
    source/video_common.cc
@@ -101,6 +98,7 @@ LOCAL_SRC_FILES := \
     unit_test/rotate_argb_test.cc \
     unit_test/rotate_test.cc \
     unit_test/scale_argb_test.cc \
+    unit_test/scale_rgb_test.cc \
     unit_test/scale_test.cc \
     unit_test/scale_uv_test.cc \
     unit_test/unit_test.cc \
diff --git a/third-party/libyuv/third_party/libyuv/BUILD.gn b/third-party/libyuv/third_party/libyuv/BUILD.gn
index e1c7c1da4d..a72ff06558 100644
--- a/third-party/libyuv/third_party/libyuv/BUILD.gn
+++ b/third-party/libyuv/third_party/libyuv/BUILD.gn
@@ -27,6 +27,10 @@ config("libyuv_config") {
   if (is_android && current_cpu != "arm64") {
     ldflags = [ "-Wl,--dynamic-linker,/system/bin/linker" ]
   }
+
+  if (!libyuv_use_neon) {
+    defines = [ "LIBYUV_DISABLE_NEON" ]
+  }
 }
 
 # This target is built when no specific target is specified on the command line.
@@ -65,10 +69,6 @@ group("libyuv") {
     deps += [ ":libyuv_msa" ]
   }
 
-  if (libyuv_use_mmi) {
-    deps += [ ":libyuv_mmi" ]
-  }
-
   if (!is_ios && !libyuv_disable_jpeg) {
     # Make sure that clients of libyuv link with libjpeg. This can't go in
     # libyuv_internal because in Windows x64 builds that will generate a clang
@@ -98,6 +98,7 @@ static_library("libyuv_internal") {
     "include/libyuv/row.h",
     "include/libyuv/scale.h",
     "include/libyuv/scale_argb.h",
+    "include/libyuv/scale_rgb.h",
     "include/libyuv/scale_row.h",
     "include/libyuv/scale_uv.h",
     "include/libyuv/version.h",
@@ -134,6 +135,7 @@ static_library("libyuv_internal") {
     "source/scale_argb.cc",
     "source/scale_common.cc",
     "source/scale_gcc.cc",
+    "source/scale_rgb.cc",
     "source/scale_uv.cc",
     "source/scale_win.cc",
     "source/video_common.cc",
@@ -174,9 +176,6 @@ static_library("libyuv_internal") {
       "-ffp-contract=fast",  # Enable fma vectorization for NEON.
     ]
   }
-  if (!libyuv_use_mmi) {
-    defines += [ "LIBYUV_DISABLE_MMI" ]
-  }
 }
 
 if (libyuv_use_neon) {
@@ -230,22 +229,6 @@ if (libyuv_use_msa) {
   }
 }
 
-if (libyuv_use_mmi) {
-  static_library("libyuv_mmi") {
-    sources = [
-      # MMI Source Files
-      "source/compare_mmi.cc",
-      "source/rotate_mmi.cc",
-      "source/row_mmi.cc",
-      "source/scale_mmi.cc",
-    ]
-
-    deps = [ ":libyuv_internal" ]
-
-    public_configs = [ ":libyuv_config" ]
-  }
-}
-
 if (libyuv_include_tests) {
   config("libyuv_unittest_warnings_config") {
     if (!is_win) {
@@ -281,6 +264,7 @@ if (libyuv_include_tests) {
     "unit_test/rotate_argb_test.cc",
     "unit_test/rotate_test.cc",
     "unit_test/scale_argb_test.cc",
+    "unit_test/scale_rgb_test.cc",
     "unit_test/scale_test.cc",
     "unit_test/scale_uv_test.cc",
     "unit_test/unit_test.cc",
diff --git a/third-party/libyuv/third_party/libyuv/CM_linux_packages.cmake b/third-party/libyuv/third_party/libyuv/CM_linux_packages.cmake
index 5f676f8998..a073edfaec 100644
--- a/third-party/libyuv/third_party/libyuv/CM_linux_packages.cmake
+++ b/third-party/libyuv/third_party/libyuv/CM_linux_packages.cmake
@@ -8,7 +8,7 @@ SET ( YUV_VER_MAJOR 0 )
 SET ( YUV_VER_MINOR 0 )
 SET ( YUV_VER_PATCH ${YUV_VERSION_NUMBER} )
 SET ( YUV_VERSION ${YUV_VER_MAJOR}.${YUV_VER_MINOR}.${YUV_VER_PATCH} )
-MESSAGE ( "Building ver.: ${YUV_VERSION}" )
+MESSAGE ( VERBOSE "Building ver.: ${YUV_VERSION}" )
 
 # is this a 32-bit or 64-bit build?
 IF ( CMAKE_SIZEOF_VOID_P EQUAL 8 )
@@ -45,7 +45,7 @@ ELSE ()
     SET ( YUV_SYSTEM_NAME "amd-${YUV_BIT_SIZE}" )
   ENDIF ()
 ENDIF ()
-MESSAGE ( "Packaging for: ${YUV_SYSTEM_NAME}" )
+MESSAGE ( VERBOSE "Packaging for: ${YUV_SYSTEM_NAME}" )
 
 # define all the variables needed by CPack to create .deb and .rpm packages
 SET ( CPACK_PACKAGE_VENDOR "Frank Barchard" )
diff --git a/third-party/libyuv/third_party/libyuv/CMakeLists.txt b/third-party/libyuv/third_party/libyuv/CMakeLists.txt
index 636531eee0..ea45a5ed09 100644
--- a/third-party/libyuv/third_party/libyuv/CMakeLists.txt
+++ b/third-party/libyuv/third_party/libyuv/CMakeLists.txt
@@ -3,7 +3,7 @@
 # Run with -DTEST=ON to build unit tests
 PROJECT ( YUV C CXX )  # "C" is required even for C++ projects
-CMAKE_MINIMUM_REQUIRED( VERSION 2.8 )
+CMAKE_MINIMUM_REQUIRED( VERSION 2.8.12 )
 OPTION( TEST "Built unit tests" OFF )
 
 SET ( ly_base_dir  ${PROJECT_SOURCE_DIR} )
@@ -22,6 +22,10 @@ LIST ( SORT ly_unittest_sources )
 
 INCLUDE_DIRECTORIES( BEFORE ${ly_inc_dir} )
 
+if(MSVC)
+  ADD_DEFINITIONS ( -D_CRT_SECURE_NO_WARNINGS )
+endif()
+
 # this creates the static library (.a)
 ADD_LIBRARY ( ${ly_lib_static} STATIC ${ly_source_files} )
 
@@ -29,13 +33,19 @@ ADD_LIBRARY ( ${ly_lib_static} STATIC ${ly_source_files} )
 ADD_LIBRARY ( ${ly_lib_shared} SHARED ${ly_source_files} )
 SET_TARGET_PROPERTIES ( ${ly_lib_shared} PROPERTIES OUTPUT_NAME "${ly_lib_name}" )
 SET_TARGET_PROPERTIES ( ${ly_lib_shared} PROPERTIES PREFIX "lib" )
+if(WIN32)
+  SET_TARGET_PROPERTIES ( ${ly_lib_shared} PROPERTIES IMPORT_PREFIX "lib" )
+endif()
 
 # this creates the conversion tool
 ADD_EXECUTABLE ( yuvconvert ${ly_base_dir}/util/yuvconvert.cc )
 TARGET_LINK_LIBRARIES ( yuvconvert ${ly_lib_static} )
 
+# this creates the yuvconstants tool
+ADD_EXECUTABLE ( yuvconstants ${ly_base_dir}/util/yuvconstants.c )
+TARGET_LINK_LIBRARIES ( yuvconstants ${ly_lib_static} )
 
-INCLUDE ( FindJPEG )
+find_package ( JPEG )
 if (JPEG_FOUND)
   include_directories( ${JPEG_INCLUDE_DIR} )
   target_link_libraries( yuvconvert ${JPEG_LIBRARY} )
@@ -71,6 +81,12 @@ if(TEST)
   if(NACL AND NACL_LIBC STREQUAL "newlib")
     target_link_libraries(libyuv_unittest glibc-compat)
   endif()
+
+  find_library(GFLAGS_LIBRARY gflags)
+  if(NOT GFLAGS_LIBRARY STREQUAL "GFLAGS_LIBRARY-NOTFOUND")
+    target_link_libraries(libyuv_unittest gflags)
+    add_definitions(-DLIBYUV_USE_GFLAGS)
+  endif()
 endif()
 
diff --git a/third-party/libyuv/third_party/libyuv/DEPS b/third-party/libyuv/third_party/libyuv/DEPS
index b4bf027833..b33d5fa8d7 100644
--- a/third-party/libyuv/third_party/libyuv/DEPS
+++ b/third-party/libyuv/third_party/libyuv/DEPS
@@ -5,8 +5,13 @@ gclient_gn_args = [
 
 vars = {
   'chromium_git': 'https://chromium.googlesource.com',
-  'chromium_revision': 'eaac4f14d951eb92181830ed7c346d3ad3ebe7a5',
+  'chromium_revision': '1c174f8519b2926ff3e621467b6aa282b4934f4a',
   'gn_version': 'git_revision:6f13aaac55a977e1948910942675c69f2b4f7a94',
+  # ninja CIPD package version.
+  # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
+  'ninja_version': 'version:2@1.8.2.chromium.3',
+  # reclient CIPD package version
+  'reclient_version': 're_client_version:0.81.1.0853992-gomaip',
 
   # Keep the Chromium default of generating location tags.
'generate_location_tags': True, @@ -14,13 +19,13 @@ vars = { deps = { 'src/build': - Var('chromium_git') + '/chromium/src/build' + '@' + 'fd86d60f33cbc794537c4da2ef7e298d7f81138e', + Var('chromium_git') + '/chromium/src/build' + '@' + '18e9d3c3adadf2489507e4e62afffafa46717d26', 'src/buildtools': - Var('chromium_git') + '/chromium/src/buildtools' + '@' + '37dc929ecb351687006a61744b116cda601753d7', + Var('chromium_git') + '/chromium/src/buildtools' + '@' + '33b52eafd539278600d34cd9ba23550d28c933d2', 'src/testing': - Var('chromium_git') + '/chromium/src/testing' + '@' + 'c4bd9205eeb6037d567de781d736ab81ff63ecee', + Var('chromium_git') + '/chromium/src/testing' + '@' + 'aedf4723b9fcaf5a76164085f4a8e9797eee4bee', 'src/third_party': - Var('chromium_git') + '/chromium/src/third_party' + '@' + 'f7d9d7e9dd45109820780e5bfbc3e6f0892d56d7', + Var('chromium_git') + '/chromium/src/third_party' + '@' + 'd6591989fa347099fd4c7d47ba8bf6ce900b4f8e', 'src/buildtools/linux64': { 'packages': [ @@ -53,42 +58,50 @@ deps = { 'condition': 'checkout_win', }, + 'src/buildtools/reclient': { + 'packages': [ + { + 'package': 'infra/rbe/client/${{platform}}', + 'version': Var('reclient_version'), + } + ], + 'dep_type': 'cipd', + }, + 'src/buildtools/clang_format/script': - Var('chromium_git') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94', + Var('chromium_git') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'src/buildtools/third_party/libc++/trunk': - Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7', + Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'fc6bbc5eb039769b5ed2de84444a3c6f9b45a598', 'src/buildtools/third_party/libc++abi/trunk': - Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '7d5c92f6cfb1ddb73158233a194bb568c5b13554', + Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '8dd405113a4f3694e910b79785dd7fb7535a888a', 'src/buildtools/third_party/libunwind/trunk': - Var('chromium_git') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd7b11d7989774617bd7df93af95734faac8c0b2c', + Var('chromium_git') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'aabcd8753678f1536e15eb6385a948470debdae4', 'src/third_party/catapult': - Var('chromium_git') + '/catapult.git' + '@' + '5cb305306ad74c3b68e432ee221a1943dd79b64d', + Var('chromium_git') + '/catapult.git' + '@' + '3ffa6b222803f54188a7b249383b2f092a24d19a', 'src/third_party/colorama/src': Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', 'src/third_party/depot_tools': - Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a806594b95a39141fdbf1f359087a44ffb2deaaf', + Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'b52683fa2e74087464d32a1a9c76bf1b5275e4fe', 'src/third_party/freetype/src': - Var('chromium_git') + '/chromium/src/third_party/freetype2.git' + '@' + '86b9c9347f99174f4fea3e9deca5800e57a987f2', + Var('chromium_git') + '/chromium/src/third_party/freetype2.git' + '@' + 'dea2e6358b2f963008d447d27564dd79890b61f0', 'src/third_party/googletest/src': - Var('chromium_git') + '/external/github.com/google/googletest.git' + '@' + '4ec4cd23f486bf70efcc5d2caa40f24368f752e3', + Var('chromium_git') + '/external/github.com/google/googletest.git' + 
'@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'src/third_party/harfbuzz-ng/src': - Var('chromium_git') + '/external/github.com/harfbuzz/harfbuzz.git' + '@' + 'cc9bb294919e846ef8a0731b5e9f304f95ef3bb8', + Var('chromium_git') + '/external/github.com/harfbuzz/harfbuzz.git' + '@' + '56c467093598ec559a7148b61e112e9de52b7076', 'src/third_party/libjpeg_turbo': - Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + 'ad8b3b0f84baf155f3bde5626c3bf9d20535bcae', + Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + 'ed683925e4897a84b3bffc5c1414c85b97a129a3', 'src/third_party/nasm': - Var('chromium_git') + '/chromium/deps/nasm.git' + '@' + 'e9be5fd6d723a435ca2da162f9e0ffcb688747c1', + Var('chromium_git') + '/chromium/deps/nasm.git' + '@' + '0873b2bae6a5388a1c55deac8456e3c60a47ca08', 'src/tools': - Var('chromium_git') + '/chromium/src/tools' + '@' + '4f73c6f51dc2198347630219417cbf21a7064bdd', - 'src/tools/swarming_client': - Var('chromium_git') + '/infra/luci/client-py.git' + '@' + 'a32a1607f6093d338f756c7e7c7b4333b0c50c9c', + Var('chromium_git') + '/chromium/src/tools' + '@' + 'a185bbc6c077438a59a89a97c6c6ae30895e976c', # libyuv-only dependencies (not present in Chromium). 'src/third_party/gtest-parallel': Var('chromium_git') + '/external/webrtc/deps/third_party/gtest-parallel' + '@' + '1dad0e9f6d82ff994130b529d7d814b40eb32b0e', 'src/third_party/lss': { - 'url': Var('chromium_git') + '/linux-syscall-support.git' + '@' + '92a65a8f5d705d1928874420c8d0d15bde8c89e5', + 'url': Var('chromium_git') + '/linux-syscall-support.git' + '@' + 'ce877209e11aa69dcfffbd53ef90ea1d07136521', 'condition': 'checkout_android or checkout_linux', }, @@ -104,13 +117,13 @@ deps = { 'dep_type': 'cipd', }, 'src/third_party/auto/src': { - 'url': Var('chromium_git') + '/external/github.com/google/auto.git' + '@' + '00cb81ed0959a55eb671e89768934094ca0e5e6f', + 'url': Var('chromium_git') + '/external/github.com/google/auto.git' + '@' + '3659a0e6436d3acfeda04e0bd1df3603f1e7ffac', 'condition': 'checkout_android', }, 'src/third_party/boringssl/src': - 'https://boringssl.googlesource.com/boringssl.git' + '@' + 'a10017c548b0805eb98e7847c37370dbd37cd8d6', + 'https://boringssl.googlesource.com/boringssl.git' + '@' + '1ee71185a2322dc354bee5e5a0abfb1810a27dc6', 'src/base': { - 'url': Var('chromium_git') + '/chromium/src/base' + '@' + '4045370905def8e415021737f13e02ed6444a45c', + 'url': Var('chromium_git') + '/chromium/src/base' + '@' + '077682171b88d0aa0cb77a8e1cd4d959f58a20a3', 'condition': 'checkout_android', }, 'src/third_party/bazel': { @@ -134,14 +147,14 @@ deps = { 'dep_type': 'cipd', }, 'src/third_party/android_ndk': { - 'url': Var('chromium_git') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7', + 'url': Var('chromium_git') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', 'condition': 'checkout_android', }, 'src/third_party/androidx': { 'packages': [ { 'package': 'chromium/third_party/androidx', - 'version': 'YiuL0FMMTU_K_n0aBAT3GBA4dMfL8JHhn6dkcz5SFgAC', + 'version': 'DRqe-W5-XlO2ZySLCwsYKy7iqIaQ77O-Y91txXGY_hMC', }, ], 'condition': 'checkout_android', @@ -160,16 +173,12 @@ deps = { 'src/third_party/android_sdk/public': { 'packages': [ { - 'package': 'chromium/third_party/android_sdk/public/build-tools/30.0.1', - 'version': '8LZujEmLjSh0g3JciDA3cslSptxKs9HOa_iUPXkOeYQC', + 'package': 'chromium/third_party/android_sdk/public/build-tools/33.0.0', + 'version': '-VRKr36Uw8L_iFqqo9nevIBgNMggND5iWxjidyjnCgsC', }, { 'package': 
'chromium/third_party/android_sdk/public/emulator', - 'version': 'A4EvXZUIuQho0QRDJopMUpgyp6NA3aiDQjGKPUKbowMC', - }, - { - 'package': 'chromium/third_party/android_sdk/public/extras', - 'version': 'ppQ4TnqDvBHQ3lXx5KPq97egzF5X2FFyOrVHkGmiTMQC', + 'version': '9lGp8nTUCRRWGMnI_96HcKfzjnxEJKUcfvfwmA3wXNkC', }, { 'package': 'chromium/third_party/android_sdk/public/patcher', @@ -177,19 +186,19 @@ deps = { }, { 'package': 'chromium/third_party/android_sdk/public/platform-tools', - 'version': '8tF0AOj7Dwlv4j7_nfkhxWB0jzrvWWYjEIpirt8FIWYC', + 'version': 'RSI3iwryh7URLGRgJHsCvUxj092woTPnKt4pwFcJ6L8C', }, { - 'package': 'chromium/third_party/android_sdk/public/platforms/android-30', - 'version': 'YMUu9EHNZ__2Xcxl-KsaSf-dI5TMt_P62IseUVsxktMC', + 'package': 'chromium/third_party/android_sdk/public/platforms/android-33', + 'version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC', }, { - 'package': 'chromium/third_party/android_sdk/public/sources/android-29', - 'version': '4gxhM8E62bvZpQs7Q3d0DinQaW0RLCIefhXrQBFkNy8C', + 'package': 'chromium/third_party/android_sdk/public/sources/android-31', + 'version': '_a_BcnANjPYw5mSKlNHa7GFY8yc1kdqj2rmQgac7yUcC', }, { 'package': 'chromium/third_party/android_sdk/public/cmdline-tools', - 'version': 'V__2Ycej-H2-6AcXX5A3gi7sIk74SuN44PBm2uC_N1sC', + 'version': 'IPzAG-uU5zVMxohpg9-7-N0tQC1TCSW1VbrBFw7Ld04C', }, ], 'condition': 'checkout_android', @@ -225,6 +234,16 @@ deps = { 'condition': 'checkout_android', 'dep_type': 'cipd', }, + 'src/third_party/byte_buddy/android_sdk_build_tools_25_0_2': { + 'packages': [ + { + 'package': 'chromium/third_party/android_sdk/public/build-tools', + 'version': 'kwIs2vdfTm93yEP8LG5aSnchN4BVEdVxbqQtF4XpPdkC', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, 'src/third_party/ced/src': { 'url': Var('chromium_git') + '/external/github.com/google/compact_enc_det.git' + '@' + 'ba412eaaacd3186085babcd901679a48863c7dd5', 'condition': 'checkout_android', @@ -269,7 +288,7 @@ deps = { }, 'src/third_party/icu': { - 'url': Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '2a822c5626ab1ed40366758e4740b4f0ea40237d', + 'url': Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'da07448619763d1cde255b361324242646f5b268', }, 'src/third_party/icu4j': { 'packages': [ @@ -314,9 +333,18 @@ deps = { 'condition': 'checkout_android', }, 'src/third_party/libunwindstack': { - 'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + 'b34a0059a648f179ef05da2c0927f564bdaea2b3', + 'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '4dbfa0e8c844c8e243b297bc185e54a99ff94f9e', 'condition': 'checkout_android', }, + 'src/third_party/ninja': { + 'packages': [ + { + 'package': 'infra/3pp/tools/ninja/${{platform}}', + 'version': Var('ninja_version'), + } + ], + 'dep_type': 'cipd', + }, 'src/third_party/mockito/src': { 'url': Var('chromium_git') + '/external/mockito/mockito.git' + '@' + '04a2a289a4222f80ad20717c25144981210d2eac', 'condition': 'checkout_android', @@ -345,7 +373,20 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/r8', - 'version': 'Nu_mvQJe34CotIXadFlA3w732CJ9EvQGuVs4udcZedAC', + 'version': 'szXK3tCGU7smsNs4r2mGqxme7d9KWLaOk0_ghbCJxUQC', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + # This duplication is intentional, so we avoid updating the r8.jar used by + # dexing unless necessary, since each update invalidates all incremental + # dexing and unnecessarily slows down all bots. 
+ 'src/third_party/r8/d8': { + 'packages': [ + { + 'package': 'chromium/third_party/r8', + 'version': 'Qn31g4m2cofkyvGgm46Uzkzds5DKdNYrdPePwRkVnv4C', }, ], 'condition': 'checkout_android', @@ -399,20 +440,10 @@ deps = { 'url': Var('chromium_git') + '/chromium/third_party/ub-uiautomator.git' + '@' + '00270549ce3161ae72ceb24712618ea28b4f9434', 'condition': 'checkout_android', }, - 'src/third_party/xstream': { - 'packages': [ - { - 'package': 'chromium/third_party/xstream', - 'version': '4278b1b78b86ab7a1a29e64d5aec9a47a9aab0fe', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, # iOS deps: 'src/ios': { - 'url': Var('chromium_git') + '/chromium/src/ios' + '@' + '57422bee2ccad3d8b1f0f288845c86df024430d5', + 'url': Var('chromium_git') + '/chromium/src/ios' + '@' + '211070da56a62cf7d2f7c7a81be29b57294c4343', 'condition': 'checkout_ios' }, @@ -423,7 +454,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_core_common', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -434,7 +465,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_core_runtime', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -445,7 +476,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_common', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -456,7 +487,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_common_java8', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -467,7 +498,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_livedata', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -478,7 +509,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_livedata_core', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -489,7 +520,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_runtime', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -500,29 +531,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_viewmodel', - 'version': 'version:2@1.1.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/backport_util_concurrent_backport_util_concurrent': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/backport_util_concurrent_backport_util_concurrent', - 'version': 'version:2@3.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/classworlds_classworlds': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/classworlds_classworlds', - 'version': 'version:2@1.1-alpha-2.cr0', + 'version': 'version:2@1.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -533,7 +542,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_animated_vector_drawable', - 'version': 
'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -544,7 +553,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_appcompat_v7', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -555,7 +564,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_asynclayoutinflater', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -566,7 +575,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_cardview_v7', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -577,7 +586,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_collections', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -588,7 +597,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_coordinatorlayout', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -599,7 +608,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_cursoradapter', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -610,7 +619,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_customview', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -621,7 +630,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_design', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -632,7 +641,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_documentfile', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -643,7 +652,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_drawerlayout', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -654,7 +663,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_interpolator', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -665,7 +674,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_loader', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -676,7 +685,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_localbroadcastmanager', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -687,7 +696,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_multidex', - 'version': 'version:2@1.0.0.cr0', + 'version': 'version:2@1.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -698,7 +707,7 @@ deps = { 
'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_print', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -709,7 +718,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_recyclerview_v7', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -720,7 +729,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_slidingpanelayout', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -731,7 +740,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_annotations', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -742,7 +751,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_compat', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -753,7 +762,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_core_ui', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -764,7 +773,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_core_utils', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -775,7 +784,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_fragment', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -786,7 +795,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_media_compat', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -797,7 +806,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_v4', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -808,7 +817,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_vector_drawable', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -819,7 +828,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_swiperefreshlayout', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -830,7 +839,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_transition', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -841,7 +850,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_versionedparcelable', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -852,7 +861,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/com_android_support_viewpager', - 'version': 'version:2@28.0.0.cr0', + 'version': 'version:2@28.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -863,7 +872,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_common', - 'version': 'version:2@30.0.0-alpha10.cr0', + 'version': 'version:2@30.2.0-beta01.cr1', }, ], 'condition': 'checkout_android', @@ -874,7 +883,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.5.cr1', }, ], 'condition': 'checkout_android', @@ -885,7 +894,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs_configuration', - 'version': 'version:2@1.1.1.cr0', + 'version': 'version:2@1.1.5.cr1', }, ], 'condition': 'checkout_android', @@ -896,7 +905,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_layoutlib_layoutlib_api', - 'version': 'version:2@30.0.0-alpha10.cr0', + 'version': 'version:2@30.2.0-beta01.cr1', }, ], 'condition': 'checkout_android', @@ -907,7 +916,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_sdk_common', - 'version': 'version:2@30.0.0-alpha10.cr0', + 'version': 'version:2@30.2.0-beta01.cr1', }, ], 'condition': 'checkout_android', @@ -918,7 +927,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_github_ben_manes_caffeine_caffeine', - 'version': 'version:2@2.8.8.cr0', + 'version': 'version:2@2.8.8.cr1', }, ], 'condition': 'checkout_android', @@ -929,7 +938,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_github_kevinstern_software_and_algorithms', - 'version': 'version:2@1.0.cr0', + 'version': 'version:2@1.0.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_android_apps_common_testing_accessibility_framework_accessibility_test_framework': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_android_apps_common_testing_accessibility_framework_accessibility_test_framework', + 'version': 'version:2@4.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -940,7 +960,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_datatransport_transport_api', - 'version': 'version:2@2.2.1.cr0', + 'version': 'version:2@2.2.1.cr1', }, ], 'condition': 'checkout_android', @@ -951,7 +971,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@20.1.0.cr1', }, ], 'condition': 'checkout_android', @@ -962,7 +982,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_api_phone', - 'version': 'version:2@17.5.0.cr0', + 'version': 'version:2@18.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -973,7 +993,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_base', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@18.0.2.cr1', }, ], 'condition': 'checkout_android', @@ -984,7 +1004,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_base', - 'version': 
'version:2@17.5.0.cr0', + 'version': 'version:2@18.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -995,7 +1015,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement', - 'version': 'version:2@17.5.0.cr0', + 'version': 'version:2@18.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1006,7 +1026,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_cast', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1017,7 +1037,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_cast_framework', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1028,7 +1048,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_clearcut', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1039,7 +1059,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_cloud_messaging', - 'version': 'version:2@16.0.0.cr0', + 'version': 'version:2@16.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1050,7 +1070,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_fido', - 'version': 'version:2@19.0.0-beta.cr0', + 'version': 'version:2@19.0.0-beta.cr1', }, ], 'condition': 'checkout_android', @@ -1061,7 +1081,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_flags', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1072,7 +1092,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_gcm', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1083,7 +1103,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_iid', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1094,7 +1114,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_instantapps', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@18.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1105,7 +1125,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_location', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@19.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1116,7 +1136,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_phenotype', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1127,7 +1147,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_places_placereport', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1138,7 +1158,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_stats', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1149,7 +1169,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks', - 'version': 'version:2@17.2.0.cr0', + 'version': 'version:2@18.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1160,7 +1180,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision', - 'version': 'version:2@18.0.0.cr0', + 'version': 'version:2@20.1.3.cr1', }, ], 'condition': 'checkout_android', @@ -1171,7 +1191,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision_common', - 'version': 'version:2@18.0.0.cr0', + 'version': 'version:2@19.1.3.cr1', }, ], 'condition': 'checkout_android', @@ -1182,7 +1202,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_material_material', - 'version': 'version:2@1.4.0-rc01.cr0', + 'version': 'version:2@1.7.0-alpha02.cr1', }, ], 'condition': 'checkout_android', @@ -1193,7 +1213,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core', - 'version': 'version:2@1.10.0.cr0', + 'version': 'version:2@1.10.0.cr1', }, ], 'condition': 'checkout_android', @@ -1204,7 +1224,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_auto_common', - 'version': 'version:2@0.10.cr0', + 'version': 'version:2@1.2.1.cr1', }, ], 'condition': 'checkout_android', @@ -1215,7 +1235,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_service_auto_service', - 'version': 'version:2@1.0-rc6.cr0', + 'version': 'version:2@1.0-rc6.cr1', }, ], 'condition': 'checkout_android', @@ -1226,7 +1246,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_service_auto_service_annotations', - 'version': 'version:2@1.0-rc6.cr0', + 'version': 'version:2@1.0-rc6.cr1', }, ], 'condition': 'checkout_android', @@ -1237,18 +1257,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_value_auto_value_annotations', - 'version': 'version:2@1.7.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_google_code_findbugs_jformatstring': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_google_code_findbugs_jformatstring', - 'version': 'version:2@3.0.0.cr0', + 'version': 'version:2@1.9.cr1', }, ], 'condition': 'checkout_android', @@ -1259,7 +1268,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_code_findbugs_jsr305', - 'version': 'version:2@3.0.2.cr0', + 'version': 'version:2@3.0.2.cr1', }, ], 'condition': 'checkout_android', @@ -1270,7 +1279,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_code_gson_gson', - 'version': 'version:2@2.8.0.cr0', + 'version': 'version:2@2.8.0.cr1', }, ], 'condition': 'checkout_android', @@ -1281,7 +1290,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger', - 'version': 'version:2@2.30.cr0', + 'version': 'version:2@2.30.cr1', }, ], 'condition': 'checkout_android', @@ -1292,7 +1301,7 @@ deps = { 'packages': [ { 
'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger_compiler', - 'version': 'version:2@2.30.cr0', + 'version': 'version:2@2.30.cr1', }, ], 'condition': 'checkout_android', @@ -1303,7 +1312,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger_producers', - 'version': 'version:2@2.30.cr0', + 'version': 'version:2@2.30.cr1', }, ], 'condition': 'checkout_android', @@ -1314,7 +1323,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger_spi', - 'version': 'version:2@2.30.cr0', + 'version': 'version:2@2.30.cr1', }, ], 'condition': 'checkout_android', @@ -1325,7 +1334,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotation', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.11.0.cr1', }, ], 'condition': 'checkout_android', @@ -1336,7 +1345,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotations', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.14.0.cr1', }, ], 'condition': 'checkout_android', @@ -1347,7 +1356,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_check_api', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.11.0.cr1', }, ], 'condition': 'checkout_android', @@ -1358,7 +1367,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_core', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.11.0.cr1', }, ], 'condition': 'checkout_android', @@ -1369,7 +1378,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_type_annotations', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.11.0.cr1', }, ], 'condition': 'checkout_android', @@ -1380,7 +1389,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_javac', - 'version': 'version:2@9+181-r4173-1.cr0', + 'version': 'version:2@9+181-r4173-1.cr1', }, ], 'condition': 'checkout_android', @@ -1391,7 +1400,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_javac_shaded', - 'version': 'version:2@9-dev-r4023-3.cr0', + 'version': 'version:2@9-dev-r4023-3.cr1', }, ], 'condition': 'checkout_android', @@ -1402,7 +1411,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_annotations', - 'version': 'version:2@16.0.0.cr0', + 'version': 'version:2@16.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1413,7 +1422,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_common', - 'version': 'version:2@19.5.0.cr0', + 'version': 'version:2@19.5.0.cr1', }, ], 'condition': 'checkout_android', @@ -1424,7 +1433,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_components', - 'version': 'version:2@16.1.0.cr0', + 'version': 'version:2@16.1.0.cr1', }, ], 'condition': 'checkout_android', @@ -1435,7 +1444,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_encoders', - 'version': 'version:2@16.1.0.cr0', + 'version': 'version:2@16.1.0.cr1', }, ], 'condition': 'checkout_android', @@ -1446,7 +1455,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/com_google_firebase_firebase_encoders_json', - 'version': 'version:2@17.1.0.cr0', + 'version': 'version:2@17.1.0.cr1', }, ], 'condition': 'checkout_android', @@ -1457,7 +1466,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_iid', - 'version': 'version:2@21.0.1.cr0', + 'version': 'version:2@21.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1468,7 +1477,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_iid_interop', - 'version': 'version:2@17.0.0.cr0', + 'version': 'version:2@17.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1479,7 +1488,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_installations', - 'version': 'version:2@16.3.5.cr0', + 'version': 'version:2@16.3.5.cr1', }, ], 'condition': 'checkout_android', @@ -1490,7 +1499,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_installations_interop', - 'version': 'version:2@16.0.1.cr0', + 'version': 'version:2@16.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1501,7 +1510,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_measurement_connector', - 'version': 'version:2@18.0.0.cr0', + 'version': 'version:2@18.0.0.cr1', }, ], 'condition': 'checkout_android', @@ -1512,7 +1521,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_messaging', - 'version': 'version:2@21.0.1.cr0', + 'version': 'version:2@21.0.1.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_flatbuffers_flatbuffers_java': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_flatbuffers_flatbuffers_java', + 'version': 'version:2@2.0.3.cr1', }, ], 'condition': 'checkout_android', @@ -1523,7 +1543,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_googlejavaformat_google_java_format', - 'version': 'version:2@1.5.cr0', + 'version': 'version:2@1.5.cr1', }, ], 'condition': 'checkout_android', @@ -1534,7 +1554,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_failureaccess', - 'version': 'version:2@1.0.1.cr0', + 'version': 'version:2@1.0.1.cr1', }, ], 'condition': 'checkout_android', @@ -1545,7 +1565,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava', - 'version': 'version:2@30.1-jre.cr0', + 'version': 'version:2@31.0.1-jre.cr1', }, ], 'condition': 'checkout_android', @@ -1556,7 +1576,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava_android', - 'version': 'version:2@30.1-android.cr0', + 'version': 'version:2@31.0-android.cr1', }, ], 'condition': 'checkout_android', @@ -1567,7 +1587,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_listenablefuture', - 'version': 'version:2@1.0.cr0', + 'version': 'version:2@1.0.cr1', }, ], 'condition': 'checkout_android', @@ -1578,7 +1598,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_j2objc_j2objc_annotations', - 'version': 'version:2@1.3.cr0', + 'version': 'version:2@1.3.cr1', }, ], 'condition': 'checkout_android', @@ -1589,7 +1609,7 @@ deps = { 
'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_java', - 'version': 'version:2@3.4.0.cr0', + 'version': 'version:2@3.19.2.cr1', }, ], 'condition': 'checkout_android', @@ -1600,7 +1620,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_javalite', - 'version': 'version:2@3.13.0.cr0', + 'version': 'version:2@3.19.3.cr1', }, ], 'condition': 'checkout_android', @@ -1611,7 +1631,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_googlecode_java_diff_utils_diffutils', - 'version': 'version:2@1.3.0.cr0', + 'version': 'version:2@1.3.0.cr1', }, ], 'condition': 'checkout_android', @@ -1622,7 +1642,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_squareup_javapoet', - 'version': 'version:2@1.13.0.cr0', + 'version': 'version:2@1.13.0.cr1', }, ], 'condition': 'checkout_android', @@ -1633,7 +1653,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_squareup_javawriter', - 'version': 'version:2@2.1.1.cr0', + 'version': 'version:2@2.1.1.cr1', }, ], 'condition': 'checkout_android', @@ -1644,7 +1664,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils', - 'version': 'version:2@4.0.cr0', + 'version': 'version:2@4.0.cr1', }, ], 'condition': 'checkout_android', @@ -1655,7 +1675,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/javax_annotation_javax_annotation_api', - 'version': 'version:2@1.3.2.cr0', + 'version': 'version:2@1.3.2.cr1', }, ], 'condition': 'checkout_android', @@ -1666,7 +1686,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/javax_annotation_jsr250_api', - 'version': 'version:2@1.0.cr0', + 'version': 'version:2@1.0.cr1', }, ], 'condition': 'checkout_android', @@ -1677,29 +1697,29 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/javax_inject_javax_inject', - 'version': 'version:2@1.cr0', + 'version': 'version:2@1.cr1', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/nekohtml_nekohtml': { + 'src/third_party/android_deps/libs/net_bytebuddy_byte_buddy': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/nekohtml_nekohtml', - 'version': 'version:2@1.9.6.2.cr0', + 'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy', + 'version': 'version:2@1.12.13.cr1', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/nekohtml_xercesminimal': { + 'src/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/nekohtml_xercesminimal', - 'version': 'version:2@1.9.6.2.cr0', + 'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent', + 'version': 'version:2@1.12.13.cr1', }, ], 'condition': 'checkout_android', @@ -1710,7 +1730,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/net_ltgt_gradle_incap_incap', - 'version': 'version:2@0.2.cr0', + 'version': 'version:2@0.2.cr1', }, ], 'condition': 'checkout_android', @@ -1721,183 +1741,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/net_sf_kxml_kxml2', - 'version': 'version:2@2.3.0.cr0', + 'version': 'version:2@2.3.0.cr1', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 
'src/third_party/android_deps/libs/org_apache_ant_ant': { + 'src/third_party/android_deps/libs/org_bouncycastle_bcprov_jdk15on': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_apache_ant_ant', - 'version': 'version:2@1.8.0.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_ant_ant_launcher': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_ant_ant_launcher', - 'version': 'version:2@1.8.0.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_ant_tasks': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_ant_tasks', - 'version': 'version:2@2.1.3.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_artifact': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_artifact', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_artifact_manager': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_artifact_manager', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_error_diagnostics': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_error_diagnostics', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_model': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_model', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_plugin_registry': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_plugin_registry', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_profile': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_profile', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_project': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_project', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_repository_metadata': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_repository_metadata', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_maven_settings': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_settings', - 'version': 'version:2@2.2.1.cr0', - }, - ], - 'condition': 'checkout_android', - 
'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_wagon_wagon_file': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_file', - 'version': 'version:2@1.0-beta-6.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_wagon_wagon_http_lightweight': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_http_lightweight', - 'version': 'version:2@1.0-beta-6.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_wagon_wagon_http_shared': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_http_shared', - 'version': 'version:2@1.0-beta-6.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_apache_maven_wagon_wagon_provider_api': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_provider_api', - 'version': 'version:2@1.0-beta-6.cr0', + 'package': 'chromium/third_party/android_deps/libs/org_bouncycastle_bcprov_jdk15on', + 'version': 'version:2@1.68.cr1', }, ], 'condition': 'checkout_android', @@ -1908,7 +1763,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_ccil_cowan_tagsoup_tagsoup', - 'version': 'version:2@1.2.1.cr0', + 'version': 'version:2@1.2.1.cr1', }, ], 'condition': 'checkout_android', @@ -1919,7 +1774,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_compat_qual', - 'version': 'version:2@2.5.5.cr0', + 'version': 'version:2@2.5.5.cr1', }, ], 'condition': 'checkout_android', @@ -1930,18 +1785,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual', - 'version': 'version:2@3.8.0.cr0', + 'version': 'version:2@3.25.0.cr1', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_checkerframework_dataflow_shaded': { + 'src/third_party/android_deps/libs/org_checkerframework_dataflow_errorprone': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow_shaded', - 'version': 'version:2@3.11.0.cr0', + 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow_errorprone', + 'version': 'version:2@3.15.0.cr1', }, ], 'condition': 'checkout_android', @@ -1952,40 +1807,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_codehaus_mojo_animal_sniffer_annotations', - 'version': 'version:2@1.17.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_codehaus_plexus_plexus_container_default': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_codehaus_plexus_plexus_container_default', - 'version': 'version:2@1.0-alpha-9-stable-1.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/org_codehaus_plexus_plexus_interpolation': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_codehaus_plexus_plexus_interpolation', - 'version': 'version:2@1.11.cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 
'src/third_party/android_deps/libs/org_codehaus_plexus_plexus_utils': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/org_codehaus_plexus_plexus_utils', - 'version': 'version:2@1.5.15.cr0', + 'version': 'version:2@1.17.cr1', }, ], 'condition': 'checkout_android', @@ -1996,7 +1818,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_eclipse_jgit_org_eclipse_jgit', - 'version': 'version:2@4.4.1.201607150455-r.cr0', + 'version': 'version:2@4.4.1.201607150455-r.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_hamcrest_hamcrest': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_hamcrest_hamcrest', + 'version': 'version:2@2.2.cr1', }, ], 'condition': 'checkout_android', @@ -2007,7 +1840,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_annotations', - 'version': 'version:2@13.0.cr0', + 'version': 'version:2@13.0.cr1', }, ], 'condition': 'checkout_android', @@ -2018,7 +1851,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib', - 'version': 'version:2@1.5.10.cr0', + 'version': 'version:2@1.7.10.cr1', }, ], 'condition': 'checkout_android', @@ -2029,7 +1862,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common', - 'version': 'version:2@1.5.10.cr0', + 'version': 'version:2@1.7.10.cr1', }, ], 'condition': 'checkout_android', @@ -2040,7 +1873,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7', - 'version': 'version:2@1.5.0.cr0', + 'version': 'version:2@1.6.20.cr1', }, ], 'condition': 'checkout_android', @@ -2051,7 +1884,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk8', - 'version': 'version:2@1.5.0.cr0', + 'version': 'version:2@1.6.20.cr1', }, ], 'condition': 'checkout_android', @@ -2062,7 +1895,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_android', - 'version': 'version:2@1.5.0.cr0', + 'version': 'version:2@1.6.1.cr1', }, ], 'condition': 'checkout_android', @@ -2073,7 +1906,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_core_jvm', - 'version': 'version:2@1.5.0.cr0', + 'version': 'version:2@1.6.1.cr1', }, ], 'condition': 'checkout_android', @@ -2084,7 +1917,40 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_metadata_jvm', - 'version': 'version:2@0.1.0.cr0', + 'version': 'version:2@0.1.0.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_jsoup_jsoup': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_jsoup_jsoup', + 'version': 'version:2@1.15.1.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_mockito_mockito_core': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core', + 'version': 'version:2@4.7.0.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_objenesis_objenesis': { + 'packages': [ + { + 'package': 
'chromium/third_party/android_deps/libs/org_objenesis_objenesis', + 'version': 'version:2@3.2.cr1', }, ], 'condition': 'checkout_android', @@ -2095,7 +1961,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm', - 'version': 'version:2@7.0.cr0', + 'version': 'version:2@9.2.cr1', }, ], 'condition': 'checkout_android', @@ -2106,7 +1972,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_analysis', - 'version': 'version:2@7.0.cr0', + 'version': 'version:2@9.2.cr1', }, ], 'condition': 'checkout_android', @@ -2117,7 +1983,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_commons', - 'version': 'version:2@7.0.cr0', + 'version': 'version:2@9.2.cr1', }, ], 'condition': 'checkout_android', @@ -2128,7 +1994,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_tree', - 'version': 'version:2@7.0.cr0', + 'version': 'version:2@9.2.cr1', }, ], 'condition': 'checkout_android', @@ -2139,7 +2005,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_util', - 'version': 'version:2@7.0.cr0', + 'version': 'version:2@9.2.cr1', }, ], 'condition': 'checkout_android', @@ -2150,7 +2016,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_pcollections_pcollections', - 'version': 'version:2@2.1.2.cr0', + 'version': 'version:2@3.1.4.cr1', }, ], 'condition': 'checkout_android', @@ -2161,7 +2027,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_annotations', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2172,7 +2038,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_junit', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_robolectric_nativeruntime': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_robolectric_nativeruntime', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2183,7 +2060,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_pluginapi', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2194,7 +2071,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_plugins_maven_dependency_resolver', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2205,7 +2082,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_resources', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2216,7 +2093,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_robolectric', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2227,7 +2104,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_sandbox', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2238,7 +2115,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/org_robolectric_shadowapi', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2249,7 +2126,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_framework', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2260,7 +2137,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_playservices', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2271,7 +2148,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_utils', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2282,7 +2159,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_utils_reflector', - 'version': 'version:2@4.3.1.cr0', + 'version': 'version:2@4.8.1.cr1', }, ], 'condition': 'checkout_android', @@ -2298,7 +2175,7 @@ pre_deps_hooks = [ # TODO(kjellander): Remove this in March 2017. 'name': 'cleanup_links', 'pattern': '.', - 'action': ['python', 'src/cleanup_links.py'], + 'action': ['python3', 'src/cleanup_links.py'], }, ] @@ -2310,7 +2187,7 @@ hooks = [ 'name': 'landmines', 'pattern': '.', 'action': [ - 'python', + 'python3', 'src/build/landmines.py', '--landmine-scripts', 'src/tools_libyuv/get_landmines.py', @@ -2323,73 +2200,72 @@ hooks = [ 'name': 'sysroot_arm', 'pattern': '.', 'condition': 'checkout_linux and checkout_arm', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm'], }, { 'name': 'sysroot_arm64', 'pattern': '.', 'condition': 'checkout_linux and checkout_arm64', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm64'], }, { 'name': 'sysroot_x86', 'pattern': '.', 'condition': 'checkout_linux and (checkout_x86 or checkout_x64)', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=x86'], }, { 'name': 'sysroot_mips', 'pattern': '.', 'condition': 'checkout_linux and checkout_mips', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=mips'], }, { 'name': 'sysroot_x64', 'pattern': '.', 'condition': 'checkout_linux and checkout_x64', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=x64'], }, { # Update the Windows toolchain if necessary. 'name': 'win_toolchain', 'pattern': '.', - 'action': ['python', 'src/build/vs_toolchain.py', 'update'], + 'action': ['python3', 'src/build/vs_toolchain.py', 'update'], }, { # Update the Mac toolchain if necessary. 'name': 'mac_toolchain', 'pattern': '.', - 'action': ['python', 'src/build/mac_toolchain.py'], + 'action': ['python3', 'src/build/mac_toolchain.py'], 'condition': 'checkout_mac', }, - # Pull the msan libraries on linux. 
{ 'name': 'msan_chained_origins', 'pattern': '.', - 'condition': 'checkout_linux', - 'action': [ 'python', + 'condition': 'checkout_instrumented_libraries', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', - '--no_resume', - '--no_auth', - '--bucket', 'chromium-instrumented-libraries', - '-s', 'src/third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1', + "--no_resume", + "--no_auth", + "--bucket", "chromium-instrumented-libraries", + "-s", "src/third_party/instrumented_libraries/binaries/msan-chained-origins.tgz.sha1", ], }, { 'name': 'msan_no_origins', 'pattern': '.', - 'condition': 'checkout_linux', - 'action': [ 'python', + 'condition': 'checkout_instrumented_libraries', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', - '--no_resume', - '--no_auth', - '--bucket', 'chromium-instrumented-libraries', - '-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1', + "--no_resume", + "--no_auth", + "--bucket", "chromium-instrumented-libraries", + "-s", "src/third_party/instrumented_libraries/binaries/msan-no-origins.tgz.sha1", ], }, { @@ -2397,20 +2273,22 @@ hooks = [ # Note: On Win, this should run after win_toolchain, as it may use it. 'name': 'clang', 'pattern': '.', - 'action': ['python', 'src/tools/clang/scripts/update.py'], + 'action': ['python3', 'src/tools/clang/scripts/update.py'], }, { # Update LASTCHANGE. 'name': 'lastchange', 'pattern': '.', - 'action': ['python', 'src/build/util/lastchange.py', + 'action': ['python3', 'src/build/util/lastchange.py', '-o', 'src/build/util/LASTCHANGE'], }, # Pull clang-format binaries using checked-in hashes. { 'name': 'clang_format_win', 'pattern': '.', - 'action': [ 'download_from_google_storage', + 'condition': 'host_os == "win"', + 'action': [ 'python3', + 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--platform=win32', '--no_auth', @@ -2419,21 +2297,38 @@ hooks = [ ], }, { - 'name': 'clang_format_mac', + 'name': 'clang_format_mac_x64', 'pattern': '.', - 'action': [ 'download_from_google_storage', + 'condition': 'host_os == "mac" and host_cpu == "x64"', + 'action': [ 'python3', + 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--platform=darwin', '--no_auth', '--bucket', 'chromium-clang-format', - '-s', 'src/buildtools/mac/clang-format.sha1', + '-s', 'src/buildtools/mac/clang-format.x64.sha1', + '-o', 'src/buildtools/mac/clang-format', ], }, + { + 'name': 'clang_format_mac_arm64', + 'pattern': '.', + 'condition': 'host_os == "mac" and host_cpu == "arm64"', + 'action': [ 'python3', + 'src/third_party/depot_tools/download_from_google_storage.py', + '--no_resume', + '--no_auth', + '--bucket', 'chromium-clang-format', + '-s', 'src/buildtools/mac/clang-format.arm64.sha1', + '-o', 'src/buildtools/mac/clang-format', + ], + }, { 'name': 'clang_format_linux', 'pattern': '.', 'condition': 'host_os == "linux"', - 'action': [ 'download_from_google_storage', + 'action': [ 'python3', + 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--platform=linux*', '--no_auth', @@ -2475,23 +2370,11 @@ hooks = [ '-d', 'src/tools/luci-go/linux64', ], }, - { - # We used to use src as a CIPD root. We moved it to a different directory - # in crrev.com/c/930178 but left the clobber here to ensure that that CL - # could be reverted safely. This can be safely removed once crbug.com/794764 - # is resolved. 
- 'name': 'Android Clobber Deprecated CIPD Root', - 'pattern': '.', - 'condition': 'checkout_android', - 'action': ['src/build/cipd/clobber_cipd_root.py', - '--root', 'src', - ], - }, { 'name': 'Generate component metadata for tests', 'pattern': '.', 'action': [ - 'vpython', + 'vpython3', 'src/testing/generate_location_tags.py', '--out', 'src/testing/location_tags.json', diff --git a/third-party/libyuv/third_party/libyuv/OWNERS b/third-party/libyuv/third_party/libyuv/OWNERS index a96669f9ae..f11a7bfdc5 100644 --- a/third-party/libyuv/third_party/libyuv/OWNERS +++ b/third-party/libyuv/third_party/libyuv/OWNERS @@ -1,10 +1,11 @@ mbonadei@chromium.org fbarchard@chromium.org magjed@chromium.org -pbos@chromium.org +wtc@google.com +jansson@google.com -per-file *.gn=mbonadei@chromium.org +per-file *.gn=mbonadei@chromium.org,jansson@google.com per-file .gitignore=* per-file AUTHORS=* per-file DEPS=* -per-file PRESUBMIT.py=mbonadei@chromium.org +per-file PRESUBMIT.py=mbonadei@chromium.org,jansson@google.com diff --git a/third-party/libyuv/third_party/libyuv/PRESUBMIT.py b/third-party/libyuv/third_party/libyuv/PRESUBMIT.py index b867239a10..d3901caf3a 100644 --- a/third-party/libyuv/third_party/libyuv/PRESUBMIT.py +++ b/third-party/libyuv/third_party/libyuv/PRESUBMIT.py @@ -6,6 +6,8 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. +# Runs PRESUBMIT.py in py3 mode by git cl presubmit. +USE_PYTHON3 = True def _CommonChecks(input_api, output_api): """Checks common to both upload and commit.""" @@ -26,7 +28,8 @@ def _CommonChecks(input_api, output_api): 'E0611', # No package y in x 'W0232', # Class has no __init__ method ], - pylintrc='pylintrc')) + pylintrc='pylintrc', + version='2.7')) return results diff --git a/third-party/libyuv/third_party/libyuv/README.chromium b/third-party/libyuv/third_party/libyuv/README.chromium index b60adf7c75..357334de00 100644 --- a/third-party/libyuv/third_party/libyuv/README.chromium +++ b/third-party/libyuv/third_party/libyuv/README.chromium @@ -1,8 +1,9 @@ Name: libyuv URL: http://code.google.com/p/libyuv/ -Version: 1789 +Version: 1850 License: BSD License File: LICENSE Description: libyuv is an open source project that includes YUV conversion and scaling functionality. + diff --git a/third-party/libyuv/third_party/libyuv/build_overrides/partition_alloc.gni b/third-party/libyuv/third_party/libyuv/build_overrides/partition_alloc.gni new file mode 100644 index 0000000000..dcf8ac2d25 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/build_overrides/partition_alloc.gni @@ -0,0 +1,17 @@ +# Copyright 2022 The LibYuv Project Authors. All rights reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +# Use default values for PartitionAlloc as standalone library from +# base/allocator/partition_allocator/build_overrides/partition_alloc.gni +use_partition_alloc_as_malloc_default = false +use_allocator_shim_default = false +enable_backup_ref_ptr_support_default = false +enable_mte_checked_ptr_support_default = false +put_ref_count_in_previous_slot_default = false +enable_backup_ref_ptr_slow_checks_default = false +enable_dangling_raw_ptr_checks_default = false diff --git a/third-party/libyuv/third_party/libyuv/cleanup_links.py b/third-party/libyuv/third_party/libyuv/cleanup_links.py index ba2907892f..7d1eba9b6b 100755 --- a/third-party/libyuv/third_party/libyuv/cleanup_links.py +++ b/third-party/libyuv/third_party/libyuv/cleanup_links.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright 2017 The LibYuv Project Authors. All rights reserved. # # Use of this source code is governed by a BSD-style license @@ -18,8 +19,8 @@ landing that change, this script cleans up any old symlinks, avoiding annoying manual cleanup needed in order to complete gclient sync. """ +import argparse import logging -import optparse import os import shelve import subprocess @@ -32,14 +33,14 @@ LINKS_DB = 'links' # Version management to make future upgrades/downgrades easier to support. SCHEMA_VERSION = 1 -class WebRTCLinkSetup(object): +class WebRTCLinkSetup(): def __init__(self, links_db, dry_run=False): self._dry_run = dry_run self._links_db = links_db def CleanupLinks(self): logging.debug('CleanupLinks') - for source, link_path in self._links_db.iteritems(): + for source, link_path in self._links_db.tems(): if source == 'SCHEMA_VERSION': continue if os.path.islink(link_path) or sys.platform.startswith('win'): @@ -71,15 +72,15 @@ def _initialize_database(filename): def main(): - parser = optparse.OptionParser() - parser.add_option('-d', '--dry-run', action='store_true', default=False, - help='Print what would be done, but don\'t perform any ' - 'operations. This will automatically set logging to ' - 'verbose.') - parser.add_option('-v', '--verbose', action='store_const', - const=logging.DEBUG, default=logging.INFO, - help='Print verbose output for debugging.') - options, _ = parser.parse_args() + p = argparse.ArgumentParser() + p.add_argument('-d', '--dry-run', action='store_true', default=False, + help='Print what would be done, but don\'t perform any ' + 'operations. This will automatically set logging to ' + 'verbose.') + p.add_argument('-v', '--verbose', action='store_const', + const=logging.DEBUG, default=logging.INFO, + help='Print verbose output for debugging.') + options = p.parse_args() if options.dry_run: options.verbose = logging.DEBUG diff --git a/third-party/libyuv/third_party/libyuv/docs/environment_variables.md b/third-party/libyuv/third_party/libyuv/docs/environment_variables.md index cd8159ad5a..dd5d59fbef 100644 --- a/third-party/libyuv/third_party/libyuv/docs/environment_variables.md +++ b/third-party/libyuv/third_party/libyuv/docs/environment_variables.md @@ -22,6 +22,7 @@ By default the cpu is detected and the most advanced form of SIMD is used. But LIBYUV_DISABLE_F16C LIBYUV_DISABLE_AVX512BW LIBYUV_DISABLE_AVX512VL + LIBYUV_DISABLE_AVX512VNNI LIBYUV_DISABLE_AVX512VBMI LIBYUV_DISABLE_AVX512VBMI2 LIBYUV_DISABLE_AVX512VBITALG @@ -34,7 +35,10 @@ By default the cpu is detected and the most advanced form of SIMD is used. 
But ## MIPS CPUs LIBYUV_DISABLE_MSA - LIBYUV_DISABLE_MMI + +## LOONGARCH CPUs + LIBYUV_DISABLE_LSX + LIBYUV_DISABLE_LASX # Test Width/Height/Repeat diff --git a/third-party/libyuv/third_party/libyuv/docs/getting_started.md b/third-party/libyuv/third_party/libyuv/docs/getting_started.md index e363c4ef09..15b19ab210 100644 --- a/third-party/libyuv/third_party/libyuv/docs/getting_started.md +++ b/third-party/libyuv/third_party/libyuv/docs/getting_started.md @@ -180,8 +180,8 @@ Running test with C code: mips - gn gen out/Release "--args=is_debug=false target_os=\"linux\" target_cpu=\"mips64el\" mips_arch_variant=\"loongson3\" mips_use_mmi=true is_component_build=false use_sysroot=false use_gold=false" - gn gen out/Debug "--args=is_debug=true target_os=\"linux\" target_cpu=\"mips64el\" mips_arch_variant=\"loongson3\" mips_use_mmi=true is_component_build=false use_sysroot=false use_gold=false" + gn gen out/Release "--args=is_debug=false target_os=\"linux\" target_cpu=\"mips64el\" mips_arch_variant=\"loongson3\" is_component_build=false use_sysroot=false use_gold=false" + gn gen out/Debug "--args=is_debug=true target_os=\"linux\" target_cpu=\"mips64el\" mips_arch_variant=\"loongson3\" is_component_build=false use_sysroot=false use_gold=false" ninja -v -C out/Debug libyuv_unittest ninja -v -C out/Release libyuv_unittest diff --git a/third-party/libyuv/third_party/libyuv/download_vs_toolchain.py b/third-party/libyuv/third_party/libyuv/download_vs_toolchain.py index 49d069305d..6bc086d68f 100644 --- a/third-party/libyuv/third_party/libyuv/download_vs_toolchain.py +++ b/third-party/libyuv/third_party/libyuv/download_vs_toolchain.py @@ -1,5 +1,5 @@ -#!/usr/bin/env python -# +#!/usr/bin/env vpython3 + # Copyright 2014 The LibYuv Project Authors. All rights reserved. 
# # Use of this source code is governed by a BSD-style license diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/compare_row.h b/third-party/libyuv/third_party/libyuv/include/libyuv/compare_row.h index 64115b3a3f..d8e82d721b 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/compare_row.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/compare_row.h @@ -84,11 +84,6 @@ extern "C" { #define HAS_SUMSQUAREERROR_MSA #endif -#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A) -#define HAS_HAMMINGDISTANCE_MMI -#define HAS_SUMSQUAREERROR_MMI -#endif - uint32_t HammingDistance_C(const uint8_t* src_a, const uint8_t* src_b, int count); @@ -107,9 +102,6 @@ uint32_t HammingDistance_NEON(const uint8_t* src_a, uint32_t HammingDistance_MSA(const uint8_t* src_a, const uint8_t* src_b, int count); -uint32_t HammingDistance_MMI(const uint8_t* src_a, - const uint8_t* src_b, - int count); uint32_t SumSquareError_C(const uint8_t* src_a, const uint8_t* src_b, int count); @@ -125,9 +117,6 @@ uint32_t SumSquareError_NEON(const uint8_t* src_a, uint32_t SumSquareError_MSA(const uint8_t* src_a, const uint8_t* src_b, int count); -uint32_t SumSquareError_MMI(const uint8_t* src_a, - const uint8_t* src_b, - int count); uint32_t HashDjb2_C(const uint8_t* src, int count, uint32_t seed); uint32_t HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed); diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/convert.h b/third-party/libyuv/third_party/libyuv/include/libyuv/convert.h index 93e7550be8..e1eb36b62d 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/convert.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/convert.h @@ -106,6 +106,62 @@ int I422ToI444(const uint8_t* src_y, int width, int height); +// Convert I422 to I210. +LIBYUV_API +int I422ToI210(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int width, + int height); + +// Convert MM21 to NV12. +LIBYUV_API +int MM21ToNV12(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_uv, + int dst_stride_uv, + int width, + int height); + +// Convert MM21 to I420. +LIBYUV_API +int MM21ToI420(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height); + +// Convert MM21 to YUY2 +LIBYUV_API +int MM21ToYUY2(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_yuy2, + int dst_stride_yuy2, + int width, + int height); + // Convert I422 to NV21. 
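// A minimal caller sketch for the new MM21ToI420() entry point declared above.
// The helper name, buffer ownership and the assumption that strides equal the
// row widths in bytes are illustrative only; MM21 input must already satisfy
// the format's tiling constraints.
#include <cstdint>
#include <vector>
#include "libyuv/convert.h"

bool Mm21FrameToI420(const uint8_t* mm21_y, const uint8_t* mm21_uv,
                     int width, int height,
                     std::vector<uint8_t>* y,
                     std::vector<uint8_t>* u,
                     std::vector<uint8_t>* v) {
  const int half_w = (width + 1) / 2;
  const int half_h = (height + 1) / 2;
  y->resize(static_cast<size_t>(width) * height);
  u->resize(static_cast<size_t>(half_w) * half_h);
  v->resize(static_cast<size_t>(half_w) * half_h);
  // MM21 carries a full-resolution Y plane and an interleaved half-resolution
  // UV plane (NV12-like, but tiled); the conversion untiles into planar I420.
  return libyuv::MM21ToI420(mm21_y, width,        // tiled Y, stride in bytes
                            mm21_uv, width,       // tiled UV, stride in bytes
                            y->data(), width,     // dst Y
                            u->data(), half_w,    // dst U
                            v->data(), half_w,    // dst V
                            width, height) == 0;  // libyuv returns 0 on success
}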
LIBYUV_API int I422ToNV21(const uint8_t* src_y, @@ -193,6 +249,23 @@ int I010ToI420(const uint16_t* src_y, int width, int height); +#define H210ToH420 I210ToI420 +LIBYUV_API +int I210ToI420(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height); + #define H210ToH422 I210ToI422 LIBYUV_API int I210ToI422(const uint16_t* src_y, diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/convert_argb.h b/third-party/libyuv/third_party/libyuv/include/libyuv/convert_argb.h index eb4ebd54a8..8e4562efc9 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/convert_argb.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/convert_argb.h @@ -14,6 +14,7 @@ #include "libyuv/basic_types.h" #include "libyuv/rotate.h" // For enum RotationMode. +#include "libyuv/scale.h" // For enum FilterMode. #ifdef __cplusplus namespace libyuv { @@ -403,6 +404,32 @@ int U444ToABGR(const uint8_t* src_y, int width, int height); +// Convert I444 to RGB24. +LIBYUV_API +int I444ToRGB24(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + int width, + int height); + +// Convert I444 to RAW. +LIBYUV_API +int I444ToRAW(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_raw, + int dst_stride_raw, + int width, + int height); + // Convert I010 to ARGB. LIBYUV_API int I010ToARGB(const uint16_t* src_y, @@ -1311,6 +1338,32 @@ int J420ToRAW(const uint8_t* src_y, int width, int height); +// Convert I422 to RGB24. +LIBYUV_API +int I422ToRGB24(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + int width, + int height); + +// Convert I422 to RAW. +LIBYUV_API +int I422ToRAW(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_raw, + int dst_stride_raw, + int width, + int height); + LIBYUV_API int I420ToRGB565(const uint8_t* src_y, int src_stride_y, @@ -1494,6 +1547,20 @@ int I444ToARGBMatrix(const uint8_t* src_y, int width, int height); +// Convert I444 to RGB24 with matrix. +LIBYUV_API +int I444ToRGB24Matrix(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height); + // Convert 10 bit 420 YUV to ARGB with matrix. LIBYUV_API int I010ToAR30Matrix(const uint16_t* src_y, @@ -1864,7 +1931,7 @@ int I422ToRGBAMatrix(const uint8_t* src_y, int width, int height); -// Convert I422 to RGBA with matrix. +// Convert I420 to RGBA with matrix. LIBYUV_API int I420ToRGBAMatrix(const uint8_t* src_y, int src_stride_y, @@ -1892,6 +1959,20 @@ int I420ToRGB24Matrix(const uint8_t* src_y, int width, int height); +// Convert I422 to RGB24 with matrix. 
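// A minimal sketch for the new I444ToRGB24() declared above: all three I444
// planes are full resolution, so each source stride is simply the width.
// The helper name and packed-stride choice are illustrative assumptions.
#include <cstdint>
#include <vector>
#include "libyuv/convert_argb.h"

bool I444FrameToRgb24(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                      int width, int height, std::vector<uint8_t>* rgb24) {
  rgb24->resize(static_cast<size_t>(width) * height * 3);  // 3 bytes per pixel
  return libyuv::I444ToRGB24(y, width, u, width, v, width,
                             rgb24->data(), width * 3,  // packed RGB24 stride
                             width, height) == 0;
}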
+LIBYUV_API +int I422ToRGB24Matrix(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height); + // Convert I420 to RGB565 with specified color matrix. LIBYUV_API int I420ToRGB565Matrix(const uint8_t* src_y, @@ -1906,6 +1987,20 @@ int I420ToRGB565Matrix(const uint8_t* src_y, int width, int height); +// Convert I422 to RGB565 with specified color matrix. +LIBYUV_API +int I422ToRGB565Matrix(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb565, + int dst_stride_rgb565, + const struct YuvConstants* yuvconstants, + int width, + int height); + // Convert I420 to AR30 with matrix. LIBYUV_API int I420ToAR30Matrix(const uint8_t* src_y, @@ -1930,6 +2025,250 @@ int I400ToARGBMatrix(const uint8_t* src_y, int width, int height); +// Convert I420 to ARGB with matrix and UV filter mode. +LIBYUV_API +int I420ToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I422 to ARGB with matrix and UV filter mode. +LIBYUV_API +int I422ToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I422 to RGB24 with matrix and UV filter mode. +LIBYUV_API +int I422ToRGB24MatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I420 to RGB24 with matrix and UV filter mode. +LIBYUV_API +int I420ToRGB24MatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I010 to AR30 with matrix and UV filter mode. +LIBYUV_API +int I010ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I210 to AR30 with matrix and UV filter mode. +LIBYUV_API +int I210ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I010 to ARGB with matrix and UV filter mode. 
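// A minimal sketch of the new filtered conversion path declared above:
// I420 to ARGB with an explicit YUV matrix and bilinear chroma upsampling.
// kYuvI601Constants and the FilterMode values come from the libyuv headers
// (convert_argb.h / scale.h); the helper name and strides are assumptions.
#include <cstdint>
#include <vector>
#include "libyuv/convert_argb.h"

bool I420ToArgbBilinear(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                        int width, int height, std::vector<uint8_t>* argb) {
  argb->resize(static_cast<size_t>(width) * height * 4);
  const int half_w = (width + 1) / 2;  // I420 chroma plane width
  return libyuv::I420ToARGBMatrixFilter(
             y, width, u, half_w, v, half_w,
             argb->data(), width * 4,         // ARGB stride in bytes
             &libyuv::kYuvI601Constants,      // BT.601 limited-range matrix
             width, height,
             libyuv::kFilterBilinear) == 0;   // upsample chroma bilinearly
}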
+LIBYUV_API +int I010ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I210 to ARGB with matrix and UV filter mode. +LIBYUV_API +int I210ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert I420 with Alpha to attenuated ARGB with matrix and UV filter mode. +LIBYUV_API +int I420AlphaToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter); + +// Convert I422 with Alpha to attenuated ARGB with matrix and UV filter mode. +LIBYUV_API +int I422AlphaToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter); + +// Convert I010 with Alpha to attenuated ARGB with matrix and UV filter mode. +LIBYUV_API +int I010AlphaToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + const uint16_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter); + +// Convert I210 with Alpha to attenuated ARGB with matrix and UV filter mode. +LIBYUV_API +int I210AlphaToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + const uint16_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter); + +// Convert P010 to ARGB with matrix and UV filter mode. +LIBYUV_API +int P010ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert P210 to ARGB with matrix and UV filter mode. +LIBYUV_API +int P210ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert P010 to AR30 with matrix and UV filter mode. 
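// A minimal sketch for the alpha-aware filtered path declared above:
// I420 plus a full-resolution A plane converted to ARGB, with the result
// premultiplied (attenuated) by alpha. The helper name, strides and the
// choice of matrix/filter are illustrative assumptions.
#include <cstdint>
#include <vector>
#include "libyuv/convert_argb.h"

bool I420AlphaToPremultipliedArgb(const uint8_t* y, const uint8_t* u,
                                  const uint8_t* v, const uint8_t* a,
                                  int width, int height,
                                  std::vector<uint8_t>* argb) {
  argb->resize(static_cast<size_t>(width) * height * 4);
  const int half_w = (width + 1) / 2;
  return libyuv::I420AlphaToARGBMatrixFilter(
             y, width, u, half_w, v, half_w,
             a, width,                        // alpha plane is full resolution
             argb->data(), width * 4,
             &libyuv::kYuvI601Constants, width, height,
             /*attenuate=*/1,                 // premultiply RGB by alpha
             libyuv::kFilterBilinear) == 0;
}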
+LIBYUV_API +int P010ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + +// Convert P210 to AR30 with matrix and UV filter mode. +LIBYUV_API +int P210ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter); + // Convert camera sample to ARGB with cropping, rotation and vertical flip. // "sample_size" is needed to parse MJPG. // "dst_stride_argb" number of bytes in a row of the dst_argb plane. diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/convert_from_argb.h b/third-party/libyuv/third_party/libyuv/include/libyuv/convert_from_argb.h index bf48786041..ff2a581ac4 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/convert_from_argb.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/convert_from_argb.h @@ -209,10 +209,10 @@ int ARGBToJ420(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_yj, int dst_stride_yj, - uint8_t* dst_u, - int dst_stride_u, - uint8_t* dst_v, - int dst_stride_v, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, int width, int height); @@ -222,10 +222,10 @@ int ARGBToJ422(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_yj, int dst_stride_yj, - uint8_t* dst_u, - int dst_stride_u, - uint8_t* dst_v, - int dst_stride_v, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, int width, int height); @@ -238,6 +238,41 @@ int ARGBToJ400(const uint8_t* src_argb, int width, int height); +// Convert ABGR to J420. (JPeg full range I420). +LIBYUV_API +int ABGRToJ420(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_yj, + int dst_stride_yj, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, + int width, + int height); + +// Convert ABGR to J422. +LIBYUV_API +int ABGRToJ422(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_yj, + int dst_stride_yj, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, + int width, + int height); + +// Convert ABGR to J400. (JPeg full range). +LIBYUV_API +int ABGRToJ400(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_yj, + int dst_stride_yj, + int width, + int height); + // Convert RGBA to J400. (JPeg full range). 
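// A minimal sketch for the new ABGRToJ420() declared above, producing
// full-range (JPEG) I420 from packed ABGR. The wrapper name and stride
// choices are illustrative assumptions.
#include <cstdint>
#include <vector>
#include "libyuv/convert_from_argb.h"

bool AbgrFrameToJ420(const uint8_t* abgr, int width, int height,
                     std::vector<uint8_t>* yj,
                     std::vector<uint8_t>* uj,
                     std::vector<uint8_t>* vj) {
  const int half_w = (width + 1) / 2;
  const int half_h = (height + 1) / 2;
  yj->resize(static_cast<size_t>(width) * height);
  uj->resize(static_cast<size_t>(half_w) * half_h);
  vj->resize(static_cast<size_t>(half_w) * half_h);
  // J420 is I420 with full-range (0..255) Y/U/V, as used by JPEG.
  return libyuv::ABGRToJ420(abgr, width * 4,       // 4 bytes per ABGR pixel
                            yj->data(), width,
                            uj->data(), half_w,
                            vj->data(), half_w,
                            width, height) == 0;
}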
LIBYUV_API int RGBAToJ400(const uint8_t* src_rgba, @@ -327,6 +362,17 @@ int ARGBToUYVY(const uint8_t* src_argb, int width, int height); +// RAW to JNV21 full range NV21 +LIBYUV_API +int RAWToJNV21(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_vu, + int dst_stride_vu, + int width, + int height); + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/cpu_id.h b/third-party/libyuv/third_party/libyuv/include/libyuv/cpu_id.h index 3e27cc107d..fb90c6c737 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/cpu_id.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/cpu_id.h @@ -40,15 +40,20 @@ static const int kCpuHasF16C = 0x2000; static const int kCpuHasGFNI = 0x4000; static const int kCpuHasAVX512BW = 0x8000; static const int kCpuHasAVX512VL = 0x10000; -static const int kCpuHasAVX512VBMI = 0x20000; -static const int kCpuHasAVX512VBMI2 = 0x40000; -static const int kCpuHasAVX512VBITALG = 0x80000; -static const int kCpuHasAVX512VPOPCNTDQ = 0x100000; +static const int kCpuHasAVX512VNNI = 0x20000; +static const int kCpuHasAVX512VBMI = 0x40000; +static const int kCpuHasAVX512VBMI2 = 0x80000; +static const int kCpuHasAVX512VBITALG = 0x100000; +static const int kCpuHasAVX512VPOPCNTDQ = 0x200000; // These flags are only valid on MIPS processors. -static const int kCpuHasMIPS = 0x200000; -static const int kCpuHasMSA = 0x400000; -static const int kCpuHasMMI = 0x800000; +static const int kCpuHasMIPS = 0x400000; +static const int kCpuHasMSA = 0x800000; + +// These flags are only valid on LOONGARCH processors. +static const int kCpuHasLOONGARCH = 0x2000000; +static const int kCpuHasLSX = 0x4000000; +static const int kCpuHasLASX = 0x8000000; // Optional init function. TestCpuFlag does an auto-init. // Returns cpu_info flags. diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/loongson_intrinsics.h b/third-party/libyuv/third_party/libyuv/include/libyuv/loongson_intrinsics.h new file mode 100644 index 0000000000..1d613defb1 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/loongson_intrinsics.h @@ -0,0 +1,1949 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef INCLUDE_LIBYUV_LOONGSON_INTRINSICS_H +#define INCLUDE_LIBYUV_LOONGSON_INTRINSICS_H + +/* + * Copyright (c) 2022 Loongson Technology Corporation Limited + * All rights reserved. + * Contributed by Shiyou Yin + * Xiwei Gu + * Lu Wang + * + * This file is a header file for loongarch builtin extension. + * + */ + +#ifndef LOONGSON_INTRINSICS_H +#define LOONGSON_INTRINSICS_H + +/** + * MAJOR version: Macro usage changes. + * MINOR version: Add new functions, or bug fixes. + * MICRO version: Comment changes or implementation changes. 
+ */ +#define LSOM_VERSION_MAJOR 1 +#define LSOM_VERSION_MINOR 1 +#define LSOM_VERSION_MICRO 0 + +#define DUP2_ARG1(_INS, _IN0, _IN1, _OUT0, _OUT1) \ + { \ + _OUT0 = _INS(_IN0); \ + _OUT1 = _INS(_IN1); \ + } + +#define DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1) \ + { \ + _OUT0 = _INS(_IN0, _IN1); \ + _OUT1 = _INS(_IN2, _IN3); \ + } + +#define DUP2_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _OUT0, _OUT1) \ + { \ + _OUT0 = _INS(_IN0, _IN1, _IN2); \ + _OUT1 = _INS(_IN3, _IN4, _IN5); \ + } + +#define DUP4_ARG1(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1, _OUT2, _OUT3) \ + { \ + DUP2_ARG1(_INS, _IN0, _IN1, _OUT0, _OUT1); \ + DUP2_ARG1(_INS, _IN2, _IN3, _OUT2, _OUT3); \ + } + +#define DUP4_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _IN6, _IN7, _OUT0, \ + _OUT1, _OUT2, _OUT3) \ + { \ + DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1); \ + DUP2_ARG2(_INS, _IN4, _IN5, _IN6, _IN7, _OUT2, _OUT3); \ + } + +#define DUP4_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _IN6, _IN7, _IN8, \ + _IN9, _IN10, _IN11, _OUT0, _OUT1, _OUT2, _OUT3) \ + { \ + DUP2_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _OUT0, _OUT1); \ + DUP2_ARG3(_INS, _IN6, _IN7, _IN8, _IN9, _IN10, _IN11, _OUT2, _OUT3); \ + } + +#ifdef __loongarch_sx +#include +/* + * ============================================================================= + * Description : Dot product & addition of byte vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Signed byte elements from in_h are multiplied by + * signed byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * Then the results plus to signed half-word elements from in_c. + * Example : out = __lsx_vdp2add_h_b(in_c, in_h, in_l) + * in_c : 1,2,3,4, 1,2,3,4 + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,1 + * out : 23,40,41,26, 23,40,41,26 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2add_h_b(__m128i in_c, + __m128i in_h, + __m128i in_l) { + __m128i out; + + out = __lsx_vmaddwev_h_b(in_c, in_h, in_l); + out = __lsx_vmaddwod_h_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product & addition of byte vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Unsigned byte elements from in_h are multiplied by + * unsigned byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * The results plus to signed half-word elements from in_c. 
+ * Example : out = __lsx_vdp2add_h_bu(in_c, in_h, in_l) + * in_c : 1,2,3,4, 1,2,3,4 + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,1 + * out : 23,40,41,26, 23,40,41,26 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2add_h_bu(__m128i in_c, + __m128i in_h, + __m128i in_l) { + __m128i out; + + out = __lsx_vmaddwev_h_bu(in_c, in_h, in_l); + out = __lsx_vmaddwod_h_bu(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product & addition of byte vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Unsigned byte elements from in_h are multiplied by + * signed byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * The results plus to signed half-word elements from in_c. + * Example : out = __lsx_vdp2add_h_bu_b(in_c, in_h, in_l) + * in_c : 1,1,1,1, 1,1,1,1 + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : -1,-2,-3,-4, -5,-6,-7,-8, 1,2,3,4, 5,6,7,8 + * out : -4,-24,-60,-112, 6,26,62,114 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2add_h_bu_b(__m128i in_c, + __m128i in_h, + __m128i in_l) { + __m128i out; + + out = __lsx_vmaddwev_h_bu_b(in_c, in_h, in_l); + out = __lsx_vmaddwod_h_bu_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product & addition of half-word vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Outputs - out + * Return Type - __m128i + * Details : Signed half-word elements from in_h are multiplied by + * signed half-word elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * Then the results plus to signed word elements from in_c. + * Example : out = __lsx_vdp2add_h_b(in_c, in_h, in_l) + * in_c : 1,2,3,4 + * in_h : 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1 + * out : 23,40,41,26 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2add_w_h(__m128i in_c, + __m128i in_h, + __m128i in_l) { + __m128i out; + + out = __lsx_vmaddwev_w_h(in_c, in_h, in_l); + out = __lsx_vmaddwod_w_h(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of byte vector elements + * Arguments : Inputs - in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Signed byte elements from in_h are multiplied by + * signed byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. 
+ * Example : out = __lsx_vdp2_h_b(in_h, in_l) + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,1 + * out : 22,38,38,22, 22,38,38,22 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2_h_b(__m128i in_h, __m128i in_l) { + __m128i out; + + out = __lsx_vmulwev_h_b(in_h, in_l); + out = __lsx_vmaddwod_h_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of byte vector elements + * Arguments : Inputs - in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Unsigned byte elements from in_h are multiplied by + * unsigned byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * Example : out = __lsx_vdp2_h_bu(in_h, in_l) + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,1 + * out : 22,38,38,22, 22,38,38,22 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2_h_bu(__m128i in_h, __m128i in_l) { + __m128i out; + + out = __lsx_vmulwev_h_bu(in_h, in_l); + out = __lsx_vmaddwod_h_bu(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of byte vector elements + * Arguments : Inputs - in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Unsigned byte elements from in_h are multiplied by + * signed byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * Example : out = __lsx_vdp2_h_bu_b(in_h, in_l) + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,-1 + * out : 22,38,38,22, 22,38,38,6 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2_h_bu_b(__m128i in_h, __m128i in_l) { + __m128i out; + + out = __lsx_vmulwev_h_bu_b(in_h, in_l); + out = __lsx_vmaddwod_h_bu_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of byte vector elements + * Arguments : Inputs - in_h, in_l + * Outputs - out + * Return Type - halfword + * Details : Signed byte elements from in_h are multiplied by + * signed byte elements from in_l, and then added adjacent to + * each other to get results with the twice size of input. + * Example : out = __lsx_vdp2_w_h(in_h, in_l) + * in_h : 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1 + * out : 22,38,38,22 + * ============================================================================= + */ +static inline __m128i __lsx_vdp2_w_h(__m128i in_h, __m128i in_l) { + __m128i out; + + out = __lsx_vmulwev_w_h(in_h, in_l); + out = __lsx_vmaddwod_w_h(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Clip all halfword elements of input vector between min & max + * out = ((_in) < (min)) ? (min) : (((_in) > (max)) ? 
(max) : + * (_in)) + * Arguments : Inputs - _in (input vector) + * - min (min threshold) + * - max (max threshold) + * Outputs - out (output vector with clipped elements) + * Return Type - signed halfword + * Example : out = __lsx_vclip_h(_in) + * _in : -8,2,280,249, -8,255,280,249 + * min : 1,1,1,1, 1,1,1,1 + * max : 9,9,9,9, 9,9,9,9 + * out : 1,2,9,9, 1,9,9,9 + * ============================================================================= + */ +static inline __m128i __lsx_vclip_h(__m128i _in, __m128i min, __m128i max) { + __m128i out; + + out = __lsx_vmax_h(min, _in); + out = __lsx_vmin_h(max, out); + return out; +} + +/* + * ============================================================================= + * Description : Set each element of vector between 0 and 255 + * Arguments : Inputs - _in + * Outputs - out + * Return Type - halfword + * Details : Signed byte elements from _in are clamped between 0 and 255. + * Example : out = __lsx_vclip255_h(_in) + * _in : -8,255,280,249, -8,255,280,249 + * out : 0,255,255,249, 0,255,255,249 + * ============================================================================= + */ +static inline __m128i __lsx_vclip255_h(__m128i _in) { + __m128i out; + + out = __lsx_vmaxi_h(_in, 0); + out = __lsx_vsat_hu(out, 7); + return out; +} + +/* + * ============================================================================= + * Description : Set each element of vector between 0 and 255 + * Arguments : Inputs - _in + * Outputs - out + * Return Type - word + * Details : Signed byte elements from _in are clamped between 0 and 255. + * Example : out = __lsx_vclip255_w(_in) + * _in : -8,255,280,249 + * out : 0,255,255,249 + * ============================================================================= + */ +static inline __m128i __lsx_vclip255_w(__m128i _in) { + __m128i out; + + out = __lsx_vmaxi_w(_in, 0); + out = __lsx_vsat_wu(out, 7); + return out; +} + +/* + * ============================================================================= + * Description : Swap two variables + * Arguments : Inputs - _in0, _in1 + * Outputs - _in0, _in1 (in-place) + * Details : Swapping of two input variables using xor + * Example : LSX_SWAP(_in0, _in1) + * _in0 : 1,2,3,4 + * _in1 : 5,6,7,8 + * _in0(out) : 5,6,7,8 + * _in1(out) : 1,2,3,4 + * ============================================================================= + */ +#define LSX_SWAP(_in0, _in1) \ + { \ + _in0 = __lsx_vxor_v(_in0, _in1); \ + _in1 = __lsx_vxor_v(_in0, _in1); \ + _in0 = __lsx_vxor_v(_in0, _in1); \ + } + +/* + * ============================================================================= + * Description : Transpose 4x4 block with word elements in vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1, out2, out3 + * Details : + * Example : + * 1, 2, 3, 4 1, 5, 9,13 + * 5, 6, 7, 8 to 2, 6,10,14 + * 9,10,11,12 =====> 3, 7,11,15 + * 13,14,15,16 4, 8,12,16 + * ============================================================================= + */ +#define LSX_TRANSPOSE4x4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + __m128i _t0, _t1, _t2, _t3; \ + \ + _t0 = __lsx_vilvl_w(_in1, _in0); \ + _t1 = __lsx_vilvh_w(_in1, _in0); \ + _t2 = __lsx_vilvl_w(_in3, _in2); \ + _t3 = __lsx_vilvh_w(_in3, _in2); \ + _out0 = __lsx_vilvl_d(_t2, _t0); \ + _out1 = __lsx_vilvh_d(_t2, _t0); \ + _out2 = __lsx_vilvl_d(_t3, _t1); \ + _out3 = __lsx_vilvh_d(_t3, _t1); \ + } + +/* + * ============================================================================= + * Description : Transpose 8x8 block with byte 
elements in vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7 + * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, + * _out7 + * Details : The rows of the matrix become columns, and the columns + * become rows. + * Example : LSX_TRANSPOSE8x8_B + * _in0 : 00,01,02,03,04,05,06,07, 00,00,00,00,00,00,00,00 + * _in1 : 10,11,12,13,14,15,16,17, 00,00,00,00,00,00,00,00 + * _in2 : 20,21,22,23,24,25,26,27, 00,00,00,00,00,00,00,00 + * _in3 : 30,31,32,33,34,35,36,37, 00,00,00,00,00,00,00,00 + * _in4 : 40,41,42,43,44,45,46,47, 00,00,00,00,00,00,00,00 + * _in5 : 50,51,52,53,54,55,56,57, 00,00,00,00,00,00,00,00 + * _in6 : 60,61,62,63,64,65,66,67, 00,00,00,00,00,00,00,00 + * _in7 : 70,71,72,73,74,75,76,77, 00,00,00,00,00,00,00,00 + * + * _ out0 : 00,10,20,30,40,50,60,70, 00,00,00,00,00,00,00,00 + * _ out1 : 01,11,21,31,41,51,61,71, 00,00,00,00,00,00,00,00 + * _ out2 : 02,12,22,32,42,52,62,72, 00,00,00,00,00,00,00,00 + * _ out3 : 03,13,23,33,43,53,63,73, 00,00,00,00,00,00,00,00 + * _ out4 : 04,14,24,34,44,54,64,74, 00,00,00,00,00,00,00,00 + * _ out5 : 05,15,25,35,45,55,65,75, 00,00,00,00,00,00,00,00 + * _ out6 : 06,16,26,36,46,56,66,76, 00,00,00,00,00,00,00,00 + * _ out7 : 07,17,27,37,47,57,67,77, 00,00,00,00,00,00,00,00 + * ============================================================================= + */ +#define LSX_TRANSPOSE8x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + __m128i zero = {0}; \ + __m128i shuf8 = {0x0F0E0D0C0B0A0908, 0x1716151413121110}; \ + __m128i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \ + \ + _t0 = __lsx_vilvl_b(_in2, _in0); \ + _t1 = __lsx_vilvl_b(_in3, _in1); \ + _t2 = __lsx_vilvl_b(_in6, _in4); \ + _t3 = __lsx_vilvl_b(_in7, _in5); \ + _t4 = __lsx_vilvl_b(_t1, _t0); \ + _t5 = __lsx_vilvh_b(_t1, _t0); \ + _t6 = __lsx_vilvl_b(_t3, _t2); \ + _t7 = __lsx_vilvh_b(_t3, _t2); \ + _out0 = __lsx_vilvl_w(_t6, _t4); \ + _out2 = __lsx_vilvh_w(_t6, _t4); \ + _out4 = __lsx_vilvl_w(_t7, _t5); \ + _out6 = __lsx_vilvh_w(_t7, _t5); \ + _out1 = __lsx_vshuf_b(zero, _out0, shuf8); \ + _out3 = __lsx_vshuf_b(zero, _out2, shuf8); \ + _out5 = __lsx_vshuf_b(zero, _out4, shuf8); \ + _out7 = __lsx_vshuf_b(zero, _out6, shuf8); \ + } + +/* + * ============================================================================= + * Description : Transpose 8x8 block with half-word elements in vectors + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 + * Outputs - out0, out1, out2, out3, out4, out5, out6, out7 + * Details : + * Example : + * 00,01,02,03,04,05,06,07 00,10,20,30,40,50,60,70 + * 10,11,12,13,14,15,16,17 01,11,21,31,41,51,61,71 + * 20,21,22,23,24,25,26,27 02,12,22,32,42,52,62,72 + * 30,31,32,33,34,35,36,37 to 03,13,23,33,43,53,63,73 + * 40,41,42,43,44,45,46,47 ======> 04,14,24,34,44,54,64,74 + * 50,51,52,53,54,55,56,57 05,15,25,35,45,55,65,75 + * 60,61,62,63,64,65,66,67 06,16,26,36,46,56,66,76 + * 70,71,72,73,74,75,76,77 07,17,27,37,47,57,67,77 + * ============================================================================= + */ +#define LSX_TRANSPOSE8x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + __m128i _s0, _s1, _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \ + \ + _s0 = __lsx_vilvl_h(_in6, _in4); \ + _s1 = __lsx_vilvl_h(_in7, _in5); \ + _t0 = __lsx_vilvl_h(_s1, _s0); \ + _t1 = __lsx_vilvh_h(_s1, _s0); \ + _s0 = __lsx_vilvh_h(_in6, _in4); \ + _s1 = __lsx_vilvh_h(_in7, _in5); \ + _t2 = 
__lsx_vilvl_h(_s1, _s0); \ + _t3 = __lsx_vilvh_h(_s1, _s0); \ + _s0 = __lsx_vilvl_h(_in2, _in0); \ + _s1 = __lsx_vilvl_h(_in3, _in1); \ + _t4 = __lsx_vilvl_h(_s1, _s0); \ + _t5 = __lsx_vilvh_h(_s1, _s0); \ + _s0 = __lsx_vilvh_h(_in2, _in0); \ + _s1 = __lsx_vilvh_h(_in3, _in1); \ + _t6 = __lsx_vilvl_h(_s1, _s0); \ + _t7 = __lsx_vilvh_h(_s1, _s0); \ + \ + _out0 = __lsx_vpickev_d(_t0, _t4); \ + _out2 = __lsx_vpickev_d(_t1, _t5); \ + _out4 = __lsx_vpickev_d(_t2, _t6); \ + _out6 = __lsx_vpickev_d(_t3, _t7); \ + _out1 = __lsx_vpickod_d(_t0, _t4); \ + _out3 = __lsx_vpickod_d(_t1, _t5); \ + _out5 = __lsx_vpickod_d(_t2, _t6); \ + _out7 = __lsx_vpickod_d(_t3, _t7); \ + } + +/* + * ============================================================================= + * Description : Transpose input 8x4 byte block into 4x8 + * Arguments : Inputs - _in0, _in1, _in2, _in3 (input 8x4 byte block) + * Outputs - _out0, _out1, _out2, _out3 (output 4x8 byte block) + * Return Type - as per RTYPE + * Details : The rows of the matrix become columns, and the columns become + * rows. + * Example : LSX_TRANSPOSE8x4_B + * _in0 : 00,01,02,03,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in1 : 10,11,12,13,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in2 : 20,21,22,23,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in3 : 30,31,32,33,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in4 : 40,41,42,43,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in5 : 50,51,52,53,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in6 : 60,61,62,63,00,00,00,00, 00,00,00,00,00,00,00,00 + * _in7 : 70,71,72,73,00,00,00,00, 00,00,00,00,00,00,00,00 + * + * _out0 : 00,10,20,30,40,50,60,70, 00,00,00,00,00,00,00,00 + * _out1 : 01,11,21,31,41,51,61,71, 00,00,00,00,00,00,00,00 + * _out2 : 02,12,22,32,42,52,62,72, 00,00,00,00,00,00,00,00 + * _out3 : 03,13,23,33,43,53,63,73, 00,00,00,00,00,00,00,00 + * ============================================================================= + */ +#define LSX_TRANSPOSE8x4_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3) \ + { \ + __m128i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \ + \ + _tmp0_m = __lsx_vpackev_w(_in4, _in0); \ + _tmp1_m = __lsx_vpackev_w(_in5, _in1); \ + _tmp2_m = __lsx_vilvl_b(_tmp1_m, _tmp0_m); \ + _tmp0_m = __lsx_vpackev_w(_in6, _in2); \ + _tmp1_m = __lsx_vpackev_w(_in7, _in3); \ + \ + _tmp3_m = __lsx_vilvl_b(_tmp1_m, _tmp0_m); \ + _tmp0_m = __lsx_vilvl_h(_tmp3_m, _tmp2_m); \ + _tmp1_m = __lsx_vilvh_h(_tmp3_m, _tmp2_m); \ + \ + _out0 = __lsx_vilvl_w(_tmp1_m, _tmp0_m); \ + _out2 = __lsx_vilvh_w(_tmp1_m, _tmp0_m); \ + _out1 = __lsx_vilvh_d(_out2, _out0); \ + _out3 = __lsx_vilvh_d(_out0, _out2); \ + } + +/* + * ============================================================================= + * Description : Transpose 16x8 block with byte elements in vectors + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, in8 + * in9, in10, in11, in12, in13, in14, in15 + * Outputs - out0, out1, out2, out3, out4, out5, out6, out7 + * Details : + * Example : + * 000,001,002,003,004,005,006,007 + * 008,009,010,011,012,013,014,015 + * 016,017,018,019,020,021,022,023 + * 024,025,026,027,028,029,030,031 + * 032,033,034,035,036,037,038,039 + * 040,041,042,043,044,045,046,047 000,008,...,112,120 + * 048,049,050,051,052,053,054,055 001,009,...,113,121 + * 056,057,058,059,060,061,062,063 to 002,010,...,114,122 + * 064,068,066,067,068,069,070,071 =====> 003,011,...,115,123 + * 072,073,074,075,076,077,078,079 004,012,...,116,124 + * 080,081,082,083,084,085,086,087 005,013,...,117,125 + * 088,089,090,091,092,093,094,095 
006,014,...,118,126 + * 096,097,098,099,100,101,102,103 007,015,...,119,127 + * 104,105,106,107,108,109,110,111 + * 112,113,114,115,116,117,118,119 + * 120,121,122,123,124,125,126,127 + * ============================================================================= + */ +#define LSX_TRANSPOSE16x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _in8, _in9, _in10, _in11, _in12, _in13, _in14, \ + _in15, _out0, _out1, _out2, _out3, _out4, _out5, \ + _out6, _out7) \ + { \ + __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \ + __m128i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \ + DUP4_ARG2(__lsx_vilvl_b, _in2, _in0, _in3, _in1, _in6, _in4, _in7, _in5, \ + _tmp0, _tmp1, _tmp2, _tmp3); \ + DUP4_ARG2(__lsx_vilvl_b, _in10, _in8, _in11, _in9, _in14, _in12, _in15, \ + _in13, _tmp4, _tmp5, _tmp6, _tmp7); \ + DUP2_ARG2(__lsx_vilvl_b, _tmp1, _tmp0, _tmp3, _tmp2, _t0, _t2); \ + DUP2_ARG2(__lsx_vilvh_b, _tmp1, _tmp0, _tmp3, _tmp2, _t1, _t3); \ + DUP2_ARG2(__lsx_vilvl_b, _tmp5, _tmp4, _tmp7, _tmp6, _t4, _t6); \ + DUP2_ARG2(__lsx_vilvh_b, _tmp5, _tmp4, _tmp7, _tmp6, _t5, _t7); \ + DUP2_ARG2(__lsx_vilvl_w, _t2, _t0, _t3, _t1, _tmp0, _tmp4); \ + DUP2_ARG2(__lsx_vilvh_w, _t2, _t0, _t3, _t1, _tmp2, _tmp6); \ + DUP2_ARG2(__lsx_vilvl_w, _t6, _t4, _t7, _t5, _tmp1, _tmp5); \ + DUP2_ARG2(__lsx_vilvh_w, _t6, _t4, _t7, _t5, _tmp3, _tmp7); \ + DUP2_ARG2(__lsx_vilvl_d, _tmp1, _tmp0, _tmp3, _tmp2, _out0, _out2); \ + DUP2_ARG2(__lsx_vilvh_d, _tmp1, _tmp0, _tmp3, _tmp2, _out1, _out3); \ + DUP2_ARG2(__lsx_vilvl_d, _tmp5, _tmp4, _tmp7, _tmp6, _out4, _out6); \ + DUP2_ARG2(__lsx_vilvh_d, _tmp5, _tmp4, _tmp7, _tmp6, _out5, _out7); \ + } + +/* + * ============================================================================= + * Description : Butterfly of 4 input vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1, out2, out3 + * Details : Butterfly operation + * Example : + * out0 = in0 + in3; + * out1 = in1 + in2; + * out2 = in1 - in2; + * out3 = in0 - in3; + * ============================================================================= + */ +#define LSX_BUTTERFLY_4_B(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lsx_vadd_b(_in0, _in3); \ + _out1 = __lsx_vadd_b(_in1, _in2); \ + _out2 = __lsx_vsub_b(_in1, _in2); \ + _out3 = __lsx_vsub_b(_in0, _in3); \ + } +#define LSX_BUTTERFLY_4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lsx_vadd_h(_in0, _in3); \ + _out1 = __lsx_vadd_h(_in1, _in2); \ + _out2 = __lsx_vsub_h(_in1, _in2); \ + _out3 = __lsx_vsub_h(_in0, _in3); \ + } +#define LSX_BUTTERFLY_4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lsx_vadd_w(_in0, _in3); \ + _out1 = __lsx_vadd_w(_in1, _in2); \ + _out2 = __lsx_vsub_w(_in1, _in2); \ + _out3 = __lsx_vsub_w(_in0, _in3); \ + } +#define LSX_BUTTERFLY_4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lsx_vadd_d(_in0, _in3); \ + _out1 = __lsx_vadd_d(_in1, _in2); \ + _out2 = __lsx_vsub_d(_in1, _in2); \ + _out3 = __lsx_vsub_d(_in0, _in3); \ + } + +/* + * ============================================================================= + * Description : Butterfly of 8 input vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3, ~ + * Outputs - _out0, _out1, _out2, _out3, ~ + * Details : Butterfly operation + * Example : + * _out0 = _in0 + _in7; + * _out1 = _in1 + _in6; + * _out2 = _in2 + _in5; + * _out3 = _in3 + _in4; + * _out4 = _in3 - _in4; + * _out5 = _in2 - _in5; + * _out6 = _in1 - _in6; + * _out7 = _in0 - _in7; + * 
============================================================================= + */ +#define LSX_BUTTERFLY_8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lsx_vadd_b(_in0, _in7); \ + _out1 = __lsx_vadd_b(_in1, _in6); \ + _out2 = __lsx_vadd_b(_in2, _in5); \ + _out3 = __lsx_vadd_b(_in3, _in4); \ + _out4 = __lsx_vsub_b(_in3, _in4); \ + _out5 = __lsx_vsub_b(_in2, _in5); \ + _out6 = __lsx_vsub_b(_in1, _in6); \ + _out7 = __lsx_vsub_b(_in0, _in7); \ + } + +#define LSX_BUTTERFLY_8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lsx_vadd_h(_in0, _in7); \ + _out1 = __lsx_vadd_h(_in1, _in6); \ + _out2 = __lsx_vadd_h(_in2, _in5); \ + _out3 = __lsx_vadd_h(_in3, _in4); \ + _out4 = __lsx_vsub_h(_in3, _in4); \ + _out5 = __lsx_vsub_h(_in2, _in5); \ + _out6 = __lsx_vsub_h(_in1, _in6); \ + _out7 = __lsx_vsub_h(_in0, _in7); \ + } + +#define LSX_BUTTERFLY_8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lsx_vadd_w(_in0, _in7); \ + _out1 = __lsx_vadd_w(_in1, _in6); \ + _out2 = __lsx_vadd_w(_in2, _in5); \ + _out3 = __lsx_vadd_w(_in3, _in4); \ + _out4 = __lsx_vsub_w(_in3, _in4); \ + _out5 = __lsx_vsub_w(_in2, _in5); \ + _out6 = __lsx_vsub_w(_in1, _in6); \ + _out7 = __lsx_vsub_w(_in0, _in7); \ + } + +#define LSX_BUTTERFLY_8_D(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lsx_vadd_d(_in0, _in7); \ + _out1 = __lsx_vadd_d(_in1, _in6); \ + _out2 = __lsx_vadd_d(_in2, _in5); \ + _out3 = __lsx_vadd_d(_in3, _in4); \ + _out4 = __lsx_vsub_d(_in3, _in4); \ + _out5 = __lsx_vsub_d(_in2, _in5); \ + _out6 = __lsx_vsub_d(_in1, _in6); \ + _out7 = __lsx_vsub_d(_in0, _in7); \ + } + +#endif // LSX + +#ifdef __loongarch_asx +#include <lasxintrin.h> +/* + * ============================================================================= + * Description : Dot product of byte vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - signed halfword + * Details : Unsigned byte elements from in_h are multiplied with + * unsigned byte elements from in_l producing a result + * twice the size of input i.e. signed halfword. + * Then this multiplied results of adjacent odd-even elements + * are added to the out vector + * Example : See out = __lasx_xvdp2_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2_h_bu(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_h_bu(in_h, in_l); + out = __lasx_xvmaddwod_h_bu(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of byte vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - signed halfword + * Details : Signed byte elements from in_h are multiplied with + * signed byte elements from in_l producing a result + * twice the size of input i.e. signed halfword.
+ * Then this multiplication results of adjacent odd-even elements + * are added to the out vector + * Example : See out = __lasx_xvdp2_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2_h_b(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_h_b(in_h, in_l); + out = __lasx_xvmaddwod_h_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of halfword vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - signed word + * Details : Signed halfword elements from in_h are multiplied with + * signed halfword elements from in_l producing a result + * twice the size of input i.e. signed word. + * Then this multiplied results of adjacent odd-even elements + * are added to the out vector. + * Example : out = __lasx_xvdp2_w_h(in_h, in_l) + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,1 + * out : 22,38,38,22, 22,38,38,22 + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2_w_h(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_w_h(in_h, in_l); + out = __lasx_xvmaddwod_w_h(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of word vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - signed double + * Details : Signed word elements from in_h are multiplied with + * signed word elements from in_l producing a result + * twice the size of input i.e. signed double-word. + * Then this multiplied results of adjacent odd-even elements + * are added to the out vector. + * Example : See out = __lasx_xvdp2_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2_d_w(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_d_w(in_h, in_l); + out = __lasx_xvmaddwod_d_w(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of halfword vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - signed word + * Details : Unsigned halfword elements from in_h are multiplied with + * signed halfword elements from in_l producing a result + * twice the size of input i.e. unsigned word. + * Multiplication result of adjacent odd-even elements + * are added to the out vector + * Example : See out = __lasx_xvdp2_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2_w_hu_h(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_w_hu_h(in_h, in_l); + out = __lasx_xvmaddwod_w_hu_h(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product & addition of byte vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - halfword + * Details : Signed byte elements from in_h are multiplied with + * signed byte elements from in_l producing a result + * twice the size of input i.e. signed halfword. + * Then this multiplied results of adjacent odd-even elements + * are added to the in_c vector. 
+ * Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2add_h_b(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmaddwev_h_b(in_c, in_h, in_l); + out = __lasx_xvmaddwod_h_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product & addition of byte vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - halfword + * Details : Unsigned byte elements from in_h are multiplied with + * unsigned byte elements from in_l producing a result + * twice the size of input i.e. signed halfword. + * Then this multiplied results of adjacent odd-even elements + * are added to the in_c vector. + * Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2add_h_bu(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmaddwev_h_bu(in_c, in_h, in_l); + out = __lasx_xvmaddwod_h_bu(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product & addition of byte vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - halfword + * Details : Unsigned byte elements from in_h are multiplied with + * signed byte elements from in_l producing a result + * twice the size of input i.e. signed halfword. + * Then this multiplied results of adjacent odd-even elements + * are added to the in_c vector. + * Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2add_h_bu_b(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmaddwev_h_bu_b(in_c, in_h, in_l); + out = __lasx_xvmaddwod_h_bu_b(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of halfword vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Return Type - per RTYPE + * Details : Signed halfword elements from in_h are multiplied with + * signed halfword elements from in_l producing a result + * twice the size of input i.e. signed word. + * Multiplication result of adjacent odd-even elements + * are added to the in_c vector. + * Example : out = __lasx_xvdp2add_w_h(in_c, in_h, in_l) + * in_c : 1,2,3,4, 1,2,3,4 + * in_h : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8, + * in_l : 8,7,6,5, 4,3,2,1, 8,7,6,5, 4,3,2,1, + * out : 23,40,41,26, 23,40,41,26 + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2add_w_h(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmaddwev_w_h(in_c, in_h, in_l); + out = __lasx_xvmaddwod_w_h(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of halfword vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Return Type - signed word + * Details : Unsigned halfword elements from in_h are multiplied with + * unsigned halfword elements from in_l producing a result + * twice the size of input i.e. signed word. 
+ * Multiplication result of adjacent odd-even elements + * are added to the in_c vector. + * Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2add_w_hu(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmaddwev_w_hu(in_c, in_h, in_l); + out = __lasx_xvmaddwod_w_hu(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of halfword vector elements + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Return Type - signed word + * Details : Unsigned halfword elements from in_h are multiplied with + * signed halfword elements from in_l producing a result + * twice the size of input i.e. signed word. + * Multiplication result of adjacent odd-even elements + * are added to the in_c vector + * Example : See out = __lasx_xvdp2add_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2add_w_hu_h(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmaddwev_w_hu_h(in_c, in_h, in_l); + out = __lasx_xvmaddwod_w_hu_h(out, in_h, in_l); + return out; +} + +/* + * ============================================================================= + * Description : Vector Unsigned Dot Product and Subtract + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Return Type - signed halfword + * Details : Unsigned byte elements from in_h are multiplied with + * unsigned byte elements from in_l producing a result + * twice the size of input i.e. signed halfword. + * Multiplication result of adjacent odd-even elements + * are added together and subtracted from double width elements + * in_c vector. + * Example : See out = __lasx_xvdp2sub_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2sub_h_bu(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_h_bu(in_h, in_l); + out = __lasx_xvmaddwod_h_bu(out, in_h, in_l); + out = __lasx_xvsub_h(in_c, out); + return out; +} + +/* + * ============================================================================= + * Description : Vector Signed Dot Product and Subtract + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Return Type - signed word + * Details : Signed halfword elements from in_h are multiplied with + * Signed halfword elements from in_l producing a result + * twice the size of input i.e. signed word. + * Multiplication result of adjacent odd-even elements + * are added together and subtracted from double width elements + * in_c vector. 
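The __lasx_xvdp2_*, __lasx_xvdp2add_* and __lasx_xvdp2sub_* wrappers above all build the same pairwise dot product from an even-element widening multiply plus an odd-element widening multiply-accumulate. A minimal scalar sketch of one 32-bit output lane, with helper names that are illustrative only and not libyuv APIs:

    #include <stdint.h>

    /* dp2: widen one adjacent pair of 16-bit elements, multiply, and sum. */
    static int32_t dp2_w_h_lane(const int16_t h[2], const int16_t l[2]) {
      return (int32_t)h[0] * l[0] + (int32_t)h[1] * l[1];
    }

    /* dp2add accumulates the pair product into c; dp2sub subtracts it from c. */
    static int32_t dp2add_w_h_lane(int32_t c, const int16_t h[2], const int16_t l[2]) {
      return c + dp2_w_h_lane(h, l);
    }

    static int32_t dp2sub_w_h_lane(int32_t c, const int16_t h[2], const int16_t l[2]) {
      return c - dp2_w_h_lane(h, l);
    }

With the first pair of the worked example below (in_c = 0, in_h = 3,1, in_l = 2,1) this gives 0 - (3*2 + 1*1) = -7, matching the first output word.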
+ * Example : out = __lasx_xvdp2sub_w_h(in_c, in_h, in_l) + * in_c : 0,0,0,0, 0,0,0,0 + * in_h : 3,1,3,0, 0,0,0,1, 0,0,1,1, 0,0,0,1 + * in_l : 2,1,1,0, 1,0,0,0, 0,0,1,0, 1,0,0,1 + * out : -7,-3,0,0, 0,-1,0,-1 + * ============================================================================= + */ +static inline __m256i __lasx_xvdp2sub_w_h(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_w_h(in_h, in_l); + out = __lasx_xvmaddwod_w_h(out, in_h, in_l); + out = __lasx_xvsub_w(in_c, out); + return out; +} + +/* + * ============================================================================= + * Description : Dot product of halfword vector elements + * Arguments : Inputs - in_h, in_l + * Output - out + * Return Type - signed word + * Details : Signed halfword elements from in_h are multiplied with + * signed halfword elements from in_l producing a result + * four times the size of input i.e. signed doubleword. + * Then this multiplication results of four adjacent elements + * are added together and stored to the out vector. + * Example : out = __lasx_xvdp4_d_h(in_h, in_l) + * in_h : 3,1,3,0, 0,0,0,1, 0,0,1,-1, 0,0,0,1 + * in_l : -2,1,1,0, 1,0,0,0, 0,0,1, 0, 1,0,0,1 + * out : -2,0,1,1 + * ============================================================================= + */ +static inline __m256i __lasx_xvdp4_d_h(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvmulwev_w_h(in_h, in_l); + out = __lasx_xvmaddwod_w_h(out, in_h, in_l); + out = __lasx_xvhaddw_d_w(out, out); + return out; +} + +/* + * ============================================================================= + * Description : The high half of the vector elements are expanded and + * added after being doubled. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are added after the + * higher half of the two-fold sign extension (signed byte + * to signed halfword) and stored to the out vector. + * Example : See out = __lasx_xvaddwh_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvaddwh_h_b(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvilvh_b(in_h, in_l); + out = __lasx_xvhaddw_h_b(out, out); + return out; +} + +/* + * ============================================================================= + * Description : The high half of the vector elements are expanded and + * added after being doubled. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are added after the + * higher half of the two-fold sign extension (signed halfword + * to signed word) and stored to the out vector. + * Example : out = __lasx_xvaddwh_w_h(in_h, in_l) + * in_h : 3, 0,3,0, 0,0,0,-1, 0,0,1,-1, 0,0,0,1 + * in_l : 2,-1,1,2, 1,0,0, 0, 1,0,1, 0, 1,0,0,1 + * out : 1,0,0,-1, 1,0,0, 2 + * ============================================================================= + */ +static inline __m256i __lasx_xvaddwh_w_h(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvilvh_h(in_h, in_l); + out = __lasx_xvhaddw_w_h(out, out); + return out; +} + +/* + * ============================================================================= + * Description : The low half of the vector elements are expanded and + * added after being doubled. 
+ * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are added after the + * lower half of the two-fold sign extension (signed byte + * to signed halfword) and stored to the out vector. + * Example : See out = __lasx_xvaddwl_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvaddwl_h_b(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvilvl_b(in_h, in_l); + out = __lasx_xvhaddw_h_b(out, out); + return out; +} + +/* + * ============================================================================= + * Description : The low half of the vector elements are expanded and + * added after being doubled. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are added after the + * lower half of the two-fold sign extension (signed halfword + * to signed word) and stored to the out vector. + * Example : out = __lasx_xvaddwl_w_h(in_h, in_l) + * in_h : 3, 0,3,0, 0,0,0,-1, 0,0,1,-1, 0,0,0,1 + * in_l : 2,-1,1,2, 1,0,0, 0, 1,0,1, 0, 1,0,0,1 + * out : 5,-1,4,2, 1,0,2,-1 + * ============================================================================= + */ +static inline __m256i __lasx_xvaddwl_w_h(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvilvl_h(in_h, in_l); + out = __lasx_xvhaddw_w_h(out, out); + return out; +} + +/* + * ============================================================================= + * Description : The low half of the vector elements are expanded and + * added after being doubled. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are added after the + * lower half of the two-fold zero extension (unsigned byte + * to unsigned halfword) and stored to the out vector. + * Example : See out = __lasx_xvaddwl_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvaddwl_h_bu(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvilvl_b(in_h, in_l); + out = __lasx_xvhaddw_hu_bu(out, out); + return out; +} + +/* + * ============================================================================= + * Description : The low half of the vector elements are expanded and + * added after being doubled. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_l vector after double zero extension (unsigned byte to + * signed halfword), added to the in_h vector. + * Example : See out = __lasx_xvaddw_w_w_h(in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvaddw_h_h_bu(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvsllwil_hu_bu(in_l, 0); + out = __lasx_xvadd_h(in_h, out); + return out; +} + +/* + * ============================================================================= + * Description : The low half of the vector elements are expanded and + * added after being doubled. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_l vector after double sign extension (signed halfword to + * signed word), added to the in_h vector.
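As a plain-C reference for the widening-add helpers in this group (per 128-bit lane; the function names are illustrative only, not libyuv APIs): __lasx_xvaddwl_w_h widens the low four halfwords of both inputs before adding, while __lasx_xvaddw_w_w_h widens only in_l and adds it to the words already in in_h.

    #include <stdint.h>

    /* addwl_w_h: sign-extend the low half (elements 0..3) of both inputs, then add. */
    static void addwl_w_h_lane(const int16_t h[8], const int16_t l[8], int32_t out[4]) {
      for (int i = 0; i < 4; ++i)
        out[i] = (int32_t)h[i] + (int32_t)l[i];
    }

    /* addw_w_w_h: sign-extend the low half of l and add it to the words in h. */
    static void addw_w_w_h_lane(const int32_t h[4], const int16_t l[8], int32_t out[4]) {
      for (int i = 0; i < 4; ++i)
        out[i] = h[i] + (int32_t)l[i];
    }

The worked example continued below lines up with this model.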
+ * Example : out = __lasx_xvaddw_w_w_h(in_h, in_l) + * in_h : 0, 1,0,0, -1,0,0,1, + * in_l : 2,-1,1,2, 1,0,0,0, 0,0,1,0, 1,0,0,1, + * out : 2, 0,1,2, -1,0,1,1, + * ============================================================================= + */ +static inline __m256i __lasx_xvaddw_w_w_h(__m256i in_h, __m256i in_l) { + __m256i out; + + out = __lasx_xvsllwil_w_h(in_l, 0); + out = __lasx_xvadd_w(in_h, out); + return out; +} + +/* + * ============================================================================= + * Description : Multiplication and addition calculation after expansion + * of the lower half of the vector. + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are multiplied after + * the lower half of the two-fold sign extension (signed halfword + * to signed word), and the result is added to the vector in_c, + * then stored to the out vector. + * Example : out = __lasx_xvmaddwl_w_h(in_c, in_h, in_l) + * in_c : 1,2,3,4, 5,6,7,8 + * in_h : 1,2,3,4, 1,2,3,4, 5,6,7,8, 5,6,7,8 + * in_l : 200, 300, 400, 500, 2000, 3000, 4000, 5000, + * -200,-300,-400,-500, -2000,-3000,-4000,-5000 + * out : 201, 602,1203,2004, -995, -1794,-2793,-3992 + * ============================================================================= + */ +static inline __m256i __lasx_xvmaddwl_w_h(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i tmp0, tmp1, out; + + tmp0 = __lasx_xvsllwil_w_h(in_h, 0); + tmp1 = __lasx_xvsllwil_w_h(in_l, 0); + tmp0 = __lasx_xvmul_w(tmp0, tmp1); + out = __lasx_xvadd_w(tmp0, in_c); + return out; +} + +/* + * ============================================================================= + * Description : Multiplication and addition calculation after expansion + * of the higher half of the vector. + * Arguments : Inputs - in_c, in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are multiplied after + * the higher half of the two-fold sign extension (signed + * halfword to signed word), and the result is added to + * the vector in_c, then stored to the out vector. + * Example : See out = __lasx_xvmaddwl_w_h(in_c, in_h, in_l) + * ============================================================================= + */ +static inline __m256i __lasx_xvmaddwh_w_h(__m256i in_c, + __m256i in_h, + __m256i in_l) { + __m256i tmp0, tmp1, out; + + tmp0 = __lasx_xvilvh_h(in_h, in_h); + tmp1 = __lasx_xvilvh_h(in_l, in_l); + tmp0 = __lasx_xvmulwev_w_h(tmp0, tmp1); + out = __lasx_xvadd_w(tmp0, in_c); + return out; +} + +/* + * ============================================================================= + * Description : Multiplication calculation after expansion of the lower + * half of the vector. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are multiplied after + * the lower half of the two-fold sign extension (signed + * halfword to signed word), then stored to the out vector. 
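The low/high widening multiplies differ only in which half of each 128-bit lane they read; a rough scalar model (illustrative names, not libyuv APIs) makes the pairing explicit:

    #include <stdint.h>

    /* mulwl: 32-bit products of the low four halfwords of each lane. */
    static void mulwl_w_h_lane(const int16_t h[8], const int16_t l[8], int32_t out[4]) {
      for (int i = 0; i < 4; ++i)
        out[i] = (int32_t)h[i] * l[i];
    }

    /* mulwh: the same products taken from the high four halfwords (elements 4..7). */
    static void mulwh_w_h_lane(const int16_t h[8], const int16_t l[8], int32_t out[4]) {
      for (int i = 0; i < 4; ++i)
        out[i] = (int32_t)h[4 + i] * l[4 + i];
    }

The worked examples for __lasx_xvmulwl_w_h and __lasx_xvmulwh_w_h below follow directly from these two loops.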
+ * Example : out = __lasx_xvmulwl_w_h(in_h, in_l) + * in_h : 3,-1,3,0, 0,0,0,-1, 0,0,1,-1, 0,0,0,1 + * in_l : 2,-1,1,2, 1,0,0, 0, 0,0,1, 0, 1,0,0,1 + * out : 6,1,3,0, 0,0,1,0 + * ============================================================================= + */ +static inline __m256i __lasx_xvmulwl_w_h(__m256i in_h, __m256i in_l) { + __m256i tmp0, tmp1, out; + + tmp0 = __lasx_xvsllwil_w_h(in_h, 0); + tmp1 = __lasx_xvsllwil_w_h(in_l, 0); + out = __lasx_xvmul_w(tmp0, tmp1); + return out; +} + +/* + * ============================================================================= + * Description : Multiplication calculation after expansion of the higher + * half of the vector. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector and the in_l vector are multiplied after + * the higher half of the two-fold sign extension (signed + * halfword to signed word), then stored to the out vector. + * Example : out = __lasx_xvmulwh_w_h(in_h, in_l) + * in_h : 3,-1,3,0, 0,0,0,-1, 0,0,1,-1, 0,0,0,1 + * in_l : 2,-1,1,2, 1,0,0, 0, 0,0,1, 0, 1,0,0,1 + * out : 0,0,0,0, 0,0,0,1 + * ============================================================================= + */ +static inline __m256i __lasx_xvmulwh_w_h(__m256i in_h, __m256i in_l) { + __m256i tmp0, tmp1, out; + + tmp0 = __lasx_xvilvh_h(in_h, in_h); + tmp1 = __lasx_xvilvh_h(in_l, in_l); + out = __lasx_xvmulwev_w_h(tmp0, tmp1); + return out; +} + +/* + * ============================================================================= + * Description : The low half of the vector elements are added to the high half + * after being doubled, then saturated. + * Arguments : Inputs - in_h, in_l + * Output - out + * Details : The in_h vector adds the in_l vector after the lower half of + * the two-fold zero extension (unsigned byte to unsigned + * halfword) and then saturated. The results are stored to the out + * vector. + * Example : out = __lasx_xvsaddw_hu_hu_bu(in_h, in_l) + * in_h : 2,65532,1,2, 1,0,0,0, 0,0,1,0, 1,0,0,1 + * in_l : 3,6,3,0, 0,0,0,1, 0,0,1,1, 0,0,0,1, 3,18,3,0, 0,0,0,1, 0,0,1,1, + * 0,0,0,1 + * out : 5,65535,4,2, 1,0,0,1, 3,18,4,0, 1,0,0,2, + * ============================================================================= + */ +static inline __m256i __lasx_xvsaddw_hu_hu_bu(__m256i in_h, __m256i in_l) { + __m256i tmp1, out; + __m256i zero = {0}; + + tmp1 = __lasx_xvilvl_b(zero, in_l); + out = __lasx_xvsadd_hu(in_h, tmp1); + return out; +} + +/* + * ============================================================================= + * Description : Clip all halfword elements of input vector between min & max + * out = ((in) < (min)) ? (min) : (((in) > (max)) ?
(max) : (in)) + * Arguments : Inputs - in (input vector) + * - min (min threshold) + * - max (max threshold) + * Outputs - in (output vector with clipped elements) + * Return Type - signed halfword + * Example : out = __lasx_xvclip_h(in, min, max) + * in : -8,2,280,249, -8,255,280,249, 4,4,4,4, 5,5,5,5 + * min : 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1 + * max : 9,9,9,9, 9,9,9,9, 9,9,9,9, 9,9,9,9 + * out : 1,2,9,9, 1,9,9,9, 4,4,4,4, 5,5,5,5 + * ============================================================================= + */ +static inline __m256i __lasx_xvclip_h(__m256i in, __m256i min, __m256i max) { + __m256i out; + + out = __lasx_xvmax_h(min, in); + out = __lasx_xvmin_h(max, out); + return out; +} + +/* + * ============================================================================= + * Description : Clip all signed halfword elements of input vector + * between 0 & 255 + * Arguments : Inputs - in (input vector) + * Outputs - out (output vector with clipped elements) + * Return Type - signed halfword + * Example : See out = __lasx_xvclip255_w(in) + * ============================================================================= + */ +static inline __m256i __lasx_xvclip255_h(__m256i in) { + __m256i out; + + out = __lasx_xvmaxi_h(in, 0); + out = __lasx_xvsat_hu(out, 7); + return out; +} + +/* + * ============================================================================= + * Description : Clip all signed word elements of input vector + * between 0 & 255 + * Arguments : Inputs - in (input vector) + * Output - out (output vector with clipped elements) + * Return Type - signed word + * Example : out = __lasx_xvclip255_w(in) + * in : -8,255,280,249, -8,255,280,249 + * out : 0,255,255,249, 0,255,255,249 + * ============================================================================= + */ +static inline __m256i __lasx_xvclip255_w(__m256i in) { + __m256i out; + + out = __lasx_xvmaxi_w(in, 0); + out = __lasx_xvsat_wu(out, 7); + return out; +} + +/* + * ============================================================================= + * Description : Indexed halfword element values are replicated to all + * elements in output vector. If 'idx < 8' use xvsplati_l_*, + * if 'idx >= 8' use xvsplati_h_*. + * Arguments : Inputs - in, idx + * Output - out + * Details : Idx element value from in vector is replicated to all + * elements in out vector. + * Valid index range for halfword operation is 0-7 + * Example : out = __lasx_xvsplati_l_h(in, idx) + * in : 20,10,11,12, 13,14,15,16, 0,0,2,0, 0,0,0,0 + * idx : 0x02 + * out : 11,11,11,11, 11,11,11,11, 11,11,11,11, 11,11,11,11 + * ============================================================================= + */ +static inline __m256i __lasx_xvsplati_l_h(__m256i in, int idx) { + __m256i out; + + out = __lasx_xvpermi_q(in, in, 0x02); + out = __lasx_xvreplve_h(out, idx); + return out; +} + +/* + * ============================================================================= + * Description : Indexed halfword element values are replicated to all + * elements in output vector. If 'idx < 8' use xvsplati_l_*, + * if 'idx >= 8' use xvsplati_h_*. + * Arguments : Inputs - in, idx + * Output - out + * Details : Idx element value from in vector is replicated to all + * elements in out vector. 
+ * Valid index range for halfword operation is 0-7 + * Example : out = __lasx_xvsplati_h_h(in, idx) + * in : 20,10,11,12, 13,14,15,16, 0,2,0,0, 0,0,0,0 + * idx : 0x09 + * out : 2,2,2,2, 2,2,2,2, 2,2,2,2, 2,2,2,2 + * ============================================================================= + */ +static inline __m256i __lasx_xvsplati_h_h(__m256i in, int idx) { + __m256i out; + + out = __lasx_xvpermi_q(in, in, 0x13); + out = __lasx_xvreplve_h(out, idx); + return out; +} + +/* + * ============================================================================= + * Description : Transpose 4x4 block with double-word elements in vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3 + * Outputs - _out0, _out1, _out2, _out3 + * Example : LASX_TRANSPOSE4x4_D + * _in0 : 1,2,3,4 + * _in1 : 1,2,3,4 + * _in2 : 1,2,3,4 + * _in3 : 1,2,3,4 + * + * _out0 : 1,1,1,1 + * _out1 : 2,2,2,2 + * _out2 : 3,3,3,3 + * _out3 : 4,4,4,4 + * ============================================================================= + */ +#define LASX_TRANSPOSE4x4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, \ + _out3) \ + { \ + __m256i _tmp0, _tmp1, _tmp2, _tmp3; \ + _tmp0 = __lasx_xvilvl_d(_in1, _in0); \ + _tmp1 = __lasx_xvilvh_d(_in1, _in0); \ + _tmp2 = __lasx_xvilvl_d(_in3, _in2); \ + _tmp3 = __lasx_xvilvh_d(_in3, _in2); \ + _out0 = __lasx_xvpermi_q(_tmp2, _tmp0, 0x20); \ + _out2 = __lasx_xvpermi_q(_tmp2, _tmp0, 0x31); \ + _out1 = __lasx_xvpermi_q(_tmp3, _tmp1, 0x20); \ + _out3 = __lasx_xvpermi_q(_tmp3, _tmp1, 0x31); \ + } + +/* + * ============================================================================= + * Description : Transpose 8x8 block with word elements in vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7 + * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, + * _out7 + * Example : LASX_TRANSPOSE8x8_W + * _in0 : 1,2,3,4,5,6,7,8 + * _in1 : 2,2,3,4,5,6,7,8 + * _in2 : 3,2,3,4,5,6,7,8 + * _in3 : 4,2,3,4,5,6,7,8 + * _in4 : 5,2,3,4,5,6,7,8 + * _in5 : 6,2,3,4,5,6,7,8 + * _in6 : 7,2,3,4,5,6,7,8 + * _in7 : 8,2,3,4,5,6,7,8 + * + * _out0 : 1,2,3,4,5,6,7,8 + * _out1 : 2,2,2,2,2,2,2,2 + * _out2 : 3,3,3,3,3,3,3,3 + * _out3 : 4,4,4,4,4,4,4,4 + * _out4 : 5,5,5,5,5,5,5,5 + * _out5 : 6,6,6,6,6,6,6,6 + * _out6 : 7,7,7,7,7,7,7,7 + * _out7 : 8,8,8,8,8,8,8,8 + * ============================================================================= + */ +#define LASX_TRANSPOSE8x8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + __m256i _s0_m, _s1_m; \ + __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \ + __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \ + \ + _s0_m = __lasx_xvilvl_w(_in2, _in0); \ + _s1_m = __lasx_xvilvl_w(_in3, _in1); \ + _tmp0_m = __lasx_xvilvl_w(_s1_m, _s0_m); \ + _tmp1_m = __lasx_xvilvh_w(_s1_m, _s0_m); \ + _s0_m = __lasx_xvilvh_w(_in2, _in0); \ + _s1_m = __lasx_xvilvh_w(_in3, _in1); \ + _tmp2_m = __lasx_xvilvl_w(_s1_m, _s0_m); \ + _tmp3_m = __lasx_xvilvh_w(_s1_m, _s0_m); \ + _s0_m = __lasx_xvilvl_w(_in6, _in4); \ + _s1_m = __lasx_xvilvl_w(_in7, _in5); \ + _tmp4_m = __lasx_xvilvl_w(_s1_m, _s0_m); \ + _tmp5_m = __lasx_xvilvh_w(_s1_m, _s0_m); \ + _s0_m = __lasx_xvilvh_w(_in6, _in4); \ + _s1_m = __lasx_xvilvh_w(_in7, _in5); \ + _tmp6_m = __lasx_xvilvl_w(_s1_m, _s0_m); \ + _tmp7_m = __lasx_xvilvh_w(_s1_m, _s0_m); \ + _out0 = __lasx_xvpermi_q(_tmp4_m, _tmp0_m, 0x20); \ + _out1 = __lasx_xvpermi_q(_tmp5_m, _tmp1_m, 0x20); \ + _out2 = __lasx_xvpermi_q(_tmp6_m, _tmp2_m, 0x20); \ + _out3 = __lasx_xvpermi_q(_tmp7_m, _tmp3_m, 0x20); 
\ + _out4 = __lasx_xvpermi_q(_tmp4_m, _tmp0_m, 0x31); \ + _out5 = __lasx_xvpermi_q(_tmp5_m, _tmp1_m, 0x31); \ + _out6 = __lasx_xvpermi_q(_tmp6_m, _tmp2_m, 0x31); \ + _out7 = __lasx_xvpermi_q(_tmp7_m, _tmp3_m, 0x31); \ + } + +/* + * ============================================================================= + * Description : Transpose input 16x8 byte block + * Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, + * _in8, _in9, _in10, _in11, _in12, _in13, _in14, _in15 + * (input 16x8 byte block) + * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, + * _out7 (output 8x16 byte block) + * Details : The rows of the matrix become columns, and the columns become + * rows. + * Example : See LASX_TRANSPOSE16x8_H + * ============================================================================= + */ +#define LASX_TRANSPOSE16x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _in8, _in9, _in10, _in11, _in12, _in13, _in14, \ + _in15, _out0, _out1, _out2, _out3, _out4, _out5, \ + _out6, _out7) \ + { \ + __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \ + __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \ + \ + _tmp0_m = __lasx_xvilvl_b(_in2, _in0); \ + _tmp1_m = __lasx_xvilvl_b(_in3, _in1); \ + _tmp2_m = __lasx_xvilvl_b(_in6, _in4); \ + _tmp3_m = __lasx_xvilvl_b(_in7, _in5); \ + _tmp4_m = __lasx_xvilvl_b(_in10, _in8); \ + _tmp5_m = __lasx_xvilvl_b(_in11, _in9); \ + _tmp6_m = __lasx_xvilvl_b(_in14, _in12); \ + _tmp7_m = __lasx_xvilvl_b(_in15, _in13); \ + _out0 = __lasx_xvilvl_b(_tmp1_m, _tmp0_m); \ + _out1 = __lasx_xvilvh_b(_tmp1_m, _tmp0_m); \ + _out2 = __lasx_xvilvl_b(_tmp3_m, _tmp2_m); \ + _out3 = __lasx_xvilvh_b(_tmp3_m, _tmp2_m); \ + _out4 = __lasx_xvilvl_b(_tmp5_m, _tmp4_m); \ + _out5 = __lasx_xvilvh_b(_tmp5_m, _tmp4_m); \ + _out6 = __lasx_xvilvl_b(_tmp7_m, _tmp6_m); \ + _out7 = __lasx_xvilvh_b(_tmp7_m, _tmp6_m); \ + _tmp0_m = __lasx_xvilvl_w(_out2, _out0); \ + _tmp2_m = __lasx_xvilvh_w(_out2, _out0); \ + _tmp4_m = __lasx_xvilvl_w(_out3, _out1); \ + _tmp6_m = __lasx_xvilvh_w(_out3, _out1); \ + _tmp1_m = __lasx_xvilvl_w(_out6, _out4); \ + _tmp3_m = __lasx_xvilvh_w(_out6, _out4); \ + _tmp5_m = __lasx_xvilvl_w(_out7, _out5); \ + _tmp7_m = __lasx_xvilvh_w(_out7, _out5); \ + _out0 = __lasx_xvilvl_d(_tmp1_m, _tmp0_m); \ + _out1 = __lasx_xvilvh_d(_tmp1_m, _tmp0_m); \ + _out2 = __lasx_xvilvl_d(_tmp3_m, _tmp2_m); \ + _out3 = __lasx_xvilvh_d(_tmp3_m, _tmp2_m); \ + _out4 = __lasx_xvilvl_d(_tmp5_m, _tmp4_m); \ + _out5 = __lasx_xvilvh_d(_tmp5_m, _tmp4_m); \ + _out6 = __lasx_xvilvl_d(_tmp7_m, _tmp6_m); \ + _out7 = __lasx_xvilvh_d(_tmp7_m, _tmp6_m); \ + } + +/* + * ============================================================================= + * Description : Transpose input 16x8 byte block + * Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, + * _in8, _in9, _in10, _in11, _in12, _in13, _in14, _in15 + * (input 16x8 byte block) + * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, + * _out7 (output 8x16 byte block) + * Details : The rows of the matrix become columns, and the columns become + * rows. 
+ * Example : LASX_TRANSPOSE16x8_H + * _in0 : 1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in1 : 2,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in2 : 3,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in3 : 4,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in4 : 5,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in5 : 6,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in6 : 7,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in7 : 8,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in8 : 9,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in9 : 1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in10 : 0,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in11 : 2,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in12 : 3,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in13 : 7,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in14 : 5,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * _in15 : 6,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0 + * + * _out0 : 1,2,3,4,5,6,7,8,9,1,0,2,3,7,5,6 + * _out1 : 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2 + * _out2 : 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 + * _out3 : 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4 + * _out4 : 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 + * _out5 : 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6 + * _out6 : 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 + * _out7 : 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 + * ============================================================================= + */ +#define LASX_TRANSPOSE16x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _in8, _in9, _in10, _in11, _in12, _in13, _in14, \ + _in15, _out0, _out1, _out2, _out3, _out4, _out5, \ + _out6, _out7) \ + { \ + __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \ + __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \ + __m256i _t0, _t1, _t2, _t3, _t4, _t5, _t6, _t7; \ + \ + _tmp0_m = __lasx_xvilvl_h(_in2, _in0); \ + _tmp1_m = __lasx_xvilvl_h(_in3, _in1); \ + _tmp2_m = __lasx_xvilvl_h(_in6, _in4); \ + _tmp3_m = __lasx_xvilvl_h(_in7, _in5); \ + _tmp4_m = __lasx_xvilvl_h(_in10, _in8); \ + _tmp5_m = __lasx_xvilvl_h(_in11, _in9); \ + _tmp6_m = __lasx_xvilvl_h(_in14, _in12); \ + _tmp7_m = __lasx_xvilvl_h(_in15, _in13); \ + _t0 = __lasx_xvilvl_h(_tmp1_m, _tmp0_m); \ + _t1 = __lasx_xvilvh_h(_tmp1_m, _tmp0_m); \ + _t2 = __lasx_xvilvl_h(_tmp3_m, _tmp2_m); \ + _t3 = __lasx_xvilvh_h(_tmp3_m, _tmp2_m); \ + _t4 = __lasx_xvilvl_h(_tmp5_m, _tmp4_m); \ + _t5 = __lasx_xvilvh_h(_tmp5_m, _tmp4_m); \ + _t6 = __lasx_xvilvl_h(_tmp7_m, _tmp6_m); \ + _t7 = __lasx_xvilvh_h(_tmp7_m, _tmp6_m); \ + _tmp0_m = __lasx_xvilvl_d(_t2, _t0); \ + _tmp2_m = __lasx_xvilvh_d(_t2, _t0); \ + _tmp4_m = __lasx_xvilvl_d(_t3, _t1); \ + _tmp6_m = __lasx_xvilvh_d(_t3, _t1); \ + _tmp1_m = __lasx_xvilvl_d(_t6, _t4); \ + _tmp3_m = __lasx_xvilvh_d(_t6, _t4); \ + _tmp5_m = __lasx_xvilvl_d(_t7, _t5); \ + _tmp7_m = __lasx_xvilvh_d(_t7, _t5); \ + _out0 = __lasx_xvpermi_q(_tmp1_m, _tmp0_m, 0x20); \ + _out1 = __lasx_xvpermi_q(_tmp3_m, _tmp2_m, 0x20); \ + _out2 = __lasx_xvpermi_q(_tmp5_m, _tmp4_m, 0x20); \ + _out3 = __lasx_xvpermi_q(_tmp7_m, _tmp6_m, 0x20); \ + \ + _tmp0_m = __lasx_xvilvh_h(_in2, _in0); \ + _tmp1_m = __lasx_xvilvh_h(_in3, _in1); \ + _tmp2_m = __lasx_xvilvh_h(_in6, _in4); \ + _tmp3_m = __lasx_xvilvh_h(_in7, _in5); \ + _tmp4_m = __lasx_xvilvh_h(_in10, _in8); \ + _tmp5_m = __lasx_xvilvh_h(_in11, _in9); \ + _tmp6_m = __lasx_xvilvh_h(_in14, _in12); \ + _tmp7_m = __lasx_xvilvh_h(_in15, _in13); \ + _t0 = __lasx_xvilvl_h(_tmp1_m, _tmp0_m); \ + _t1 = __lasx_xvilvh_h(_tmp1_m, _tmp0_m); \ + _t2 = __lasx_xvilvl_h(_tmp3_m, _tmp2_m); \ + _t3 = __lasx_xvilvh_h(_tmp3_m, _tmp2_m); \ + _t4 = __lasx_xvilvl_h(_tmp5_m, _tmp4_m); \ + _t5 = __lasx_xvilvh_h(_tmp5_m, _tmp4_m); \ + _t6 = __lasx_xvilvl_h(_tmp7_m, _tmp6_m); \ + _t7 = __lasx_xvilvh_h(_tmp7_m, _tmp6_m); \ + _tmp0_m = __lasx_xvilvl_d(_t2, _t0); \ + _tmp2_m = 
__lasx_xvilvh_d(_t2, _t0); \ + _tmp4_m = __lasx_xvilvl_d(_t3, _t1); \ + _tmp6_m = __lasx_xvilvh_d(_t3, _t1); \ + _tmp1_m = __lasx_xvilvl_d(_t6, _t4); \ + _tmp3_m = __lasx_xvilvh_d(_t6, _t4); \ + _tmp5_m = __lasx_xvilvl_d(_t7, _t5); \ + _tmp7_m = __lasx_xvilvh_d(_t7, _t5); \ + _out4 = __lasx_xvpermi_q(_tmp1_m, _tmp0_m, 0x20); \ + _out5 = __lasx_xvpermi_q(_tmp3_m, _tmp2_m, 0x20); \ + _out6 = __lasx_xvpermi_q(_tmp5_m, _tmp4_m, 0x20); \ + _out7 = __lasx_xvpermi_q(_tmp7_m, _tmp6_m, 0x20); \ + } + +/* + * ============================================================================= + * Description : Transpose 4x4 block with halfword elements in vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3 + * Outputs - _out0, _out1, _out2, _out3 + * Return Type - signed halfword + * Details : The rows of the matrix become columns, and the columns become + * rows. + * Example : See LASX_TRANSPOSE8x8_H + * ============================================================================= + */ +#define LASX_TRANSPOSE4x4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, \ + _out3) \ + { \ + __m256i _s0_m, _s1_m; \ + \ + _s0_m = __lasx_xvilvl_h(_in1, _in0); \ + _s1_m = __lasx_xvilvl_h(_in3, _in2); \ + _out0 = __lasx_xvilvl_w(_s1_m, _s0_m); \ + _out2 = __lasx_xvilvh_w(_s1_m, _s0_m); \ + _out1 = __lasx_xvilvh_d(_out0, _out0); \ + _out3 = __lasx_xvilvh_d(_out2, _out2); \ + } + +/* + * ============================================================================= + * Description : Transpose input 8x8 byte block + * Arguments : Inputs - _in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7 + * (input 8x8 byte block) + * Outputs - _out0, _out1, _out2, _out3, _out4, _out5, _out6, + * _out7 (output 8x8 byte block) + * Example : See LASX_TRANSPOSE8x8_H + * ============================================================================= + */ +#define LASX_TRANSPOSE8x8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \ + __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \ + _tmp0_m = __lasx_xvilvl_b(_in2, _in0); \ + _tmp1_m = __lasx_xvilvl_b(_in3, _in1); \ + _tmp2_m = __lasx_xvilvl_b(_in6, _in4); \ + _tmp3_m = __lasx_xvilvl_b(_in7, _in5); \ + _tmp4_m = __lasx_xvilvl_b(_tmp1_m, _tmp0_m); \ + _tmp5_m = __lasx_xvilvh_b(_tmp1_m, _tmp0_m); \ + _tmp6_m = __lasx_xvilvl_b(_tmp3_m, _tmp2_m); \ + _tmp7_m = __lasx_xvilvh_b(_tmp3_m, _tmp2_m); \ + _out0 = __lasx_xvilvl_w(_tmp6_m, _tmp4_m); \ + _out2 = __lasx_xvilvh_w(_tmp6_m, _tmp4_m); \ + _out4 = __lasx_xvilvl_w(_tmp7_m, _tmp5_m); \ + _out6 = __lasx_xvilvh_w(_tmp7_m, _tmp5_m); \ + _out1 = __lasx_xvbsrl_v(_out0, 8); \ + _out3 = __lasx_xvbsrl_v(_out2, 8); \ + _out5 = __lasx_xvbsrl_v(_out4, 8); \ + _out7 = __lasx_xvbsrl_v(_out6, 8); \ + } + +/* + * ============================================================================= + * Description : Transpose 8x8 block with halfword elements in vectors. + * Arguments : Inputs - _in0, _in1, ~ + * Outputs - _out0, _out1, ~ + * Details : The rows of the matrix become columns, and the columns become + * rows. 
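Every LSX/LASX transpose macro in this header implements the same mathematical operation with interleave and permute instructions; a plain-C reference for the 8x8 halfword case (illustrative only) is simply:

    #include <stdint.h>

    /* Reference 8x8 transpose: rows become columns. */
    static void transpose8x8_h_ref(const int16_t in[8][8], int16_t out[8][8]) {
      for (int r = 0; r < 8; ++r)
        for (int c = 0; c < 8; ++c)
          out[c][r] = in[r][c];
    }

The example block that follows shows the same relation on concrete values.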
+ * Example : LASX_TRANSPOSE8x8_H + * _in0 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * _in1 : 8,2,3,4, 5,6,7,8, 8,2,3,4, 5,6,7,8 + * _in2 : 8,2,3,4, 5,6,7,8, 8,2,3,4, 5,6,7,8 + * _in3 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * _in4 : 9,2,3,4, 5,6,7,8, 9,2,3,4, 5,6,7,8 + * _in5 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * _in6 : 1,2,3,4, 5,6,7,8, 1,2,3,4, 5,6,7,8 + * _in7 : 9,2,3,4, 5,6,7,8, 9,2,3,4, 5,6,7,8 + * + * _out0 : 1,8,8,1, 9,1,1,9, 1,8,8,1, 9,1,1,9 + * _out1 : 2,2,2,2, 2,2,2,2, 2,2,2,2, 2,2,2,2 + * _out2 : 3,3,3,3, 3,3,3,3, 3,3,3,3, 3,3,3,3 + * _out3 : 4,4,4,4, 4,4,4,4, 4,4,4,4, 4,4,4,4 + * _out4 : 5,5,5,5, 5,5,5,5, 5,5,5,5, 5,5,5,5 + * _out5 : 6,6,6,6, 6,6,6,6, 6,6,6,6, 6,6,6,6 + * _out6 : 7,7,7,7, 7,7,7,7, 7,7,7,7, 7,7,7,7 + * _out7 : 8,8,8,8, 8,8,8,8, 8,8,8,8, 8,8,8,8 + * ============================================================================= + */ +#define LASX_TRANSPOSE8x8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + __m256i _s0_m, _s1_m; \ + __m256i _tmp0_m, _tmp1_m, _tmp2_m, _tmp3_m; \ + __m256i _tmp4_m, _tmp5_m, _tmp6_m, _tmp7_m; \ + \ + _s0_m = __lasx_xvilvl_h(_in6, _in4); \ + _s1_m = __lasx_xvilvl_h(_in7, _in5); \ + _tmp0_m = __lasx_xvilvl_h(_s1_m, _s0_m); \ + _tmp1_m = __lasx_xvilvh_h(_s1_m, _s0_m); \ + _s0_m = __lasx_xvilvh_h(_in6, _in4); \ + _s1_m = __lasx_xvilvh_h(_in7, _in5); \ + _tmp2_m = __lasx_xvilvl_h(_s1_m, _s0_m); \ + _tmp3_m = __lasx_xvilvh_h(_s1_m, _s0_m); \ + \ + _s0_m = __lasx_xvilvl_h(_in2, _in0); \ + _s1_m = __lasx_xvilvl_h(_in3, _in1); \ + _tmp4_m = __lasx_xvilvl_h(_s1_m, _s0_m); \ + _tmp5_m = __lasx_xvilvh_h(_s1_m, _s0_m); \ + _s0_m = __lasx_xvilvh_h(_in2, _in0); \ + _s1_m = __lasx_xvilvh_h(_in3, _in1); \ + _tmp6_m = __lasx_xvilvl_h(_s1_m, _s0_m); \ + _tmp7_m = __lasx_xvilvh_h(_s1_m, _s0_m); \ + \ + _out0 = __lasx_xvpickev_d(_tmp0_m, _tmp4_m); \ + _out2 = __lasx_xvpickev_d(_tmp1_m, _tmp5_m); \ + _out4 = __lasx_xvpickev_d(_tmp2_m, _tmp6_m); \ + _out6 = __lasx_xvpickev_d(_tmp3_m, _tmp7_m); \ + _out1 = __lasx_xvpickod_d(_tmp0_m, _tmp4_m); \ + _out3 = __lasx_xvpickod_d(_tmp1_m, _tmp5_m); \ + _out5 = __lasx_xvpickod_d(_tmp2_m, _tmp6_m); \ + _out7 = __lasx_xvpickod_d(_tmp3_m, _tmp7_m); \ + } + +/* + * ============================================================================= + * Description : Butterfly of 4 input vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3 + * Outputs - _out0, _out1, _out2, _out3 + * Details : Butterfly operation + * Example : LASX_BUTTERFLY_4 + * _out0 = _in0 + _in3; + * _out1 = _in1 + _in2; + * _out2 = _in1 - _in2; + * _out3 = _in0 - _in3; + * ============================================================================= + */ +#define LASX_BUTTERFLY_4_B(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lasx_xvadd_b(_in0, _in3); \ + _out1 = __lasx_xvadd_b(_in1, _in2); \ + _out2 = __lasx_xvsub_b(_in1, _in2); \ + _out3 = __lasx_xvsub_b(_in0, _in3); \ + } +#define LASX_BUTTERFLY_4_H(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lasx_xvadd_h(_in0, _in3); \ + _out1 = __lasx_xvadd_h(_in1, _in2); \ + _out2 = __lasx_xvsub_h(_in1, _in2); \ + _out3 = __lasx_xvsub_h(_in0, _in3); \ + } +#define LASX_BUTTERFLY_4_W(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { \ + _out0 = __lasx_xvadd_w(_in0, _in3); \ + _out1 = __lasx_xvadd_w(_in1, _in2); \ + _out2 = __lasx_xvsub_w(_in1, _in2); \ + _out3 = __lasx_xvsub_w(_in0, _in3); \ + } +#define LASX_BUTTERFLY_4_D(_in0, _in1, _in2, _in3, _out0, _out1, _out2, _out3) \ + { 
\ + _out0 = __lasx_xvadd_d(_in0, _in3); \ + _out1 = __lasx_xvadd_d(_in1, _in2); \ + _out2 = __lasx_xvsub_d(_in1, _in2); \ + _out3 = __lasx_xvsub_d(_in0, _in3); \ + } + +/* + * ============================================================================= + * Description : Butterfly of 8 input vectors + * Arguments : Inputs - _in0, _in1, _in2, _in3, ~ + * Outputs - _out0, _out1, _out2, _out3, ~ + * Details : Butterfly operation + * Example : LASX_BUTTERFLY_8 + * _out0 = _in0 + _in7; + * _out1 = _in1 + _in6; + * _out2 = _in2 + _in5; + * _out3 = _in3 + _in4; + * _out4 = _in3 - _in4; + * _out5 = _in2 - _in5; + * _out6 = _in1 - _in6; + * _out7 = _in0 - _in7; + * ============================================================================= + */ +#define LASX_BUTTERFLY_8_B(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lasx_xvadd_b(_in0, _in7); \ + _out1 = __lasx_xvadd_b(_in1, _in6); \ + _out2 = __lasx_xvadd_b(_in2, _in5); \ + _out3 = __lasx_xvadd_b(_in3, _in4); \ + _out4 = __lasx_xvsub_b(_in3, _in4); \ + _out5 = __lasx_xvsub_b(_in2, _in5); \ + _out6 = __lasx_xvsub_b(_in1, _in6); \ + _out7 = __lasx_xvsub_b(_in0, _in7); \ + } + +#define LASX_BUTTERFLY_8_H(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lasx_xvadd_h(_in0, _in7); \ + _out1 = __lasx_xvadd_h(_in1, _in6); \ + _out2 = __lasx_xvadd_h(_in2, _in5); \ + _out3 = __lasx_xvadd_h(_in3, _in4); \ + _out4 = __lasx_xvsub_h(_in3, _in4); \ + _out5 = __lasx_xvsub_h(_in2, _in5); \ + _out6 = __lasx_xvsub_h(_in1, _in6); \ + _out7 = __lasx_xvsub_h(_in0, _in7); \ + } + +#define LASX_BUTTERFLY_8_W(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lasx_xvadd_w(_in0, _in7); \ + _out1 = __lasx_xvadd_w(_in1, _in6); \ + _out2 = __lasx_xvadd_w(_in2, _in5); \ + _out3 = __lasx_xvadd_w(_in3, _in4); \ + _out4 = __lasx_xvsub_w(_in3, _in4); \ + _out5 = __lasx_xvsub_w(_in2, _in5); \ + _out6 = __lasx_xvsub_w(_in1, _in6); \ + _out7 = __lasx_xvsub_w(_in0, _in7); \ + } + +#define LASX_BUTTERFLY_8_D(_in0, _in1, _in2, _in3, _in4, _in5, _in6, _in7, \ + _out0, _out1, _out2, _out3, _out4, _out5, _out6, \ + _out7) \ + { \ + _out0 = __lasx_xvadd_d(_in0, _in7); \ + _out1 = __lasx_xvadd_d(_in1, _in6); \ + _out2 = __lasx_xvadd_d(_in2, _in5); \ + _out3 = __lasx_xvadd_d(_in3, _in4); \ + _out4 = __lasx_xvsub_d(_in3, _in4); \ + _out5 = __lasx_xvsub_d(_in2, _in5); \ + _out6 = __lasx_xvsub_d(_in1, _in6); \ + _out7 = __lasx_xvsub_d(_in0, _in7); \ + } + +#endif // LASX + +/* + * ============================================================================= + * Description : Print out elements in vector. + * Arguments : Inputs - RTYPE, _element_num, _in0, _enter + * Outputs - + * Details : Print out '_element_num' elements in 'RTYPE' vector '_in0', if + * '_enter' is TRUE, prefix "\nVP:" will be added first. 
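For the LSX/LASX BUTTERFLY_8 macros defined just above, the element width changes with the _B/_H/_W/_D suffix but the data flow does not; a scalar model with an illustrative name:

    #include <stdint.h>

    /* out0..out3 are mirrored sums, out7..out4 the matching differences. */
    static void butterfly8_ref(const int32_t in[8], int32_t out[8]) {
      for (int i = 0; i < 4; ++i) {
        out[i] = in[i] + in[7 - i];
        out[7 - i] = in[i] - in[7 - i];
      }
    }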
+ * Example : VECT_PRINT(v4i32,4,in0,1); // in0: 1,2,3,4 + * VP:1,2,3,4, + * ============================================================================= + */ +#define VECT_PRINT(RTYPE, element_num, in0, enter) \ + { \ + RTYPE _tmp0 = (RTYPE)in0; \ + int _i = 0; \ + if (enter) \ + printf("\nVP:"); \ + for (_i = 0; _i < element_num; _i++) \ + printf("%d,", _tmp0[_i]); \ + } + +#endif /* LOONGSON_INTRINSICS_H */ +#endif /* INCLUDE_LIBYUV_LOONGSON_INTRINSICS_H */ diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/macros_msa.h b/third-party/libyuv/third_party/libyuv/include/libyuv/macros_msa.h index 4e232b66bf..b9a44fcced 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/macros_msa.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/macros_msa.h @@ -81,25 +81,35 @@ }) #endif // !(__mips == 64) #else // !(__mips_isa_rev >= 6) -#define LW(psrc) \ - ({ \ - const uint8_t* psrc_lw_m = (const uint8_t*)(psrc); \ - uint32_t val_m; \ - asm volatile("ulw %[val_m], %[psrc_lw_m] \n" \ - : [val_m] "=r"(val_m) \ - : [psrc_lw_m] "m"(*psrc_lw_m)); \ - val_m; \ +#define LW(psrc) \ + ({ \ + uint8_t* psrc_lw_m = (uint8_t*)(psrc); \ + uint32_t val_lw_m; \ + \ + __asm__ volatile( \ + "lwr %[val_lw_m], 0(%[psrc_lw_m]) \n\t" \ + "lwl %[val_lw_m], 3(%[psrc_lw_m]) \n\t" \ + \ + : [val_lw_m] "=&r"(val_lw_m) \ + : [psrc_lw_m] "r"(psrc_lw_m)); \ + \ + val_lw_m; \ }) #if (__mips == 64) -#define LD(psrc) \ - ({ \ - const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \ - uint64_t val_m = 0; \ - asm volatile("uld %[val_m], %[psrc_ld_m] \n" \ - : [val_m] "=r"(val_m) \ - : [psrc_ld_m] "m"(*psrc_ld_m)); \ - val_m; \ +#define LD(psrc) \ + ({ \ + uint8_t* psrc_ld_m = (uint8_t*)(psrc); \ + uint64_t val_ld_m = 0; \ + \ + __asm__ volatile( \ + "ldr %[val_ld_m], 0(%[psrc_ld_m]) \n\t" \ + "ldl %[val_ld_m], 7(%[psrc_ld_m]) \n\t" \ + \ + : [val_ld_m] "=&r"(val_ld_m) \ + : [psrc_ld_m] "r"(psrc_ld_m)); \ + \ + val_ld_m; \ }) #else // !(__mips == 64) #define LD(psrc) \ diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/planar_functions.h b/third-party/libyuv/third_party/libyuv/include/libyuv/planar_functions.h index def773cb44..ffe6370561 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/planar_functions.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/planar_functions.h @@ -83,6 +83,50 @@ void SetPlane(uint8_t* dst_y, int height, uint32_t value); +// Convert a plane of tiles of 16 x H to linear. +LIBYUV_API +int DetilePlane(const uint8_t* src_y, + int src_stride_y, + uint8_t* dst_y, + int dst_stride_y, + int width, + int height, + int tile_height); + +// Convert a plane of 16 bit tiles of 16 x H to linear. +LIBYUV_API +int DetilePlane_16(const uint16_t* src_y, + int src_stride_y, + uint16_t* dst_y, + int dst_stride_y, + int width, + int height, + int tile_height); + +// Convert a UV plane of tiles of 16 x H into linear U and V planes. +LIBYUV_API +void DetileSplitUVPlane(const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + int tile_height); + +// Convert a Y and UV plane of tiles into interlaced YUY2. +LIBYUV_API +void DetileToYUY2(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_yuy2, + int dst_stride_yuy2, + int width, + int height, + int tile_height); + // Split interleaved UV plane into separate U and V planes. 
LIBYUV_API void SplitUVPlane(const uint8_t* src_uv, @@ -330,7 +374,26 @@ int I444Copy(const uint8_t* src_y, int width, int height); +// Copy I210 to I210. +#define I210ToI210 I210Copy +LIBYUV_API +int I210Copy(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int width, + int height); + // Copy NV12. Supports inverting. +LIBYUV_API int NV12Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_uv, @@ -343,6 +406,7 @@ int NV12Copy(const uint8_t* src_y, int height); // Copy NV21. Supports inverting. +LIBYUV_API int NV21Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_vu, @@ -421,6 +485,14 @@ int YUY2ToY(const uint8_t* src_yuy2, int width, int height); +LIBYUV_API +int UYVYToY(const uint8_t* src_uyvy, + int src_stride_uyvy, + uint8_t* dst_y, + int dst_stride_y, + int width, + int height); + // Convert I420 to I400. (calls CopyPlane ignoring u/v). LIBYUV_API int I420ToI400(const uint8_t* src_y, @@ -943,6 +1015,21 @@ int InterpolatePlane(const uint8_t* src0, int height, int interpolation); +// Interpolate between two images using specified amount of interpolation +// (0 to 255) and store to destination. +// 'interpolation' is specified as 8 bit fraction where 0 means 100% src0 +// and 255 means 1% src0 and 99% src1. +LIBYUV_API +int InterpolatePlane_16(const uint16_t* src0, + int src_stride0, // measured in 16 bit pixels + const uint16_t* src1, + int src_stride1, + uint16_t* dst, + int dst_stride, + int width, + int height, + int interpolation); + // Interpolate between two ARGB images using specified amount of interpolation // Internally calls InterpolatePlane with width * 4 (bpp). LIBYUV_API diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/rotate.h b/third-party/libyuv/third_party/libyuv/include/libyuv/rotate.h index 308882242c..684ed5e6de 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/rotate.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/rotate.h @@ -49,6 +49,24 @@ int I420Rotate(const uint8_t* src_y, int height, enum RotationMode mode); +// Rotate I422 frame. +LIBYUV_API +int I422Rotate(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + enum RotationMode mode); + // Rotate I444 frame. LIBYUV_API int I444Rotate(const uint8_t* src_y, @@ -83,6 +101,26 @@ int NV12ToI420Rotate(const uint8_t* src_y, int height, enum RotationMode mode); +// Convert Android420 to I420 with rotation. +// "rotation" can be 0, 90, 180 or 270. +LIBYUV_API +int Android420ToI420Rotate(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + int src_pixel_stride_uv, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + enum RotationMode rotation); + // Rotate a plane by 0, 90, 180, or 270. LIBYUV_API int RotatePlane(const uint8_t* src, @@ -119,38 +157,50 @@ void RotatePlane270(const uint8_t* src, int height); // Rotations for when U and V are interleaved. 
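A similarly hedged sketch of the new I422Rotate entry point declared above; kRotate180 (from the existing RotationMode enum) is chosen because it keeps source and destination dimensions identical, so packed strides can simply mirror the source:

    #include <stdint.h>
    #include "libyuv/rotate.h"

    /* I422: chroma planes are half width, full height. */
    int RotateI422By180(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                        uint8_t* dy, uint8_t* du, uint8_t* dv,
                        int width, int height) {
      return I422Rotate(y, width, u, width / 2, v, width / 2,
                        dy, width, du, width / 2, dv, width / 2,
                        width, height, kRotate180);
    }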
-// These functions take one input pointer and +// These functions take one UV input pointer and // split the data into two buffers while -// rotating them. Deprecated. +// rotating them. +// width and height expected to be half size for NV12. LIBYUV_API -void RotateUV90(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height); +int SplitRotateUV(const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + enum RotationMode mode); LIBYUV_API -void RotateUV180(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height); +void SplitRotateUV90(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height); LIBYUV_API -void RotateUV270(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height); +void SplitRotateUV180(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height); + +LIBYUV_API +void SplitRotateUV270(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height); // The 90 and 270 functions are based on transposes. // Doing a transpose with reversing the read/write @@ -165,14 +215,14 @@ void TransposePlane(const uint8_t* src, int height); LIBYUV_API -void TransposeUV(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height); +void SplitTransposeUV(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height); #ifdef __cplusplus } // extern "C" diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/rotate_row.h b/third-party/libyuv/third_party/libyuv/include/libyuv/rotate_row.h index f4c701fb4f..aa8528a925 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/rotate_row.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/rotate_row.h @@ -61,9 +61,9 @@ extern "C" { #define HAS_TRANSPOSEUVWX16_MSA #endif -#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A) -#define HAS_TRANSPOSEWX8_MMI -#define HAS_TRANSPOSEUVWX8_MMI +#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) +#define HAS_TRANSPOSEWX16_LSX +#define HAS_TRANSPOSEUVWX16_LSX #endif void TransposeWxH_C(const uint8_t* src, @@ -93,11 +93,6 @@ void TransposeWx8_SSSE3(const uint8_t* src, uint8_t* dst, int dst_stride, int width); -void TransposeWx8_MMI(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride, - int width); void TransposeWx8_Fast_SSSE3(const uint8_t* src, int src_stride, uint8_t* dst, @@ -108,6 +103,11 @@ void TransposeWx16_MSA(const uint8_t* src, uint8_t* dst, int dst_stride, int width); +void TransposeWx16_LSX(const uint8_t* src, + int src_stride, + uint8_t* dst, + int dst_stride, + int width); void TransposeWx8_Any_NEON(const uint8_t* src, int src_stride, @@ -119,11 +119,6 @@ void TransposeWx8_Any_SSSE3(const uint8_t* src, uint8_t* dst, int dst_stride, int width); -void TransposeWx8_Any_MMI(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride, - int width); void TransposeWx8_Fast_Any_SSSE3(const uint8_t* src, 
int src_stride, uint8_t* dst, @@ -134,6 +129,11 @@ void TransposeWx16_Any_MSA(const uint8_t* src, uint8_t* dst, int dst_stride, int width); +void TransposeWx16_Any_LSX(const uint8_t* src, + int src_stride, + uint8_t* dst, + int dst_stride, + int width); void TransposeUVWxH_C(const uint8_t* src, int src_stride, @@ -172,13 +172,6 @@ void TransposeUVWx8_NEON(const uint8_t* src, uint8_t* dst_b, int dst_stride_b, int width); -void TransposeUVWx8_MMI(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width); void TransposeUVWx16_MSA(const uint8_t* src, int src_stride, uint8_t* dst_a, @@ -186,6 +179,13 @@ void TransposeUVWx16_MSA(const uint8_t* src, uint8_t* dst_b, int dst_stride_b, int width); +void TransposeUVWx16_LSX(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width); void TransposeUVWx8_Any_SSE2(const uint8_t* src, int src_stride, @@ -201,13 +201,6 @@ void TransposeUVWx8_Any_NEON(const uint8_t* src, uint8_t* dst_b, int dst_stride_b, int width); -void TransposeUVWx8_Any_MMI(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width); void TransposeUVWx16_Any_MSA(const uint8_t* src, int src_stride, uint8_t* dst_a, @@ -215,6 +208,13 @@ void TransposeUVWx16_Any_MSA(const uint8_t* src, uint8_t* dst_b, int dst_stride_b, int width); +void TransposeUVWx16_Any_LSX(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width); #ifdef __cplusplus } // extern "C" diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/row.h b/third-party/libyuv/third_party/libyuv/include/libyuv/row.h index 1444a04786..861c6d3e9c 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/row.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/row.h @@ -74,7 +74,6 @@ extern "C" { #if !defined(LIBYUV_DISABLE_X86) && \ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) // Conversions: -#define HAS_ABGRTOUVROW_SSSE3 #define HAS_ABGRTOYROW_SSSE3 #define HAS_ARGB1555TOARGBROW_SSE2 #define HAS_ARGB4444TOARGBROW_SSE2 @@ -87,12 +86,8 @@ extern "C" { #define HAS_ARGBTORGB24ROW_SSSE3 #define HAS_ARGBTORGB565DITHERROW_SSE2 #define HAS_ARGBTORGB565ROW_SSE2 -#define HAS_ARGBTOUV444ROW_SSSE3 -#define HAS_ARGBTOUVJROW_SSSE3 -#define HAS_ARGBTOUVROW_SSSE3 #define HAS_ARGBTOYJROW_SSSE3 #define HAS_ARGBTOYROW_SSSE3 -#define HAS_BGRATOUVROW_SSSE3 #define HAS_BGRATOYROW_SSSE3 #define HAS_COPYROW_ERMS #define HAS_COPYROW_SSE2 @@ -107,6 +102,8 @@ extern "C" { #define HAS_I422TOUYVYROW_SSE2 #define HAS_I422TOYUY2ROW_SSE2 #define HAS_I444TOARGBROW_SSSE3 +#define HAS_I444TORGB24ROW_SSSE3 +#define HAS_INTERPOLATEROW_SSSE3 #define HAS_J400TOARGBROW_SSE2 #define HAS_J422TOARGBROW_SSSE3 #define HAS_MERGEUVROW_SSE2 @@ -119,13 +116,12 @@ extern "C" { #define HAS_NV21TORGB24ROW_SSSE3 #define HAS_RAWTOARGBROW_SSSE3 #define HAS_RAWTORGB24ROW_SSSE3 +#define HAS_RAWTOYJROW_SSSE3 #define HAS_RAWTOYROW_SSSE3 #define HAS_RGB24TOARGBROW_SSSE3 -#define HAS_RGB24TOYROW_SSSE3 #define HAS_RGB24TOYJROW_SSSE3 -#define HAS_RAWTOYJROW_SSSE3 +#define HAS_RGB24TOYROW_SSSE3 #define HAS_RGB565TOARGBROW_SSE2 -#define HAS_RGBATOUVROW_SSSE3 #define HAS_RGBATOYROW_SSSE3 #define HAS_SETROW_ERMS #define HAS_SETROW_X86 @@ -138,11 +134,18 @@ extern "C" { #define HAS_YUY2TOUV422ROW_SSE2 #define HAS_YUY2TOUVROW_SSE2 #define HAS_YUY2TOYROW_SSE2 +#if !defined(LIBYUV_BIT_EXACT) 
+#define HAS_ABGRTOUVROW_SSSE3 +#define HAS_ARGBTOUV444ROW_SSSE3 +#define HAS_ARGBTOUVJROW_SSSE3 +#define HAS_ARGBTOUVROW_SSSE3 +#define HAS_BGRATOUVROW_SSSE3 +#define HAS_RGBATOUVROW_SSSE3 +#endif // Effects: #define HAS_ARGBADDROW_SSE2 #define HAS_ARGBAFFINEROW_SSE2 -#define HAS_ARGBATTENUATEROW_SSSE3 #define HAS_ARGBBLENDROW_SSSE3 #define HAS_ARGBCOLORMATRIXROW_SSSE3 #define HAS_ARGBCOLORTABLEROW_X86 @@ -161,13 +164,15 @@ extern "C" { #define HAS_BLENDPLANEROW_SSSE3 #define HAS_COMPUTECUMULATIVESUMROW_SSE2 #define HAS_CUMULATIVESUMTOAVERAGEROW_SSE2 -#define HAS_INTERPOLATEROW_SSSE3 #define HAS_RGBCOLORTABLEROW_X86 #define HAS_SOBELROW_SSE2 #define HAS_SOBELTOPLANEROW_SSE2 #define HAS_SOBELXROW_SSE2 #define HAS_SOBELXYROW_SSE2 #define HAS_SOBELYROW_SSE2 +#if !defined(LIBYUV_BIT_EXACT) +#define HAS_ARGBATTENUATEROW_SSSE3 +#endif // The following functions fail on gcc/clang 32 bit with fpic and framepointer. // caveat: clangcl uses row_win.cc which works. @@ -192,16 +197,11 @@ extern "C" { #define HAS_ARGBPOLYNOMIALROW_AVX2 #define HAS_ARGBSHUFFLEROW_AVX2 #define HAS_ARGBTORGB565DITHERROW_AVX2 -#define HAS_ARGBTOUVJROW_AVX2 -#define HAS_ARGBTOUVROW_AVX2 #define HAS_ARGBTOYJROW_AVX2 #define HAS_ARGBTOYROW_AVX2 -#define HAS_RGB24TOYJROW_AVX2 -#define HAS_RAWTOYJROW_AVX2 #define HAS_COPYROW_AVX #define HAS_H422TOARGBROW_AVX2 #define HAS_HALFFLOATROW_AVX2 -// #define HAS_HALFFLOATROW_F16C // Enable to test halffloat cast #define HAS_I422TOARGB1555ROW_AVX2 #define HAS_I422TOARGB4444ROW_AVX2 #define HAS_I422TOARGBROW_AVX2 @@ -209,6 +209,7 @@ extern "C" { #define HAS_I422TORGB565ROW_AVX2 #define HAS_I422TORGBAROW_AVX2 #define HAS_I444TOARGBROW_AVX2 +#define HAS_I444TORGB24ROW_AVX2 #define HAS_INTERPOLATEROW_AVX2 #define HAS_J422TOARGBROW_AVX2 #define HAS_MERGEUVROW_AVX2 @@ -218,6 +219,8 @@ extern "C" { #define HAS_NV12TORGB565ROW_AVX2 #define HAS_NV21TOARGBROW_AVX2 #define HAS_NV21TORGB24ROW_AVX2 +#define HAS_RAWTOYJROW_AVX2 +#define HAS_RGB24TOYJROW_AVX2 #define HAS_SPLITUVROW_AVX2 #define HAS_UYVYTOARGBROW_AVX2 #define HAS_UYVYTOUV422ROW_AVX2 @@ -227,14 +230,21 @@ extern "C" { #define HAS_YUY2TOUV422ROW_AVX2 #define HAS_YUY2TOUVROW_AVX2 #define HAS_YUY2TOYROW_AVX2 +// #define HAS_HALFFLOATROW_F16C // Enable to test half float cast +#if !defined(LIBYUV_BIT_EXACT) +#define HAS_ARGBTOUVJROW_AVX2 +#define HAS_ARGBTOUVROW_AVX2 +#endif // Effects: #define HAS_ARGBADDROW_AVX2 -#define HAS_ARGBATTENUATEROW_AVX2 #define HAS_ARGBMULTIPLYROW_AVX2 #define HAS_ARGBSUBTRACTROW_AVX2 #define HAS_ARGBUNATTENUATEROW_AVX2 #define HAS_BLENDPLANEROW_AVX2 +#if !defined(LIBYUV_BIT_EXACT) +#define HAS_ARGBATTENUATEROW_AVX2 +#endif #if defined(__x86_64__) || !defined(__pic__) || defined(__clang__) || \ defined(_MSC_VER) @@ -270,27 +280,34 @@ extern "C" { // The following are available for gcc/clang x86 platforms: // TODO(fbarchard): Port to Visual C #if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) +#define HAS_AB64TOARGBROW_SSSE3 #define HAS_ABGRTOAR30ROW_SSSE3 +#define HAS_ABGRTOYJROW_SSSE3 +#define HAS_AR64TOARGBROW_SSSE3 +#define HAS_ARGBTOAB64ROW_SSSE3 #define HAS_ARGBTOAR30ROW_SSSE3 #define HAS_ARGBTOAR64ROW_SSSE3 -#define HAS_ARGBTOAB64ROW_SSSE3 -#define HAS_AR64TOARGBROW_SSSE3 -#define HAS_AB64TOARGBROW_SSSE3 #define HAS_CONVERT16TO8ROW_SSSE3 #define HAS_CONVERT8TO16ROW_SSE2 +#define HAS_DETILEROW_SSE2 +#define HAS_DETILEROW_16_SSE2 +#define HAS_DETILEROW_16_AVX +#define HAS_DETILESPLITUVROW_SSSE3 +#define HAS_DETILETOYUY2_SSE2 #define HAS_HALFMERGEUVROW_SSSE3 #define 
HAS_I210TOAR30ROW_SSSE3 #define HAS_I210TOARGBROW_SSSE3 #define HAS_I212TOAR30ROW_SSSE3 #define HAS_I212TOARGBROW_SSSE3 #define HAS_I400TOARGBROW_SSE2 -#define HAS_I422TOAR30ROW_SSSE3 #define HAS_I410TOAR30ROW_SSSE3 #define HAS_I410TOARGBROW_SSSE3 +#define HAS_I422TOAR30ROW_SSSE3 #define HAS_MERGEARGBROW_SSE2 -#define HAS_MERGEXRGBROW_SSE2 #define HAS_MERGERGBROW_SSSE3 +#define HAS_MERGEXRGBROW_SSE2 #define HAS_MIRRORUVROW_SSSE3 +#define HAS_NV21TOYUV24ROW_SSSE3 #define HAS_P210TOAR30ROW_SSSE3 #define HAS_P210TOARGBROW_SSSE3 #define HAS_P410TOAR30ROW_SSSE3 @@ -300,10 +317,14 @@ extern "C" { #define HAS_RGBATOYJROW_SSSE3 #define HAS_SPLITARGBROW_SSE2 #define HAS_SPLITARGBROW_SSSE3 +#define HAS_SPLITRGBROW_SSSE3 #define HAS_SPLITXRGBROW_SSE2 #define HAS_SPLITXRGBROW_SSSE3 -#define HAS_SPLITRGBROW_SSSE3 #define HAS_SWAPUVROW_SSSE3 +#define HAS_YUY2TONVUVROW_SSE2 +#if !defined(LIBYUV_BIT_EXACT) +#define HAS_ABGRTOUVJROW_SSSE3 +#endif #if defined(__x86_64__) || !defined(__pic__) // TODO(fbarchard): fix build error on android_full_debug=1 @@ -318,27 +339,20 @@ extern "C" { #if !defined(LIBYUV_DISABLE_X86) && \ (defined(__x86_64__) || defined(__i386__)) && \ (defined(CLANG_HAS_AVX2) || defined(GCC_HAS_AVX2)) +#define HAS_AB64TOARGBROW_AVX2 #define HAS_ABGRTOAR30ROW_AVX2 -#define HAS_ABGRTOUVROW_AVX2 +#define HAS_ABGRTOYJROW_AVX2 #define HAS_ABGRTOYROW_AVX2 +#define HAS_AR64TOARGBROW_AVX2 +#define HAS_ARGBTOAB64ROW_AVX2 #define HAS_ARGBTOAR30ROW_AVX2 +#define HAS_ARGBTOAR64ROW_AVX2 #define HAS_ARGBTORAWROW_AVX2 #define HAS_ARGBTORGB24ROW_AVX2 -#define HAS_ARGBTOAR64ROW_AVX2 -#define HAS_ARGBTOAB64ROW_AVX2 -#define HAS_AR64TOARGBROW_AVX2 -#define HAS_AB64TOARGBROW_AVX2 #define HAS_CONVERT16TO8ROW_AVX2 #define HAS_CONVERT8TO16ROW_AVX2 #define HAS_DIVIDEROW_16_AVX2 #define HAS_HALFMERGEUVROW_AVX2 -#define HAS_MERGEAR64ROW_AVX2 -#define HAS_MERGEARGB16TO8ROW_AVX2 -#define HAS_MERGEARGBROW_AVX2 -#define HAS_MERGEXR30ROW_AVX2 -#define HAS_MERGEXR64ROW_AVX2 -#define HAS_MERGEXRGB16TO8ROW_AVX2 -#define HAS_MERGEXRGBROW_AVX2 #define HAS_I210TOAR30ROW_AVX2 #define HAS_I210TOARGBROW_AVX2 #define HAS_I212TOAR30ROW_AVX2 @@ -346,23 +360,35 @@ extern "C" { #define HAS_I400TOARGBROW_AVX2 #define HAS_I410TOAR30ROW_AVX2 #define HAS_I410TOARGBROW_AVX2 +#define HAS_I422TOAR30ROW_AVX2 +#define HAS_I422TOUYVYROW_AVX2 +#define HAS_I422TOYUY2ROW_AVX2 +#define HAS_INTERPOLATEROW_16TO8_AVX2 +#define HAS_MERGEAR64ROW_AVX2 +#define HAS_MERGEARGB16TO8ROW_AVX2 +#define HAS_MERGEARGBROW_AVX2 +#define HAS_MERGEUVROW_16_AVX2 +#define HAS_MERGEXR30ROW_AVX2 +#define HAS_MERGEXR64ROW_AVX2 +#define HAS_MERGEXRGB16TO8ROW_AVX2 +#define HAS_MERGEXRGBROW_AVX2 +#define HAS_MIRRORUVROW_AVX2 +#define HAS_MULTIPLYROW_16_AVX2 +#define HAS_NV21TOYUV24ROW_AVX2 #define HAS_P210TOAR30ROW_AVX2 #define HAS_P210TOARGBROW_AVX2 #define HAS_P410TOAR30ROW_AVX2 #define HAS_P410TOARGBROW_AVX2 -#define HAS_I422TOAR30ROW_AVX2 -#define HAS_I422TOUYVYROW_AVX2 -#define HAS_I422TOYUY2ROW_AVX2 -#define HAS_MERGEUVROW_16_AVX2 -#define HAS_MIRRORUVROW_AVX2 -#define HAS_MULTIPLYROW_16_AVX2 #define HAS_RGBATOYJROW_AVX2 #define HAS_SPLITARGBROW_AVX2 -#define HAS_SPLITXRGBROW_AVX2 #define HAS_SPLITUVROW_16_AVX2 +#define HAS_SPLITXRGBROW_AVX2 #define HAS_SWAPUVROW_AVX2 -// TODO(fbarchard): Fix AVX2 version of YUV24 -// #define HAS_NV21TOYUV24ROW_AVX2 +#define HAS_YUY2TONVUVROW_AVX2 +#if !defined(LIBYUV_BIT_EXACT) +#define HAS_ABGRTOUVJROW_AVX2 +#define HAS_ABGRTOUVROW_AVX2 +#endif #if defined(__x86_64__) || !defined(__pic__) // TODO(fbarchard): fix build error on 
android_full_debug=1 @@ -380,11 +406,22 @@ extern "C" { #define HAS_ARGBTORGB24ROW_AVX512VBMI #endif +// The following are available for AVX512 clang x64 platforms: +// TODO(fbarchard): Port to x86 +#if !defined(LIBYUV_DISABLE_X86) && defined(__x86_64__) && \ + (defined(CLANG_HAS_AVX512)) +#define HAS_I422TOARGBROW_AVX512BW +#endif + // The following are available on Neon platforms: #if !defined(LIBYUV_DISABLE_NEON) && \ (defined(__aarch64__) || defined(__ARM_NEON__) || defined(LIBYUV_NEON)) +#define HAS_AB64TOARGBROW_NEON +#define HAS_ABGRTOUVJROW_NEON #define HAS_ABGRTOUVROW_NEON +#define HAS_ABGRTOYJROW_NEON #define HAS_ABGRTOYROW_NEON +#define HAS_AR64TOARGBROW_NEON #define HAS_ARGB1555TOARGBROW_NEON #define HAS_ARGB1555TOUVROW_NEON #define HAS_ARGB1555TOYROW_NEON @@ -393,16 +430,14 @@ extern "C" { #define HAS_ARGB4444TOYROW_NEON #define HAS_ARGBEXTRACTALPHAROW_NEON #define HAS_ARGBSETROW_NEON +#define HAS_ARGBTOAB64ROW_NEON +#define HAS_ARGBTOAR64ROW_NEON #define HAS_ARGBTOARGB1555ROW_NEON #define HAS_ARGBTOARGB4444ROW_NEON #define HAS_ARGBTORAWROW_NEON #define HAS_ARGBTORGB24ROW_NEON #define HAS_ARGBTORGB565DITHERROW_NEON #define HAS_ARGBTORGB565ROW_NEON -#define HAS_ARGBTOAR64ROW_NEON -#define HAS_ARGBTOAB64ROW_NEON -#define HAS_AR64TOARGBROW_NEON -#define HAS_AB64TOARGBROW_NEON #define HAS_ARGBTOUV444ROW_NEON #define HAS_ARGBTOUVJROW_NEON #define HAS_ARGBTOUVROW_NEON @@ -414,12 +449,16 @@ extern "C" { #define HAS_BGRATOUVROW_NEON #define HAS_BGRATOYROW_NEON #define HAS_BYTETOFLOATROW_NEON +#define HAS_CONVERT16TO8ROW_NEON #define HAS_COPYROW_NEON +#define HAS_DETILEROW_16_NEON +#define HAS_DETILEROW_NEON +#define HAS_DETILESPLITUVROW_NEON +#define HAS_DETILETOYUY2_NEON #define HAS_DIVIDEROW_16_NEON #define HAS_HALFFLOATROW_NEON #define HAS_HALFMERGEUVROW_NEON #define HAS_I400TOARGBROW_NEON -#define HAS_I444ALPHATOARGBROW_NEON #define HAS_I422ALPHATOARGBROW_NEON #define HAS_I422TOARGB1555ROW_NEON #define HAS_I422TOARGB4444ROW_NEON @@ -429,20 +468,24 @@ extern "C" { #define HAS_I422TORGBAROW_NEON #define HAS_I422TOUYVYROW_NEON #define HAS_I422TOYUY2ROW_NEON +#define HAS_I444ALPHATOARGBROW_NEON #define HAS_I444TOARGBROW_NEON +#define HAS_I444TORGB24ROW_NEON +#define HAS_INTERPOLATEROW_16_NEON +#define HAS_INTERPOLATEROW_NEON #define HAS_J400TOARGBROW_NEON #define HAS_MERGEAR64ROW_NEON #define HAS_MERGEARGB16TO8ROW_NEON #define HAS_MERGEARGBROW_NEON +#define HAS_MERGEUVROW_16_NEON +#define HAS_MERGEUVROW_NEON #define HAS_MERGEXR30ROW_NEON #define HAS_MERGEXR64ROW_NEON #define HAS_MERGEXRGB16TO8ROW_NEON #define HAS_MERGEXRGBROW_NEON -#define HAS_MERGEUVROW_NEON -#define HAS_MERGEUVROW_16_NEON #define HAS_MIRRORROW_NEON -#define HAS_MIRRORUVROW_NEON #define HAS_MIRRORSPLITUVROW_NEON +#define HAS_MIRRORUVROW_NEON #define HAS_MULTIPLYROW_16_NEON #define HAS_NV12TOARGBROW_NEON #define HAS_NV12TORGB24ROW_NEON @@ -453,10 +496,12 @@ extern "C" { #define HAS_RAWTOARGBROW_NEON #define HAS_RAWTORGB24ROW_NEON #define HAS_RAWTORGBAROW_NEON +#define HAS_RAWTOUVJROW_NEON #define HAS_RAWTOUVROW_NEON #define HAS_RAWTOYJROW_NEON #define HAS_RAWTOYROW_NEON #define HAS_RGB24TOARGBROW_NEON +#define HAS_RGB24TOUVJROW_NEON #define HAS_RGB24TOUVROW_NEON #define HAS_RGB24TOYJROW_NEON #define HAS_RGB24TOYROW_NEON @@ -468,16 +513,17 @@ extern "C" { #define HAS_RGBATOYROW_NEON #define HAS_SETROW_NEON #define HAS_SPLITARGBROW_NEON -#define HAS_SPLITXRGBROW_NEON #define HAS_SPLITRGBROW_NEON -#define HAS_SPLITUVROW_NEON #define HAS_SPLITUVROW_16_NEON +#define HAS_SPLITUVROW_NEON +#define HAS_SPLITXRGBROW_NEON 
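The LIBYUV_BIT_EXACT guards introduced in the x86 sections above leave the corresponding HAS_* macros undefined, so the run-time dispatch in the conversion sources never selects those SIMD rows and the output stays bit-identical to the C implementation. A simplified sketch of that dispatch pattern (the real selection lives in source/convert*.cc; this wrapper function and its loop are illustrative only):

#include "libyuv/cpu_id.h"
#include "libyuv/row.h"

// Illustrative dispatch: when LIBYUV_BIT_EXACT is defined, HAS_ARGBTOUVROW_SSSE3
// is never set, the #if block compiles away, and the bit-exact C row is used.
static void ArgbToUvRows(const uint8_t* src_argb, int src_stride_argb,
                         uint8_t* dst_u, uint8_t* dst_v,
                         int width, int height) {
  void (*ARGBToUVRow)(const uint8_t*, int, uint8_t*, uint8_t*, int) =
      libyuv::ARGBToUVRow_C;
#if defined(HAS_ARGBTOUVROW_SSSE3)
  if (libyuv::TestCpuFlag(libyuv::kCpuHasSSSE3)) {
    ARGBToUVRow = libyuv::ARGBToUVRow_Any_SSSE3;  // handles any width
    if (width % 16 == 0) {
      ARGBToUVRow = libyuv::ARGBToUVRow_SSSE3;    // full-register fast path
    }
  }
#endif
  // Each call consumes two ARGB rows and emits one half-width U and V row.
  for (int y = 0; y < height - 1; y += 2) {
    ARGBToUVRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    src_argb += src_stride_argb * 2;
    dst_u += (width + 1) / 2;
    dst_v += (width + 1) / 2;
  }
}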
#define HAS_SWAPUVROW_NEON #define HAS_UYVYTOARGBROW_NEON #define HAS_UYVYTOUV422ROW_NEON #define HAS_UYVYTOUVROW_NEON #define HAS_UYVYTOYROW_NEON #define HAS_YUY2TOARGBROW_NEON +#define HAS_YUY2TONVUVROW_NEON #define HAS_YUY2TOUV422ROW_NEON #define HAS_YUY2TOUVROW_NEON #define HAS_YUY2TOYROW_NEON @@ -489,14 +535,13 @@ extern "C" { #define HAS_ARGBCOLORMATRIXROW_NEON #define HAS_ARGBGRAYROW_NEON #define HAS_ARGBMIRRORROW_NEON -#define HAS_RGB24MIRRORROW_NEON #define HAS_ARGBMULTIPLYROW_NEON #define HAS_ARGBQUANTIZEROW_NEON #define HAS_ARGBSEPIAROW_NEON #define HAS_ARGBSHADEROW_NEON #define HAS_ARGBSHUFFLEROW_NEON #define HAS_ARGBSUBTRACTROW_NEON -#define HAS_INTERPOLATEROW_NEON +#define HAS_RGB24MIRRORROW_NEON #define HAS_SOBELROW_NEON #define HAS_SOBELTOPLANEROW_NEON #define HAS_SOBELXROW_NEON @@ -506,12 +551,13 @@ extern "C" { // The following are available on AArch64 platforms: #if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__) -#define HAS_SCALESUMSAMPLES_NEON -#define HAS_GAUSSROW_F32_NEON #define HAS_GAUSSCOL_F32_NEON - +#define HAS_GAUSSROW_F32_NEON +#define HAS_INTERPOLATEROW_16TO8_NEON +#define HAS_SCALESUMSAMPLES_NEON #endif #if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa) +#define HAS_ABGRTOUVJROW_MSA #define HAS_ABGRTOUVROW_MSA #define HAS_ABGRTOYROW_MSA #define HAS_ARGB1555TOARGBROW_MSA @@ -547,14 +593,25 @@ extern "C" { #define HAS_BGRATOYROW_MSA #define HAS_HALFFLOATROW_MSA #define HAS_I400TOARGBROW_MSA +#define HAS_I422ALPHATOARGBROW_MSA +#define HAS_I422TOARGB1555ROW_MSA +#define HAS_I422TOARGB4444ROW_MSA +#define HAS_I422TOARGBROW_MSA +#define HAS_I422TORGB24ROW_MSA +#define HAS_I422TORGB565ROW_MSA +#define HAS_I422TORGBAROW_MSA #define HAS_I422TOUYVYROW_MSA #define HAS_I422TOYUY2ROW_MSA +#define HAS_I444TOARGBROW_MSA #define HAS_INTERPOLATEROW_MSA #define HAS_J400TOARGBROW_MSA #define HAS_MERGEUVROW_MSA #define HAS_MIRRORROW_MSA -#define HAS_MIRRORUVROW_MSA #define HAS_MIRRORSPLITUVROW_MSA +#define HAS_MIRRORUVROW_MSA +#define HAS_NV12TOARGBROW_MSA +#define HAS_NV12TORGB565ROW_MSA +#define HAS_NV21TOARGBROW_MSA #define HAS_RAWTOARGBROW_MSA #define HAS_RAWTORGB24ROW_MSA #define HAS_RAWTOUVROW_MSA @@ -574,87 +631,115 @@ extern "C" { #define HAS_SOBELXYROW_MSA #define HAS_SOBELYROW_MSA #define HAS_SPLITUVROW_MSA +#define HAS_UYVYTOARGBROW_MSA #define HAS_UYVYTOUVROW_MSA #define HAS_UYVYTOYROW_MSA +#define HAS_YUY2TOARGBROW_MSA #define HAS_YUY2TOUV422ROW_MSA #define HAS_YUY2TOUVROW_MSA #define HAS_YUY2TOYROW_MSA #endif -#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A) -#define HAS_ABGRTOUVROW_MMI -#define HAS_ABGRTOYROW_MMI -#define HAS_ARGB1555TOARGBROW_MMI -#define HAS_ARGB1555TOUVROW_MMI -#define HAS_ARGB1555TOYROW_MMI -#define HAS_ARGB4444TOARGBROW_MMI -#define HAS_ARGB4444TOUVROW_MMI -#define HAS_ARGB4444TOYROW_MMI -#define HAS_ARGBADDROW_MMI -#define HAS_ARGBATTENUATEROW_MMI -#define HAS_ARGBBLENDROW_MMI -#define HAS_ARGBCOLORMATRIXROW_MMI -#define HAS_ARGBCOPYALPHAROW_MMI -#define HAS_ARGBCOPYYTOALPHAROW_MMI -#define HAS_ARGBEXTRACTALPHAROW_MMI -#define HAS_ARGBGRAYROW_MMI -#define HAS_ARGBMIRRORROW_MMI -#define HAS_ARGBMULTIPLYROW_MMI -#define HAS_ARGBSEPIAROW_MMI -#define HAS_ARGBSETROW_MMI -#define HAS_ARGBSHADEROW_MMI -#define HAS_ARGBSHUFFLEROW_MMI -#define HAS_ARGBSUBTRACTROW_MMI -#define HAS_ARGBTOARGB1555ROW_MMI -#define HAS_ARGBTOARGB4444ROW_MMI -#define HAS_ARGBTORAWROW_MMI -#define HAS_ARGBTORGB24ROW_MMI -#define HAS_ARGBTORGB565DITHERROW_MMI -#define HAS_ARGBTORGB565ROW_MMI -#define HAS_ARGBTOUV444ROW_MMI -#define 
HAS_ARGBTOUVJROW_MMI -#define HAS_ARGBTOUVROW_MMI -#define HAS_ARGBTOYJROW_MMI -#define HAS_ARGBTOYROW_MMI -#define HAS_BGRATOUVROW_MMI -#define HAS_BGRATOYROW_MMI -#define HAS_BLENDPLANEROW_MMI -#define HAS_COMPUTECUMULATIVESUMROW_MMI -#define HAS_CUMULATIVESUMTOAVERAGEROW_MMI -#define HAS_HALFFLOATROW_MMI -#define HAS_I400TOARGBROW_MMI -#define HAS_I422TOUYVYROW_MMI -#define HAS_I422TOYUY2ROW_MMI -#define HAS_INTERPOLATEROW_MMI -#define HAS_J400TOARGBROW_MMI -#define HAS_MERGERGBROW_MMI -#define HAS_MERGEUVROW_MMI -#define HAS_MIRRORROW_MMI -#define HAS_MIRRORSPLITUVROW_MMI -#define HAS_RAWTOARGBROW_MMI -#define HAS_RAWTORGB24ROW_MMI -#define HAS_RAWTOUVROW_MMI -#define HAS_RAWTOYROW_MMI -#define HAS_RGB24TOARGBROW_MMI -#define HAS_RGB24TOUVROW_MMI -#define HAS_RGB24TOYROW_MMI -#define HAS_RGB565TOARGBROW_MMI -#define HAS_RGB565TOUVROW_MMI -#define HAS_RGB565TOYROW_MMI -#define HAS_RGBATOUVROW_MMI -#define HAS_RGBATOYROW_MMI -#define HAS_SOBELROW_MMI -#define HAS_SOBELTOPLANEROW_MMI -#define HAS_SOBELXROW_MMI -#define HAS_SOBELXYROW_MMI -#define HAS_SOBELYROW_MMI -#define HAS_SPLITRGBROW_MMI -#define HAS_SPLITUVROW_MMI -#define HAS_UYVYTOUVROW_MMI -#define HAS_UYVYTOYROW_MMI -#define HAS_YUY2TOUV422ROW_MMI -#define HAS_YUY2TOUVROW_MMI -#define HAS_YUY2TOYROW_MMI +#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) +#define HAS_ABGRTOUVROW_LSX +#define HAS_ABGRTOYROW_LSX +#define HAS_ARGB1555TOARGBROW_LSX +#define HAS_ARGB1555TOUVROW_LSX +#define HAS_ARGB1555TOYROW_LSX +#define HAS_ARGB4444TOARGBROW_LSX +#define HAS_ARGBBLENDROW_LSX +#define HAS_ARGBCOLORMATRIXROW_LSX +#define HAS_ARGBEXTRACTALPHAROW_LSX +#define HAS_ARGBQUANTIZEROW_LSX +#define HAS_ARGBSETROW_LSX +#define HAS_ARGBTOUVJROW_LSX +#define HAS_ARGBTOYJROW_LSX +#define HAS_BGRATOUVROW_LSX +#define HAS_BGRATOYROW_LSX +#define HAS_I400TOARGBROW_LSX +#define HAS_I444TOARGBROW_LSX +#define HAS_INTERPOLATEROW_LSX +#define HAS_J400TOARGBROW_LSX +#define HAS_MERGEUVROW_LSX +#define HAS_MIRRORSPLITUVROW_LSX +#define HAS_NV12TOARGBROW_LSX +#define HAS_NV12TORGB565ROW_LSX +#define HAS_NV21TOARGBROW_LSX +#define HAS_RAWTOARGBROW_LSX +#define HAS_RAWTORGB24ROW_LSX +#define HAS_RAWTOUVROW_LSX +#define HAS_RAWTOYROW_LSX +#define HAS_RGB24TOARGBROW_LSX +#define HAS_RGB24TOUVROW_LSX +#define HAS_RGB24TOYROW_LSX +#define HAS_RGB565TOARGBROW_LSX +#define HAS_RGB565TOUVROW_LSX +#define HAS_RGB565TOYROW_LSX +#define HAS_RGBATOUVROW_LSX +#define HAS_RGBATOYROW_LSX +#define HAS_SETROW_LSX +#define HAS_SOBELROW_LSX +#define HAS_SOBELTOPLANEROW_LSX +#define HAS_SOBELXYROW_LSX +#define HAS_SPLITUVROW_LSX +#define HAS_UYVYTOARGBROW_LSX +#define HAS_YUY2TOARGBROW_LSX +#endif + +#if !defined(LIBYUV_DISABLE_LASX) && defined(__loongarch_asx) +#define HAS_ARGB1555TOARGBROW_LASX +#define HAS_ARGB1555TOUVROW_LASX +#define HAS_ARGB1555TOYROW_LASX +#define HAS_ARGB4444TOARGBROW_LASX +#define HAS_ARGBADDROW_LASX +#define HAS_ARGBATTENUATEROW_LASX +#define HAS_ARGBGRAYROW_LASX +#define HAS_ARGBMIRRORROW_LASX +#define HAS_ARGBMULTIPLYROW_LASX +#define HAS_ARGBSEPIAROW_LASX +#define HAS_ARGBSHADEROW_LASX +#define HAS_ARGBSHUFFLEROW_LASX +#define HAS_ARGBSUBTRACTROW_LASX +#define HAS_ARGBTOARGB1555ROW_LASX +#define HAS_ARGBTOARGB4444ROW_LASX +#define HAS_ARGBTORAWROW_LASX +#define HAS_ARGBTORGB24ROW_LASX +#define HAS_ARGBTORGB565DITHERROW_LASX +#define HAS_ARGBTORGB565ROW_LASX +#define HAS_ARGBTOUV444ROW_LASX +#define HAS_ARGBTOUVJROW_LASX +#define HAS_ARGBTOUVROW_LASX +#define HAS_ARGBTOYJROW_LASX +#define HAS_ARGBTOYROW_LASX +#define 
HAS_I422ALPHATOARGBROW_LASX +#define HAS_I422TOARGB1555ROW_LASX +#define HAS_I422TOARGB4444ROW_LASX +#define HAS_I422TOARGBROW_LASX +#define HAS_I422TORGB24ROW_LASX +#define HAS_I422TORGB565ROW_LASX +#define HAS_I422TORGBAROW_LASX +#define HAS_I422TOUYVYROW_LASX +#define HAS_I422TOYUY2ROW_LASX +#define HAS_MIRRORROW_LASX +#define HAS_MIRRORUVROW_LASX +#define HAS_NV12TOARGBROW_LASX +#define HAS_NV12TORGB565ROW_LASX +#define HAS_NV21TOARGBROW_LASX +#define HAS_RAWTOARGBROW_LASX +#define HAS_RAWTOUVROW_LASX +#define HAS_RAWTOYROW_LASX +#define HAS_RGB24TOARGBROW_LASX +#define HAS_RGB24TOUVROW_LASX +#define HAS_RGB24TOYROW_LASX +#define HAS_RGB565TOARGBROW_LASX +#define HAS_RGB565TOUVROW_LASX +#define HAS_RGB565TOYROW_LASX +#define HAS_UYVYTOUV422ROW_LASX +#define HAS_UYVYTOUVROW_LASX +#define HAS_UYVYTOYROW_LASX +#define HAS_YUY2TOUV422ROW_LASX +#define HAS_YUY2TOUVROW_LASX +#define HAS_YUY2TOYROW_LASX #endif #if defined(_MSC_VER) && !defined(__CLR_VER) && !defined(__clang__) @@ -821,6 +906,12 @@ void I444ToARGBRow_NEON(const uint8_t* src_y, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); +void I444ToRGB24Row_NEON(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGBRow_NEON(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, @@ -914,7 +1005,7 @@ void I444ToARGBRow_MSA(const uint8_t* src_y, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); -void I444ToARGBRow_MMI(const uint8_t* src_y, +void I444ToARGBRow_LSX(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_argb, @@ -927,18 +1018,24 @@ void I422ToARGBRow_MSA(const uint8_t* src_y, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); +void I422ToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGBARow_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); -void I422ToARGBRow_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); +void I422ToRGBARow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I422AlphaToARGBRow_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, @@ -946,30 +1043,61 @@ void I422AlphaToARGBRow_MSA(const uint8_t* src_y, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); +void I422AlphaToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + const uint8_t* src_a, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGB24Row_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); +void I422ToRGB24Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGB565Row_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_rgb565, const struct YuvConstants* yuvconstants, int width); +void I422ToRGB565Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* 
dst_rgb565, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGB4444Row_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_argb4444, const struct YuvConstants* yuvconstants, int width); +void I422ToARGB4444Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb4444, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGB1555Row_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_argb1555, const struct YuvConstants* yuvconstants, int width); +void I422ToARGB1555Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb1555, + const struct YuvConstants* yuvconstants, + int width); void NV12ToARGBRow_MSA(const uint8_t* src_y, const uint8_t* src_uv, uint8_t* dst_argb, @@ -994,14 +1122,57 @@ void UYVYToARGBRow_MSA(const uint8_t* src_uyvy, const struct YuvConstants* yuvconstants, int width); +void NV12ToARGBRow_LSX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void NV12ToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void NV12ToRGB565Row_LSX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width); +void NV12ToRGB565Row_LASX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width); +void NV21ToARGBRow_LSX(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void NV21ToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void YUY2ToARGBRow_LSX(const uint8_t* src_yuy2, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void UYVYToARGBRow_LSX(const uint8_t* src_uyvy, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); + void ARGBToYRow_AVX2(const uint8_t* src_argb, uint8_t* dst_y, int width); void ARGBToYRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ABGRToYRow_AVX2(const uint8_t* src_abgr, uint8_t* dst_y, int width); void ABGRToYRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBToYRow_SSSE3(const uint8_t* src_argb, uint8_t* dst_y, int width); +void ARGBToYJRow_SSSE3(const uint8_t* src_argb, uint8_t* dst_y, int width); void ARGBToYJRow_AVX2(const uint8_t* src_argb, uint8_t* dst_y, int width); void ARGBToYJRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBToYJRow_SSSE3(const uint8_t* src_argb, uint8_t* dst_y, int width); +void ABGRToYRow_SSSE3(const uint8_t* src_abgr, uint8_t* dst_y, int width); +void ABGRToYJRow_SSSE3(const uint8_t* src_abgr, uint8_t* dst_y, int width); +void ABGRToYJRow_AVX2(const uint8_t* src_abgr, uint8_t* dst_y, int width); +void ABGRToYJRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RGBAToYJRow_AVX2(const uint8_t* src_rgba, uint8_t* dst_y, int width); void RGBAToYJRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RGBAToYJRow_SSSE3(const uint8_t* src_rgba, uint8_t* dst_y, int width); @@ -1015,12 +1186,14 @@ void RAWToYJRow_SSSE3(const uint8_t* src_raw, uint8_t* dst_yj, int width); void RGB24ToYJRow_AVX2(const uint8_t* src_rgb24, uint8_t* dst_yj, int width); void 
RAWToYJRow_AVX2(const uint8_t* src_raw, uint8_t* dst_yj, int width); void ARGBToYRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width); -void ARGBToYJRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width); -void RGBAToYJRow_NEON(const uint8_t* src_rgba, uint8_t* dst_y, int width); +void ARGBToYJRow_NEON(const uint8_t* src_argb, uint8_t* dst_yj, int width); +void ABGRToYJRow_NEON(const uint8_t* src_abgr, uint8_t* dst_yj, int width); +void RGBAToYJRow_NEON(const uint8_t* src_rgba, uint8_t* dst_yj, int width); void ARGBToYRow_MSA(const uint8_t* src_argb0, uint8_t* dst_y, int width); void ARGBToYJRow_MSA(const uint8_t* src_argb0, uint8_t* dst_y, int width); -void ARGBToYRow_MMI(const uint8_t* src_argb0, uint8_t* dst_y, int width); -void ARGBToYJRow_MMI(const uint8_t* src_argb0, uint8_t* dst_y, int width); +void ARGBToYRow_LASX(const uint8_t* src_argb0, uint8_t* dst_y, int width); +void ARGBToYJRow_LSX(const uint8_t* src_argb0, uint8_t* dst_y, int width); +void ARGBToYJRow_LASX(const uint8_t* src_argb0, uint8_t* dst_y, int width); void ARGBToUV444Row_NEON(const uint8_t* src_argb, uint8_t* dst_u, uint8_t* dst_v, @@ -1039,20 +1212,25 @@ void ARGBToUVRow_MSA(const uint8_t* src_argb, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGBToUV444Row_MMI(const uint8_t* src_argb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void ARGBToUVRow_MMI(const uint8_t* src_argb, - int src_stride_argb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void ARGBToUVRow_LASX(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void ARGBToUV444Row_LASX(const uint8_t* src_argb, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void ARGBToUVJRow_NEON(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_NEON(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_uj, + uint8_t* dst_vj, + int width); void BGRAToUVRow_NEON(const uint8_t* src_bgra, int src_stride_bgra, uint8_t* dst_u, @@ -1078,6 +1256,16 @@ void RAWToUVRow_NEON(const uint8_t* src_raw, uint8_t* dst_u, uint8_t* dst_v, int width); +void RGB24ToUVJRow_NEON(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVJRow_NEON(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void RGB565ToUVRow_NEON(const uint8_t* src_rgb565, int src_stride_rgb565, uint8_t* dst_u, @@ -1098,6 +1286,11 @@ void ARGBToUVJRow_MSA(const uint8_t* src_rgb, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_MSA(const uint8_t* src_rgb, + int src_stride_rgb, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void BGRAToUVRow_MSA(const uint8_t* src_rgb, int src_stride_rgb, uint8_t* dst_u, @@ -1133,51 +1326,71 @@ void ARGB1555ToUVRow_MSA(const uint8_t* src_argb1555, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGBToUVJRow_MMI(const uint8_t* src_rgb, - int src_stride_rgb, +void BGRAToUVRow_LSX(const uint8_t* src_bgra, + int src_stride_bgra, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void ABGRToUVRow_LSX(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGBAToUVRow_LSX(const uint8_t* src_rgba, + int src_stride_rgba, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void ARGBToUVJRow_LSX(const uint8_t* src_argb, + int src_stride_argb, uint8_t* dst_u, uint8_t* dst_v, int width); -void BGRAToUVRow_MMI(const uint8_t* src_rgb, - int src_stride_rgb, - uint8_t* dst_u, - uint8_t* dst_v, - int 
width); -void ABGRToUVRow_MMI(const uint8_t* src_rgb, - int src_stride_rgb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RGBAToUVRow_MMI(const uint8_t* src_rgb, - int src_stride_rgb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RGB24ToUVRow_MMI(const uint8_t* src_rgb, - int src_stride_rgb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RAWToUVRow_MMI(const uint8_t* src_rgb, - int src_stride_rgb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RGB565ToUVRow_MMI(const uint8_t* src_rgb565, - int src_stride_rgb565, +void ARGBToUVJRow_LASX(const uint8_t* src_argb, + int src_stride_argb, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGB1555ToUVRow_MMI(const uint8_t* src_argb1555, +void ARGB1555ToUVRow_LSX(const uint8_t* src_argb1555, int src_stride_argb1555, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGB4444ToUVRow_MMI(const uint8_t* src_argb4444, - int src_stride_argb4444, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void ARGB1555ToUVRow_LASX(const uint8_t* src_argb1555, + int src_stride_argb1555, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB565ToUVRow_LSX(const uint8_t* src_rgb565, + int src_stride_rgb565, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB565ToUVRow_LASX(const uint8_t* src_rgb565, + int src_stride_rgb565, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB24ToUVRow_LSX(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB24ToUVRow_LASX(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVRow_LSX(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVRow_LASX(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void BGRAToYRow_NEON(const uint8_t* src_bgra, uint8_t* dst_y, int width); void ABGRToYRow_NEON(const uint8_t* src_abgr, uint8_t* dst_y, int width); void RGBAToYRow_NEON(const uint8_t* src_rgba, uint8_t* dst_y, int width); @@ -1199,17 +1412,24 @@ void RGB24ToYRow_MSA(const uint8_t* src_argb, uint8_t* dst_y, int width); void RAWToYRow_MSA(const uint8_t* src_argb, uint8_t* dst_y, int width); void RGB565ToYRow_MSA(const uint8_t* src_rgb565, uint8_t* dst_y, int width); void ARGB1555ToYRow_MSA(const uint8_t* src_argb1555, uint8_t* dst_y, int width); -void BGRAToYRow_MMI(const uint8_t* src_argb, uint8_t* dst_y, int width); -void ABGRToYRow_MMI(const uint8_t* src_argb, uint8_t* dst_y, int width); -void RGBAToYRow_MMI(const uint8_t* src_argb, uint8_t* dst_y, int width); -void RGB24ToYRow_MMI(const uint8_t* src_argb, uint8_t* dst_y, int width); -void RAWToYRow_MMI(const uint8_t* src_argb, uint8_t* dst_y, int width); -void RGB565ToYRow_MMI(const uint8_t* src_rgb565, uint8_t* dst_y, int width); -void ARGB1555ToYRow_MMI(const uint8_t* src_argb1555, uint8_t* dst_y, int width); -void ARGB4444ToYRow_MMI(const uint8_t* src_argb4444, uint8_t* dst_y, int width); + +void BGRAToYRow_LSX(const uint8_t* src_bgra, uint8_t* dst_y, int width); +void ABGRToYRow_LSX(const uint8_t* src_abgr, uint8_t* dst_y, int width); +void RGBAToYRow_LSX(const uint8_t* src_rgba, uint8_t* dst_y, int width); +void ARGB1555ToYRow_LSX(const uint8_t* src_argb1555, uint8_t* dst_y, int width); +void ARGB1555ToYRow_LASX(const uint8_t* src_argb1555, + uint8_t* dst_y, + int width); +void RGB565ToYRow_LSX(const uint8_t* src_rgb565, uint8_t* dst_y, int width); +void RGB565ToYRow_LASX(const uint8_t* src_rgb565, uint8_t* dst_y, int 
width); +void RGB24ToYRow_LSX(const uint8_t* src_rgb24, uint8_t* dst_y, int width); +void RGB24ToYRow_LASX(const uint8_t* src_rgb24, uint8_t* dst_y, int width); +void RAWToYRow_LSX(const uint8_t* src_raw, uint8_t* dst_y, int width); +void RAWToYRow_LASX(const uint8_t* src_raw, uint8_t* dst_y, int width); void ARGBToYRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width); void ARGBToYJRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width); +void ABGRToYJRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width); void RGBAToYJRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width); void BGRAToYRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width); void ABGRToYRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width); @@ -1223,6 +1443,7 @@ void ARGB1555ToYRow_C(const uint8_t* src_argb1555, uint8_t* dst_y, int width); void ARGB4444ToYRow_C(const uint8_t* src_argb4444, uint8_t* dst_y, int width); void ARGBToYRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBToYJRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ABGRToYJRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RGBAToYJRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void BGRAToYRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ABGRToYRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); @@ -1237,6 +1458,7 @@ void RGB24ToYJRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RAWToYJRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBToYRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBToYJRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ABGRToYJRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RGBAToYJRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void BGRAToYRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ABGRToYRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); @@ -1263,21 +1485,27 @@ void RGB565ToYRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGB1555ToYRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void BGRAToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ABGRToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RGBAToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBToYJRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RGB24ToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RAWToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RGB565ToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGB1555ToYRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); -void ARGB4444ToYRow_Any_MMI(const uint8_t* src_ptr, + +void BGRAToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ABGRToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGBAToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGBToYJRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGB24ToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGB565ToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RAWToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* 
dst_ptr, int width); +void ARGB1555ToYRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGB565ToYRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGB24ToYRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGBToYJRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGBToYRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RAWToYRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGB1555ToYRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); + void ARGBToUVRow_AVX2(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_u, @@ -1293,6 +1521,11 @@ void ARGBToUVJRow_AVX2(const uint8_t* src_argb, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_AVX2(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void ARGBToUVRow_SSSE3(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_u, @@ -1303,6 +1536,11 @@ void ARGBToUVJRow_SSSE3(const uint8_t* src_argb, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_SSSE3(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void BGRAToUVRow_SSSE3(const uint8_t* src_bgra, int src_stride_bgra, uint8_t* dst_u, @@ -1333,6 +1571,11 @@ void ARGBToUVJRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_Any_AVX2(const uint8_t* src_ptr, + int src_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void ARGBToUVRow_Any_SSSE3(const uint8_t* src_ptr, int src_stride, uint8_t* dst_u, @@ -1343,6 +1586,11 @@ void ARGBToUVJRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_Any_SSSE3(const uint8_t* src_ptr, + int src_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void BGRAToUVRow_Any_SSSE3(const uint8_t* src_ptr, int src_stride, uint8_t* dst_u, @@ -1376,20 +1624,25 @@ void ARGBToUVRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGBToUV444Row_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void ARGBToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void ARGBToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void ARGBToUV444Row_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void ARGBToUVJRow_Any_NEON(const uint8_t* src_ptr, int src_stride, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_Any_NEON(const uint8_t* src_ptr, + int src_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void BGRAToUVRow_Any_NEON(const uint8_t* src_ptr, int src_stride, uint8_t* dst_u, @@ -1415,6 +1668,16 @@ void RAWToUVRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void RGB24ToUVJRow_Any_NEON(const uint8_t* src_ptr, + int src_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVJRow_Any_NEON(const uint8_t* src_ptr, + int src_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void RGB565ToUVRow_Any_NEON(const uint8_t* src_ptr, int src_stride, uint8_t* dst_u, @@ -1470,51 +1733,71 @@ void ARGB1555ToUVRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGBToUVJRow_Any_MMI(const uint8_t* src_ptr, +void ABGRToUVRow_Any_LSX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, 
+ uint8_t* dst_v, + int width); +void BGRAToUVRow_Any_LSX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGBAToUVRow_Any_LSX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void ARGBToUVJRow_Any_LSX(const uint8_t* src_ptr, int src_stride_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void BGRAToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void ABGRToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RGBAToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RGB24ToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RAWToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void RGB565ToUVRow_Any_MMI(const uint8_t* src_ptr, +void ARGBToUVJRow_Any_LASX(const uint8_t* src_ptr, int src_stride_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGB1555ToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); -void ARGB4444ToUVRow_Any_MMI(const uint8_t* src_ptr, +void ARGB1555ToUVRow_Any_LSX(const uint8_t* src_ptr, int src_stride_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void ARGB1555ToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB565ToUVRow_Any_LSX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB565ToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB24ToUVRow_Any_LSX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RGB24ToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVRow_Any_LSX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void ARGBToUVRow_C(const uint8_t* src_rgb, int src_stride_rgb, uint8_t* dst_u, @@ -1525,16 +1808,16 @@ void ARGBToUVJRow_C(const uint8_t* src_rgb, uint8_t* dst_u, uint8_t* dst_v, int width); +void ABGRToUVJRow_C(const uint8_t* src_rgb, + int src_stride_rgb, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void ARGBToUVRow_C(const uint8_t* src_rgb, int src_stride_rgb, uint8_t* dst_u, uint8_t* dst_v, int width); -void ARGBToUVJRow_C(const uint8_t* src_rgb, - int src_stride_rgb, - uint8_t* dst_u, - uint8_t* dst_v, - int width); void BGRAToUVRow_C(const uint8_t* src_rgb, int src_stride_rgb, uint8_t* dst_u, @@ -1550,6 +1833,11 @@ void RGBAToUVRow_C(const uint8_t* src_rgb, uint8_t* dst_u, uint8_t* dst_v, int width); +void RGBAToUVJRow_C(const uint8_t* src_rgb, + int src_stride_rgb, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void RGB24ToUVRow_C(const uint8_t* src_rgb, int src_stride_rgb, uint8_t* dst_u, @@ -1560,6 +1848,16 @@ void RAWToUVRow_C(const uint8_t* src_rgb, uint8_t* dst_u, uint8_t* dst_v, int width); +void RGB24ToUVJRow_C(const uint8_t* src_rgb, + int src_stride_rgb, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void RAWToUVJRow_C(const uint8_t* src_rgb, + int src_stride_rgb, + uint8_t* 
dst_u, + uint8_t* dst_v, + int width); void RGB565ToUVRow_C(const uint8_t* src_rgb565, int src_stride_rgb565, uint8_t* dst_u, @@ -1594,23 +1892,25 @@ void MirrorRow_AVX2(const uint8_t* src, uint8_t* dst, int width); void MirrorRow_SSSE3(const uint8_t* src, uint8_t* dst, int width); void MirrorRow_NEON(const uint8_t* src, uint8_t* dst, int width); void MirrorRow_MSA(const uint8_t* src, uint8_t* dst, int width); -void MirrorRow_MMI(const uint8_t* src, uint8_t* dst, int width); +void MirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width); void MirrorRow_C(const uint8_t* src, uint8_t* dst, int width); void MirrorRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorRow_Any_SSE2(const uint8_t* src, uint8_t* dst, int width); void MirrorRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void MirrorRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void MirrorRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorUVRow_AVX2(const uint8_t* src_uv, uint8_t* dst_uv, int width); void MirrorUVRow_SSSE3(const uint8_t* src_uv, uint8_t* dst_uv, int width); void MirrorUVRow_NEON(const uint8_t* src_uv, uint8_t* dst_uv, int width); void MirrorUVRow_MSA(const uint8_t* src_uv, uint8_t* dst_uv, int width); +void MirrorUVRow_LASX(const uint8_t* src_uv, uint8_t* dst_uv, int width); void MirrorUVRow_C(const uint8_t* src_uv, uint8_t* dst_uv, int width); void MirrorUVRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorUVRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorUVRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorUVRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void MirrorUVRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void MirrorSplitUVRow_SSSE3(const uint8_t* src, uint8_t* dst_u, @@ -1624,7 +1924,7 @@ void MirrorSplitUVRow_MSA(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* dst_v, int width); -void MirrorSplitUVRow_MMI(const uint8_t* src_uv, +void MirrorSplitUVRow_LSX(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* dst_v, int width); @@ -1637,7 +1937,7 @@ void ARGBMirrorRow_AVX2(const uint8_t* src, uint8_t* dst, int width); void ARGBMirrorRow_SSE2(const uint8_t* src, uint8_t* dst, int width); void ARGBMirrorRow_NEON(const uint8_t* src_argb, uint8_t* dst_argb, int width); void ARGBMirrorRow_MSA(const uint8_t* src, uint8_t* dst, int width); -void ARGBMirrorRow_MMI(const uint8_t* src, uint8_t* dst, int width); +void ARGBMirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width); void ARGBMirrorRow_C(const uint8_t* src, uint8_t* dst, int width); void ARGBMirrorRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, @@ -1649,7 +1949,9 @@ void ARGBMirrorRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBMirrorRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBMirrorRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGBMirrorRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); void RGB24MirrorRow_SSSE3(const uint8_t* src_rgb24, uint8_t* dst_rgb24, @@ -1685,7 +1987,7 @@ void SplitUVRow_MSA(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* dst_v, int width); -void SplitUVRow_MMI(const uint8_t* src_uv, +void SplitUVRow_LSX(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* 
dst_v, int width); @@ -1705,11 +2007,121 @@ void SplitUVRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void SplitUVRow_Any_MMI(const uint8_t* src_ptr, +void SplitUVRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); - +void DetileRow_C(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_NEON(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_Any_NEON(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_SSE2(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_Any_SSE2(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_AVX(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_Any_AVX(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width); +void DetileRow_16_C(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileRow_16_NEON(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileRow_16_Any_NEON(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileRow_16_SSE2(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileRow_16_Any_SSE2(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileRow_16_AVX(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileRow_16_Any_AVX(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width); +void DetileSplitUVRow_C(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void DetileSplitUVRow_SSSE3(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void DetileSplitUVRow_Any_SSSE3(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void DetileSplitUVRow_NEON(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void DetileSplitUVRow_Any_NEON(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width); +void DetileToYUY2_C(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width); +void DetileToYUY2_SSE2(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width); +void DetileToYUY2_Any_SSE2(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width); +void DetileToYUY2_NEON(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width); +void DetileToYUY2_Any_NEON(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width); void MergeUVRow_C(const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_uv, @@ -1730,7 +2142,7 @@ void MergeUVRow_MSA(const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_uv, int width); -void MergeUVRow_MMI(const uint8_t* src_u, +void MergeUVRow_LSX(const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_uv, 
int width); @@ -1750,7 +2162,7 @@ void MergeUVRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void MergeUVRow_Any_MMI(const uint8_t* y_buf, +void MergeUVRow_Any_LSX(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); @@ -1798,11 +2210,6 @@ void SplitRGBRow_NEON(const uint8_t* src_rgb, uint8_t* dst_g, uint8_t* dst_b, int width); -void SplitRGBRow_MMI(const uint8_t* src_rgb, - uint8_t* dst_r, - uint8_t* dst_g, - uint8_t* dst_b, - int width); void SplitRGBRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_r, uint8_t* dst_g, @@ -1813,11 +2220,6 @@ void SplitRGBRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_g, uint8_t* dst_b, int width); -void SplitRGBRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_r, - uint8_t* dst_g, - uint8_t* dst_b, - int width); void MergeRGBRow_C(const uint8_t* src_r, const uint8_t* src_g, @@ -1834,11 +2236,6 @@ void MergeRGBRow_NEON(const uint8_t* src_r, const uint8_t* src_b, uint8_t* dst_rgb, int width); -void MergeRGBRow_MMI(const uint8_t* src_r, - const uint8_t* src_g, - const uint8_t* src_b, - uint8_t* dst_rgb, - int width); void MergeRGBRow_Any_SSSE3(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, @@ -1849,11 +2246,6 @@ void MergeRGBRow_Any_NEON(const uint8_t* src_r, const uint8_t* src_b, uint8_t* dst_rgb, int width); -void MergeRGBRow_Any_MMI(const uint8_t* src_r, - const uint8_t* src_g, - const uint8_t* src_b, - uint8_t* dst_rgb, - int width); void MergeARGBRow_C(const uint8_t* src_r, const uint8_t* src_g, const uint8_t* src_b, @@ -2339,6 +2731,14 @@ void Convert16To8Row_Any_AVX2(const uint16_t* src_ptr, uint8_t* dst_ptr, int scale, int width); +void Convert16To8Row_NEON(const uint16_t* src_y, + uint8_t* dst_y, + int scale, + int width); +void Convert16To8Row_Any_NEON(const uint16_t* src_ptr, + uint8_t* dst_ptr, + int scale, + int width); void CopyRow_SSE2(const uint8_t* src, uint8_t* dst, int width); void CopyRow_AVX(const uint8_t* src, uint8_t* dst, int width); @@ -2355,16 +2755,12 @@ void CopyRow_16_C(const uint16_t* src, uint16_t* dst, int count); void ARGBCopyAlphaRow_C(const uint8_t* src, uint8_t* dst, int width); void ARGBCopyAlphaRow_SSE2(const uint8_t* src, uint8_t* dst, int width); void ARGBCopyAlphaRow_AVX2(const uint8_t* src, uint8_t* dst, int width); -void ARGBCopyAlphaRow_MMI(const uint8_t* src, uint8_t* dst, int width); void ARGBCopyAlphaRow_Any_SSE2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBCopyAlphaRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBCopyAlphaRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); void ARGBExtractAlphaRow_C(const uint8_t* src_argb, uint8_t* dst_a, int width); void ARGBExtractAlphaRow_SSE2(const uint8_t* src_argb, @@ -2379,7 +2775,7 @@ void ARGBExtractAlphaRow_NEON(const uint8_t* src_argb, void ARGBExtractAlphaRow_MSA(const uint8_t* src_argb, uint8_t* dst_a, int width); -void ARGBExtractAlphaRow_MMI(const uint8_t* src_argb, +void ARGBExtractAlphaRow_LSX(const uint8_t* src_argb, uint8_t* dst_a, int width); void ARGBExtractAlphaRow_Any_SSE2(const uint8_t* src_ptr, @@ -2394,31 +2790,29 @@ void ARGBExtractAlphaRow_Any_NEON(const uint8_t* src_ptr, void ARGBExtractAlphaRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBExtractAlphaRow_Any_MMI(const uint8_t* src_ptr, +void ARGBExtractAlphaRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBCopyYToAlphaRow_C(const uint8_t* src, uint8_t* dst, int width); void 
ARGBCopyYToAlphaRow_SSE2(const uint8_t* src, uint8_t* dst, int width); void ARGBCopyYToAlphaRow_AVX2(const uint8_t* src, uint8_t* dst, int width); -void ARGBCopyYToAlphaRow_MMI(const uint8_t* src, uint8_t* dst, int width); void ARGBCopyYToAlphaRow_Any_SSE2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGBCopyYToAlphaRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBCopyYToAlphaRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); void SetRow_C(uint8_t* dst, uint8_t v8, int width); void SetRow_MSA(uint8_t* dst, uint8_t v8, int width); void SetRow_X86(uint8_t* dst, uint8_t v8, int width); void SetRow_ERMS(uint8_t* dst, uint8_t v8, int width); void SetRow_NEON(uint8_t* dst, uint8_t v8, int width); +void SetRow_LSX(uint8_t* dst, uint8_t v8, int width); void SetRow_Any_X86(uint8_t* dst_ptr, uint8_t v32, int width); void SetRow_Any_NEON(uint8_t* dst_ptr, uint8_t v32, int width); +void SetRow_Any_LSX(uint8_t* dst_ptr, uint8_t v32, int width); void ARGBSetRow_C(uint8_t* dst_argb, uint32_t v32, int width); void ARGBSetRow_X86(uint8_t* dst_argb, uint32_t v32, int width); @@ -2426,8 +2820,8 @@ void ARGBSetRow_NEON(uint8_t* dst, uint32_t v32, int width); void ARGBSetRow_Any_NEON(uint8_t* dst_ptr, uint32_t v32, int width); void ARGBSetRow_MSA(uint8_t* dst_argb, uint32_t v32, int width); void ARGBSetRow_Any_MSA(uint8_t* dst_ptr, uint32_t v32, int width); -void ARGBSetRow_MMI(uint8_t* dst_argb, uint32_t v32, int width); -void ARGBSetRow_Any_MMI(uint8_t* dst_ptr, uint32_t v32, int width); +void ARGBSetRow_LSX(uint8_t* dst_argb, uint32_t v32, int width); +void ARGBSetRow_Any_LSX(uint8_t* dst_ptr, uint32_t v32, int width); // ARGBShufflers for BGRAToARGB etc. void ARGBShuffleRow_C(const uint8_t* src_argb, @@ -2450,10 +2844,10 @@ void ARGBShuffleRow_MSA(const uint8_t* src_argb, uint8_t* dst_argb, const uint8_t* shuffler, int width); -void ARGBShuffleRow_MMI(const uint8_t* src_argb, - uint8_t* dst_argb, - const uint8_t* shuffler, - int width); +void ARGBShuffleRow_LASX(const uint8_t* src_argb, + uint8_t* dst_argb, + const uint8_t* shuffler, + int width); void ARGBShuffleRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, const uint8_t* param, @@ -2470,10 +2864,10 @@ void ARGBShuffleRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, const uint8_t* param, int width); -void ARGBShuffleRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - const uint8_t* param, - int width); +void ARGBShuffleRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + const uint8_t* param, + int width); void RGB24ToARGBRow_SSSE3(const uint8_t* src_rgb24, uint8_t* dst_argb, @@ -2498,41 +2892,54 @@ void RGB24ToARGBRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_argb, int width); void RGB24ToARGBRow_MSA(const uint8_t* src_rgb24, uint8_t* dst_argb, int width); -void RGB24ToARGBRow_MMI(const uint8_t* src_rgb24, uint8_t* dst_argb, int width); +void RGB24ToARGBRow_LSX(const uint8_t* src_rgb24, uint8_t* dst_argb, int width); +void RGB24ToARGBRow_LASX(const uint8_t* src_rgb24, + uint8_t* dst_argb, + int width); void RAWToARGBRow_NEON(const uint8_t* src_raw, uint8_t* dst_argb, int width); void RAWToRGBARow_NEON(const uint8_t* src_raw, uint8_t* dst_rgba, int width); void RAWToARGBRow_MSA(const uint8_t* src_raw, uint8_t* dst_argb, int width); -void RAWToARGBRow_MMI(const uint8_t* src_raw, uint8_t* dst_argb, int width); +void RAWToARGBRow_LSX(const uint8_t* src_raw, uint8_t* dst_argb, int width); +void RAWToARGBRow_LASX(const uint8_t* src_raw, uint8_t* dst_argb, int width); void 
RAWToRGB24Row_NEON(const uint8_t* src_raw, uint8_t* dst_rgb24, int width); void RAWToRGB24Row_MSA(const uint8_t* src_raw, uint8_t* dst_rgb24, int width); -void RAWToRGB24Row_MMI(const uint8_t* src_raw, uint8_t* dst_rgb24, int width); +void RAWToRGB24Row_LSX(const uint8_t* src_raw, uint8_t* dst_rgb24, int width); void RGB565ToARGBRow_NEON(const uint8_t* src_rgb565, uint8_t* dst_argb, int width); void RGB565ToARGBRow_MSA(const uint8_t* src_rgb565, uint8_t* dst_argb, int width); -void RGB565ToARGBRow_MMI(const uint8_t* src_rgb565, +void RGB565ToARGBRow_LSX(const uint8_t* src_rgb565, uint8_t* dst_argb, int width); +void RGB565ToARGBRow_LASX(const uint8_t* src_rgb565, + uint8_t* dst_argb, + int width); void ARGB1555ToARGBRow_NEON(const uint8_t* src_argb1555, uint8_t* dst_argb, int width); void ARGB1555ToARGBRow_MSA(const uint8_t* src_argb1555, uint8_t* dst_argb, int width); -void ARGB1555ToARGBRow_MMI(const uint8_t* src_argb1555, +void ARGB1555ToARGBRow_LSX(const uint8_t* src_argb1555, uint8_t* dst_argb, int width); +void ARGB1555ToARGBRow_LASX(const uint8_t* src_argb1555, + uint8_t* dst_argb, + int width); void ARGB4444ToARGBRow_NEON(const uint8_t* src_argb4444, uint8_t* dst_argb, int width); void ARGB4444ToARGBRow_MSA(const uint8_t* src_argb4444, uint8_t* dst_argb, int width); -void ARGB4444ToARGBRow_MMI(const uint8_t* src_argb4444, +void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444, uint8_t* dst_argb, int width); +void ARGB4444ToARGBRow_LASX(const uint8_t* src_argb4444, + uint8_t* dst_argb, + int width); void RGB24ToARGBRow_C(const uint8_t* src_rgb24, uint8_t* dst_argb, int width); void RAWToARGBRow_C(const uint8_t* src_raw, uint8_t* dst_argb, int width); void RAWToRGBARow_C(const uint8_t* src_raw, uint8_t* dst_rgba, int width); @@ -2587,46 +2994,59 @@ void RGB24ToARGBRow_Any_NEON(const uint8_t* src_ptr, void RGB24ToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RGB24ToARGBRow_Any_MMI(const uint8_t* src_ptr, +void RGB24ToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGB24ToARGBRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); void RAWToARGBRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RAWToRGBARow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RAWToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RAWToARGBRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RAWToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RAWToARGBRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RAWToRGB24Row_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RAWToRGB24Row_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RAWToRGB24Row_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RAWToRGB24Row_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RGB565ToARGBRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void RGB565ToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void RGB565ToARGBRow_Any_MMI(const uint8_t* src_ptr, +void RGB565ToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void RGB565ToARGBRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); void ARGB1555ToARGBRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGB1555ToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void 
ARGB1555ToARGBRow_Any_MMI(const uint8_t* src_ptr, +void ARGB4444ToARGBRow_Any_NEON(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); +void ARGB1555ToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGB4444ToARGBRow_Any_NEON(const uint8_t* src_ptr, +void ARGB1555ToARGBRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void ARGB4444ToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGB4444ToARGBRow_Any_MMI(const uint8_t* src_ptr, +void ARGB4444ToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGB4444ToARGBRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); void ARGBToRGB24Row_SSSE3(const uint8_t* src, uint8_t* dst, int width); void ARGBToRAWRow_SSSE3(const uint8_t* src, uint8_t* dst, int width); @@ -2694,20 +3114,20 @@ void ARGBToRGB565DitherRow_MSA(const uint8_t* src_argb, uint8_t* dst_rgb, const uint32_t dither4, int width); +void ARGBToRGB565DitherRow_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + const uint32_t dither4, + int width); -void ARGBToRGB24Row_MMI(const uint8_t* src_argb, uint8_t* dst_rgb, int width); -void ARGBToRAWRow_MMI(const uint8_t* src_argb, uint8_t* dst_rgb, int width); -void ARGBToRGB565Row_MMI(const uint8_t* src_argb, uint8_t* dst_rgb, int width); -void ARGBToARGB1555Row_MMI(const uint8_t* src_argb, - uint8_t* dst_rgb, - int width); -void ARGBToARGB4444Row_MMI(const uint8_t* src_argb, - uint8_t* dst_rgb, - int width); -void ARGBToRGB565DitherRow_MMI(const uint8_t* src_argb, - uint8_t* dst_rgb, - const uint32_t dither4, - int width); +void ARGBToRGB24Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width); +void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width); +void ARGBToRGB565Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width); +void ARGBToARGB1555Row_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + int width); +void ARGBToARGB4444Row_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + int width); void ARGBToRGBARow_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width); void ARGBToRGB24Row_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width); @@ -2787,7 +3207,7 @@ void J400ToARGBRow_SSE2(const uint8_t* src_y, uint8_t* dst_argb, int width); void J400ToARGBRow_AVX2(const uint8_t* src_y, uint8_t* dst_argb, int width); void J400ToARGBRow_NEON(const uint8_t* src_y, uint8_t* dst_argb, int width); void J400ToARGBRow_MSA(const uint8_t* src_y, uint8_t* dst_argb, int width); -void J400ToARGBRow_MMI(const uint8_t* src_y, uint8_t* dst_argb, int width); +void J400ToARGBRow_LSX(const uint8_t* src_y, uint8_t* dst_argb, int width); void J400ToARGBRow_C(const uint8_t* src_y, uint8_t* dst_argb, int width); void J400ToARGBRow_Any_SSE2(const uint8_t* src_ptr, uint8_t* dst_ptr, @@ -2799,7 +3219,7 @@ void J400ToARGBRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void J400ToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void J400ToARGBRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void J400ToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void I444ToARGBRow_C(const uint8_t* src_y, const uint8_t* src_u, @@ -2807,6 +3227,12 @@ void I444ToARGBRow_C(const uint8_t* src_y, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width); +void I444ToRGB24Row_C(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, + int width); void 
I422ToARGBRow_C(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, @@ -2977,6 +3403,12 @@ void I422ToARGBRow_AVX2(const uint8_t* y_buf, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); +void I422ToARGBRow_AVX512BW(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGBARow_AVX2(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, @@ -2995,6 +3427,18 @@ void I444ToARGBRow_AVX2(const uint8_t* y_buf, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); +void I444ToRGB24Row_SSSE3(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width); +void I444ToRGB24Row_AVX2(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGBRow_SSSE3(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, @@ -3177,6 +3621,10 @@ void NV21ToRGB24Row_AVX2(const uint8_t* src_y, uint8_t* dst_rgb24, const struct YuvConstants* yuvconstants, int width); +void NV21ToYUV24Row_SSSE3(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_yuv24, + int width); void NV21ToYUV24Row_AVX2(const uint8_t* src_y, const uint8_t* src_vu, uint8_t* dst_yuv24, @@ -3314,6 +3762,12 @@ void I422ToARGBRow_Any_AVX2(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422ToARGBRow_Any_AVX512BW(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGBARow_Any_AVX2(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, @@ -3326,12 +3780,24 @@ void I444ToARGBRow_Any_SSSE3(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I444ToRGB24Row_Any_SSSE3(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I444ToARGBRow_Any_AVX2(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I444ToRGB24Row_Any_AVX2(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGBRow_Any_SSSE3(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, @@ -3518,9 +3984,13 @@ void NV21ToRGB24Row_Any_AVX2(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); -void NV21ToYUV24Row_Any_AVX2(const uint8_t* src_y, - const uint8_t* src_vu, - uint8_t* dst_yuv24, +void NV21ToYUV24Row_Any_SSSE3(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + int width); +void NV21ToYUV24Row_Any_AVX2(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, int width); void NV12ToRGB565Row_Any_SSSE3(const uint8_t* y_buf, const uint8_t* uv_buf, @@ -3663,7 +4133,7 @@ void I400ToARGBRow_MSA(const uint8_t* src_y, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); -void I400ToARGBRow_MMI(const uint8_t* src_y, +void I400ToARGBRow_LSX(const uint8_t* src_y, uint8_t* dst_argb, const struct YuvConstants* yuvconstants, int width); @@ -3683,7 +4153,7 @@ void I400ToARGBRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, const struct 
YuvConstants* yuvconstants, int width); -void I400ToARGBRow_Any_MMI(const uint8_t* src_ptr, +void I400ToARGBRow_Any_LSX(const uint8_t* src_ptr, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); @@ -3701,7 +4171,7 @@ void ARGBBlendRow_MSA(const uint8_t* src_argb0, const uint8_t* src_argb1, uint8_t* dst_argb, int width); -void ARGBBlendRow_MMI(const uint8_t* src_argb0, +void ARGBBlendRow_LSX(const uint8_t* src_argb0, const uint8_t* src_argb1, uint8_t* dst_argb, int width); @@ -3731,16 +4201,6 @@ void BlendPlaneRow_Any_AVX2(const uint8_t* y_buf, const uint8_t* v_buf, uint8_t* dst_ptr, int width); -void BlendPlaneRow_MMI(const uint8_t* src0, - const uint8_t* src1, - const uint8_t* alpha, - uint8_t* dst, - int width); -void BlendPlaneRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - int width); void BlendPlaneRow_C(const uint8_t* src0, const uint8_t* src1, const uint8_t* alpha, @@ -3785,14 +4245,14 @@ void ARGBMultiplyRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void ARGBMultiplyRow_MMI(const uint8_t* src_argb0, - const uint8_t* src_argb1, - uint8_t* dst_argb, - int width); -void ARGBMultiplyRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - int width); +void ARGBMultiplyRow_LASX(const uint8_t* src_argb0, + const uint8_t* src_argb1, + uint8_t* dst_argb, + int width); +void ARGBMultiplyRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + int width); // ARGB add images. void ARGBAddRow_C(const uint8_t* src_argb, @@ -3831,14 +4291,14 @@ void ARGBAddRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void ARGBAddRow_MMI(const uint8_t* src_argb0, - const uint8_t* src_argb1, - uint8_t* dst_argb, - int width); -void ARGBAddRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - int width); +void ARGBAddRow_LASX(const uint8_t* src_argb0, + const uint8_t* src_argb1, + uint8_t* dst_argb, + int width); +void ARGBAddRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + int width); // ARGB subtract images. Same API as Blend, but these require // pointer and width alignment for SSE2. 
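The ARGBAddRow/ARGBSubtractRow families above (this change swaps their MMI variants for LASX ones) are per-pixel, per-channel operations on interleaved ARGB rows; the SIMD variants differ only in vector width. A minimal C sketch of the add case — a hypothetical illustration rather than the library's actual kernel, assuming width counts pixels and the result saturates at 255:

#include <stdint.h>

// Hypothetical reference loop: add two ARGB rows channel-by-channel with
// saturation at 255. Each pixel is 4 bytes (B, G, R, A in memory).
static void AddARGBRowExample_C(const uint8_t* src_argb0,
                                const uint8_t* src_argb1,
                                uint8_t* dst_argb,
                                int width) {
  for (int i = 0; i < width * 4; ++i) {
    int sum = src_argb0[i] + src_argb1[i];
    dst_argb[i] = (uint8_t)(sum > 255 ? 255 : sum);
  }
}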
@@ -3878,14 +4338,14 @@ void ARGBSubtractRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void ARGBSubtractRow_MMI(const uint8_t* src_argb0, - const uint8_t* src_argb1, - uint8_t* dst_argb, - int width); -void ARGBSubtractRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - int width); +void ARGBSubtractRow_LASX(const uint8_t* src_argb0, + const uint8_t* src_argb1, + uint8_t* dst_argb, + int width); +void ARGBSubtractRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + int width); void ARGBToRGB24Row_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, @@ -3974,24 +4434,24 @@ void ARGBToRGB565DitherRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, const uint32_t param, int width); +void ARGBToRGB565DitherRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + const uint32_t param, + int width); -void ARGBToRGB24Row_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); -void ARGBToRAWRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBToRGB565Row_Any_MMI(const uint8_t* src_ptr, +void ARGBToRGB24Row_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBToARGB1555Row_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); -void ARGBToARGB4444Row_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); -void ARGBToRGB565DitherRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - const uint32_t param, - int width); +void ARGBToRAWRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void ARGBToRGB565Row_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); +void ARGBToARGB1555Row_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); +void ARGBToARGB4444Row_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); void I444ToARGBRow_Any_NEON(const uint8_t* y_buf, const uint8_t* u_buf, @@ -3999,6 +4459,12 @@ void I444ToARGBRow_Any_NEON(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I444ToRGB24Row_Any_NEON(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGBRow_Any_NEON(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, @@ -4132,7 +4598,7 @@ void I444ToARGBRow_Any_MSA(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); -void I444ToARGBRow_Any_MMI(const uint8_t* y_buf, +void I444ToARGBRow_Any_LSX(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, @@ -4144,18 +4610,24 @@ void I422ToARGBRow_Any_MSA(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); -void I422ToARGBRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); +void I422ToARGBRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGBARow_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422ToRGBARow_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422AlphaToARGBRow_Any_MSA(const uint8_t* y_buf, const 
uint8_t* u_buf, const uint8_t* v_buf, @@ -4163,30 +4635,61 @@ void I422AlphaToARGBRow_Any_MSA(const uint8_t* y_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422AlphaToARGBRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + const uint8_t* a_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGB24Row_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422ToRGB24Row_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToRGB565Row_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422ToRGB565Row_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGB4444Row_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422ToARGB4444Row_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void I422ToARGB1555Row_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, const struct YuvConstants* yuvconstants, int width); +void I422ToARGB1555Row_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); void NV12ToARGBRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, @@ -4211,12 +4714,55 @@ void UYVYToARGBRow_Any_MSA(const uint8_t* src_ptr, const struct YuvConstants* yuvconstants, int width); +void NV12ToARGBRow_Any_LSX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void NV12ToARGBRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void NV12ToRGB565Row_Any_LSX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void NV12ToRGB565Row_Any_LASX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void NV21ToARGBRow_Any_LSX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void NV21ToARGBRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* uv_buf, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void YUY2ToARGBRow_Any_LSX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); +void UYVYToARGBRow_Any_LSX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + const struct YuvConstants* yuvconstants, + int width); + void YUY2ToYRow_AVX2(const uint8_t* src_yuy2, uint8_t* dst_y, int width); void YUY2ToUVRow_AVX2(const uint8_t* src_yuy2, int stride_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_AVX2(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_AVX2(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, @@ -4227,6 
+4773,10 @@ void YUY2ToUVRow_SSE2(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_SSE2(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_SSE2(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, @@ -4237,36 +4787,44 @@ void YUY2ToUVRow_NEON(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_NEON(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_NEON(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); void YUY2ToYRow_MSA(const uint8_t* src_yuy2, uint8_t* dst_y, int width); -void YUY2ToYRow_MMI(const uint8_t* src_yuy2, uint8_t* dst_y, int width); +void YUY2ToYRow_LASX(const uint8_t* src_yuy2, uint8_t* dst_y, int width); void YUY2ToUVRow_MSA(const uint8_t* src_yuy2, int src_stride_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); -void YUY2ToUVRow_MMI(const uint8_t* src_yuy2, - int src_stride_yuy2, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void YUY2ToUVRow_LASX(const uint8_t* src_yuy2, + int src_stride_yuy2, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void YUY2ToUV422Row_MSA(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); -void YUY2ToUV422Row_MMI(const uint8_t* src_yuy2, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void YUY2ToUV422Row_LASX(const uint8_t* src_yuy2, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void YUY2ToYRow_C(const uint8_t* src_yuy2, uint8_t* dst_y, int width); void YUY2ToUVRow_C(const uint8_t* src_yuy2, int src_stride_yuy2, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_C(const uint8_t* src_yuy2, + int src_stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_C(const uint8_t* src_yuy2, uint8_t* dst_u, uint8_t* dst_v, @@ -4277,6 +4835,10 @@ void YUY2ToUVRow_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_Any_AVX2(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_Any_AVX2(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, @@ -4287,6 +4849,10 @@ void YUY2ToUVRow_Any_SSE2(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_Any_SSE2(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_Any_SSE2(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, @@ -4297,30 +4863,34 @@ void YUY2ToUVRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); +void YUY2ToNVUVRow_Any_NEON(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width); void YUY2ToUV422Row_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); void YUY2ToYRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void YUY2ToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void YUY2ToYRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void YUY2ToUVRow_Any_MSA(const uint8_t* src_ptr, int src_stride_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void YUY2ToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void YUY2ToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void YUY2ToUV422Row_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void YUY2ToUV422Row_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int 
width); +void YUY2ToUV422Row_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void UYVYToYRow_AVX2(const uint8_t* src_uyvy, uint8_t* dst_y, int width); void UYVYToUVRow_AVX2(const uint8_t* src_uyvy, int stride_uyvy, @@ -4362,25 +4932,25 @@ void UYVYToUV422Row_NEON(const uint8_t* src_uyvy, uint8_t* dst_v, int width); void UYVYToYRow_MSA(const uint8_t* src_uyvy, uint8_t* dst_y, int width); -void UYVYToYRow_MMI(const uint8_t* src_uyvy, uint8_t* dst_y, int width); +void UYVYToYRow_LASX(const uint8_t* src_uyvy, uint8_t* dst_y, int width); void UYVYToUVRow_MSA(const uint8_t* src_uyvy, int src_stride_uyvy, uint8_t* dst_u, uint8_t* dst_v, int width); -void UYVYToUVRow_MMI(const uint8_t* src_uyvy, - int src_stride_uyvy, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void UYVYToUVRow_LASX(const uint8_t* src_uyvy, + int src_stride_uyvy, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void UYVYToUV422Row_MSA(const uint8_t* src_uyvy, uint8_t* dst_u, uint8_t* dst_v, int width); -void UYVYToUV422Row_MMI(const uint8_t* src_uyvy, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void UYVYToUV422Row_LASX(const uint8_t* src_uyvy, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void UYVYToYRow_C(const uint8_t* src_uyvy, uint8_t* dst_y, int width); void UYVYToUVRow_C(const uint8_t* src_uyvy, @@ -4423,25 +4993,25 @@ void UYVYToUV422Row_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_v, int width); void UYVYToYRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void UYVYToYRow_Any_MMI(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); +void UYVYToYRow_Any_LASX(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); void UYVYToUVRow_Any_MSA(const uint8_t* src_ptr, int src_stride_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void UYVYToUVRow_Any_MMI(const uint8_t* src_ptr, - int src_stride_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void UYVYToUVRow_Any_LASX(const uint8_t* src_ptr, + int src_stride_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void UYVYToUV422Row_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_u, uint8_t* dst_v, int width); -void UYVYToUV422Row_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_u, - uint8_t* dst_v, - int width); +void UYVYToUV422Row_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_u, + uint8_t* dst_v, + int width); void SwapUVRow_C(const uint8_t* src_uv, uint8_t* dst_vu, int width); void SwapUVRow_NEON(const uint8_t* src_uv, uint8_t* dst_vu, int width); void SwapUVRow_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); @@ -4552,41 +5122,41 @@ void I422ToYUY2Row_MSA(const uint8_t* src_y, const uint8_t* src_v, uint8_t* dst_yuy2, int width); -void I422ToYUY2Row_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_yuy2, - int width); +void I422ToYUY2Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_yuy2, + int width); void I422ToUYVYRow_MSA(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_uyvy, int width); -void I422ToUYVYRow_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_uyvy, - int width); +void I422ToUYVYRow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_uyvy, + int width); void I422ToYUY2Row_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, int width); -void I422ToYUY2Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* 
dst_ptr, - int width); +void I422ToYUY2Row_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + int width); void I422ToUYVYRow_Any_MSA(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* dst_ptr, int width); -void I422ToUYVYRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - int width); +void I422ToUYVYRow_Any_LASX(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_ptr, + int width); // Effects related row functions. void ARGBAttenuateRow_C(const uint8_t* src_argb, uint8_t* dst_argb, int width); @@ -4602,9 +5172,9 @@ void ARGBAttenuateRow_NEON(const uint8_t* src_argb, void ARGBAttenuateRow_MSA(const uint8_t* src_argb, uint8_t* dst_argb, int width); -void ARGBAttenuateRow_MMI(const uint8_t* src_argb, - uint8_t* dst_argb, - int width); +void ARGBAttenuateRow_LASX(const uint8_t* src_argb, + uint8_t* dst_argb, + int width); void ARGBAttenuateRow_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); @@ -4617,9 +5187,9 @@ void ARGBAttenuateRow_Any_NEON(const uint8_t* src_ptr, void ARGBAttenuateRow_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int width); -void ARGBAttenuateRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - int width); +void ARGBAttenuateRow_Any_LASX(const uint8_t* src_ptr, + uint8_t* dst_ptr, + int width); // Inverse table for unattenuate, shared by C and SSE2. extern const uint32_t fixed_invtbl8[256]; @@ -4643,13 +5213,13 @@ void ARGBGrayRow_C(const uint8_t* src_argb, uint8_t* dst_argb, int width); void ARGBGrayRow_SSSE3(const uint8_t* src_argb, uint8_t* dst_argb, int width); void ARGBGrayRow_NEON(const uint8_t* src_argb, uint8_t* dst_argb, int width); void ARGBGrayRow_MSA(const uint8_t* src_argb, uint8_t* dst_argb, int width); -void ARGBGrayRow_MMI(const uint8_t* src_argb, uint8_t* dst_argb, int width); +void ARGBGrayRow_LASX(const uint8_t* src_argb, uint8_t* dst_argb, int width); void ARGBSepiaRow_C(uint8_t* dst_argb, int width); void ARGBSepiaRow_SSSE3(uint8_t* dst_argb, int width); void ARGBSepiaRow_NEON(uint8_t* dst_argb, int width); void ARGBSepiaRow_MSA(uint8_t* dst_argb, int width); -void ARGBSepiaRow_MMI(uint8_t* dst_argb, int width); +void ARGBSepiaRow_LASX(uint8_t* dst_argb, int width); void ARGBColorMatrixRow_C(const uint8_t* src_argb, uint8_t* dst_argb, @@ -4667,7 +5237,7 @@ void ARGBColorMatrixRow_MSA(const uint8_t* src_argb, uint8_t* dst_argb, const int8_t* matrix_argb, int width); -void ARGBColorMatrixRow_MMI(const uint8_t* src_argb, +void ARGBColorMatrixRow_LSX(const uint8_t* src_argb, uint8_t* dst_argb, const int8_t* matrix_argb, int width); @@ -4706,6 +5276,11 @@ void ARGBQuantizeRow_MSA(uint8_t* dst_argb, int interval_size, int interval_offset, int width); +void ARGBQuantizeRow_LSX(uint8_t* dst_argb, + int scale, + int interval_size, + int interval_offset, + int width); void ARGBShadeRow_C(const uint8_t* src_argb, uint8_t* dst_argb, @@ -4723,10 +5298,10 @@ void ARGBShadeRow_MSA(const uint8_t* src_argb, uint8_t* dst_argb, int width, uint32_t value); -void ARGBShadeRow_MMI(const uint8_t* src_argb, - uint8_t* dst_argb, - int width, - uint32_t value); +void ARGBShadeRow_LASX(const uint8_t* src_argb, + uint8_t* dst_argb, + int width, + uint32_t value); // Used for blur. 
void CumulativeSumToAverageRow_SSE2(const int32_t* topleft, @@ -4740,11 +5315,6 @@ void ComputeCumulativeSumRow_SSE2(const uint8_t* row, const int32_t* previous_cumsum, int width); -void ComputeCumulativeSumRow_MMI(const uint8_t* row, - int32_t* cumsum, - const int32_t* previous_cumsum, - int width); - void CumulativeSumToAverageRow_C(const int32_t* tl, const int32_t* bl, int w, @@ -4795,7 +5365,7 @@ void InterpolateRow_MSA(uint8_t* dst_ptr, ptrdiff_t src_stride, int width, int source_y_fraction); -void InterpolateRow_MMI(uint8_t* dst_ptr, +void InterpolateRow_LSX(uint8_t* dst_ptr, const uint8_t* src_ptr, ptrdiff_t src_stride, int width, @@ -4820,7 +5390,7 @@ void InterpolateRow_Any_MSA(uint8_t* dst_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); -void InterpolateRow_Any_MMI(uint8_t* dst_ptr, +void InterpolateRow_Any_LSX(uint8_t* dst_ptr, const uint8_t* src_ptr, ptrdiff_t src_stride_ptr, int width, @@ -4831,6 +5401,47 @@ void InterpolateRow_16_C(uint16_t* dst_ptr, ptrdiff_t src_stride, int width, int source_y_fraction); +void InterpolateRow_16_NEON(uint16_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int width, + int source_y_fraction); +void InterpolateRow_16_Any_NEON(uint16_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int width, + int source_y_fraction); + +void InterpolateRow_16To8_C(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction); +void InterpolateRow_16To8_NEON(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction); +void InterpolateRow_16To8_Any_NEON(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction); +void InterpolateRow_16To8_AVX2(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction); +void InterpolateRow_16To8_Any_AVX2(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction); // Sobel images. 
void SobelXRow_C(const uint8_t* src_y0, @@ -4853,11 +5464,6 @@ void SobelXRow_MSA(const uint8_t* src_y0, const uint8_t* src_y2, uint8_t* dst_sobelx, int width); -void SobelXRow_MMI(const uint8_t* src_y0, - const uint8_t* src_y1, - const uint8_t* src_y2, - uint8_t* dst_sobelx, - int width); void SobelYRow_C(const uint8_t* src_y0, const uint8_t* src_y1, uint8_t* dst_sobely, @@ -4874,10 +5480,6 @@ void SobelYRow_MSA(const uint8_t* src_y0, const uint8_t* src_y1, uint8_t* dst_sobely, int width); -void SobelYRow_MMI(const uint8_t* src_y0, - const uint8_t* src_y1, - uint8_t* dst_sobely, - int width); void SobelRow_C(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_argb, @@ -4894,7 +5496,7 @@ void SobelRow_MSA(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_argb, int width); -void SobelRow_MMI(const uint8_t* src_sobelx, +void SobelRow_LSX(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_argb, int width); @@ -4914,7 +5516,7 @@ void SobelToPlaneRow_MSA(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_y, int width); -void SobelToPlaneRow_MMI(const uint8_t* src_sobelx, +void SobelToPlaneRow_LSX(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_y, int width); @@ -4934,7 +5536,7 @@ void SobelXYRow_MSA(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_argb, int width); -void SobelXYRow_MMI(const uint8_t* src_sobelx, +void SobelXYRow_LSX(const uint8_t* src_sobelx, const uint8_t* src_sobely, uint8_t* dst_argb, int width); @@ -4950,7 +5552,7 @@ void SobelRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void SobelRow_Any_MMI(const uint8_t* y_buf, +void SobelRow_Any_LSX(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); @@ -4966,7 +5568,7 @@ void SobelToPlaneRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void SobelToPlaneRow_Any_MMI(const uint8_t* y_buf, +void SobelToPlaneRow_Any_LSX(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); @@ -4982,7 +5584,7 @@ void SobelXYRow_Any_MSA(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); -void SobelXYRow_Any_MMI(const uint8_t* y_buf, +void SobelXYRow_Any_LSX(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, int width); @@ -5058,6 +5660,14 @@ void HalfFloatRow_Any_MSA(const uint16_t* src_ptr, uint16_t* dst_ptr, float param, int width); +void HalfFloatRow_LSX(const uint16_t* src, + uint16_t* dst, + float scale, + int width); +void HalfFloatRow_Any_LSX(const uint16_t* src_ptr, + uint16_t* dst_ptr, + float param, + int width); void ByteToFloatRow_C(const uint8_t* src, float* dst, float scale, int width); void ByteToFloatRow_NEON(const uint8_t* src, float* dst, @@ -5092,159 +5702,6 @@ float ScaleSumSamples_NEON(const float* src, void ScaleSamples_C(const float* src, float* dst, float scale, int width); void ScaleSamples_NEON(const float* src, float* dst, float scale, int width); -void I210ToARGBRow_MMI(const uint16_t* src_y, - const uint16_t* src_u, - const uint16_t* src_v, - uint8_t* rgb_buf, - const struct YuvConstants* yuvconstants, - int width); -void I422ToRGBARow_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); -void I422AlphaToARGBRow_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - const uint8_t* src_a, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - 
int width); -void I422ToRGB24Row_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); -void I422ToRGB565Row_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_rgb565, - const struct YuvConstants* yuvconstants, - int width); -void I422ToARGB4444Row_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_argb4444, - const struct YuvConstants* yuvconstants, - int width); -void I422ToARGB1555Row_MMI(const uint8_t* src_y, - const uint8_t* src_u, - const uint8_t* src_v, - uint8_t* dst_argb1555, - const struct YuvConstants* yuvconstants, - int width); -void NV12ToARGBRow_MMI(const uint8_t* src_y, - const uint8_t* src_uv, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); -void NV12ToRGB565Row_MMI(const uint8_t* src_y, - const uint8_t* src_uv, - uint8_t* dst_rgb565, - const struct YuvConstants* yuvconstants, - int width); -void NV21ToARGBRow_MMI(const uint8_t* src_y, - const uint8_t* src_vu, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); -void NV12ToRGB24Row_MMI(const uint8_t* src_y, - const uint8_t* src_uv, - uint8_t* dst_rgb24, - const struct YuvConstants* yuvconstants, - int width); -void NV21ToRGB24Row_MMI(const uint8_t* src_y, - const uint8_t* src_vu, - uint8_t* dst_rgb24, - const struct YuvConstants* yuvconstants, - int width); -void YUY2ToARGBRow_MMI(const uint8_t* src_yuy2, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); -void UYVYToARGBRow_MMI(const uint8_t* src_uyvy, - uint8_t* dst_argb, - const struct YuvConstants* yuvconstants, - int width); -void I210ToARGBRow_Any_MMI(const uint16_t* y_buf, - const uint16_t* u_buf, - const uint16_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void I422ToRGBARow_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void I422AlphaToARGBRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - const uint8_t* a_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void I422ToRGB24Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void I422ToRGB565Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void I422ToARGB4444Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void I422ToARGB1555Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* u_buf, - const uint8_t* v_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void NV12ToARGBRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void NV12ToRGB565Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void NV21ToARGBRow_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void NV12ToRGB24Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - 
const struct YuvConstants* yuvconstants, - int width); -void NV21ToRGB24Row_Any_MMI(const uint8_t* y_buf, - const uint8_t* uv_buf, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void YUY2ToARGBRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); -void UYVYToARGBRow_Any_MMI(const uint8_t* src_ptr, - uint8_t* dst_ptr, - const struct YuvConstants* yuvconstants, - int width); - void GaussRow_F32_NEON(const float* src, float* dst, int width); void GaussRow_F32_C(const float* src, float* dst, int width); @@ -5264,6 +5721,17 @@ void GaussCol_F32_C(const float* src0, float* dst, int width); +void GaussRow_C(const uint32_t* src, uint16_t* dst, int width); +void GaussCol_C(const uint16_t* src0, + const uint16_t* src1, + const uint16_t* src2, + const uint16_t* src3, + const uint16_t* src4, + uint32_t* dst, + int width); + +void ClampFloatToZero_SSE2(const float* src_x, float* dst_y, int width); + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/scale.h b/third-party/libyuv/third_party/libyuv/include/libyuv/scale.h index 3d4b60052d..443f89c2f9 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/scale.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/scale.h @@ -195,6 +195,72 @@ int I444Scale_12(const uint16_t* src_y, int dst_height, enum FilterMode filtering); +// Scales a YUV 4:2:2 image from the src width and height to the +// dst width and height. +// If filtering is kFilterNone, a simple nearest-neighbor algorithm is +// used. This produces basic (blocky) quality at the fastest speed. +// If filtering is kFilterBilinear, interpolation is used to produce a better +// quality image, at the expense of speed. +// If filtering is kFilterBox, averaging is used to produce ever better +// quality image, at further expense of speed. +// Returns 0 if successful. +LIBYUV_API +int I422Scale(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + int src_width, + int src_height, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int dst_width, + int dst_height, + enum FilterMode filtering); + +LIBYUV_API +int I422Scale_16(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + int src_width, + int src_height, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int dst_width, + int dst_height, + enum FilterMode filtering); + +LIBYUV_API +int I422Scale_12(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + int src_width, + int src_height, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int dst_width, + int dst_height, + enum FilterMode filtering); + // Scales an NV12 image from the src width and height to the // dst width and height. 
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/scale_rgb.h b/third-party/libyuv/third_party/libyuv/include/libyuv/scale_rgb.h new file mode 100644 index 0000000000..d17c39fd6e --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/scale_rgb.h @@ -0,0 +1,42 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef INCLUDE_LIBYUV_SCALE_RGB_H_ +#define INCLUDE_LIBYUV_SCALE_RGB_H_ + +#include "libyuv/basic_types.h" +#include "libyuv/scale.h" // For FilterMode + +#ifdef __cplusplus +namespace libyuv { +extern "C" { +#endif + +// RGB can be RAW, RGB24 or YUV24 +// RGB scales 24 bit images by converting a row at a time to ARGB +// and using ARGB row functions to scale, then convert to RGB. +// TODO(fbarchard): Allow input/output formats to be specified. +LIBYUV_API +int RGBScale(const uint8_t* src_rgb, + int src_stride_rgb, + int src_width, + int src_height, + uint8_t* dst_rgb, + int dst_stride_rgb, + int dst_width, + int dst_height, + enum FilterMode filtering); + +#ifdef __cplusplus +} // extern "C" +} // namespace libyuv +#endif + +#endif // INCLUDE_LIBYUV_SCALE_UV_H_ diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/scale_row.h b/third-party/libyuv/third_party/libyuv/include/libyuv/scale_row.h index 461ac36f33..6cb5e12842 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/scale_row.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/scale_row.h @@ -76,18 +76,18 @@ extern "C" { // TODO(fbarchard): Port to Visual C #if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) #define HAS_SCALEUVROWDOWN2BOX_SSSE3 -#define HAS_SCALEROWUP2LINEAR_SSE2 -#define HAS_SCALEROWUP2LINEAR_SSSE3 -#define HAS_SCALEROWUP2BILINEAR_SSE2 -#define HAS_SCALEROWUP2BILINEAR_SSSE3 -#define HAS_SCALEROWUP2LINEAR_12_SSSE3 -#define HAS_SCALEROWUP2BILINEAR_12_SSSE3 -#define HAS_SCALEROWUP2LINEAR_16_SSE2 -#define HAS_SCALEROWUP2BILINEAR_16_SSE2 -#define HAS_SCALEUVROWUP2LINEAR_SSSE3 -#define HAS_SCALEUVROWUP2BILINEAR_SSSE3 -#define HAS_SCALEUVROWUP2LINEAR_16_SSE2 -#define HAS_SCALEUVROWUP2BILINEAR_16_SSE2 +#define HAS_SCALEROWUP2_LINEAR_SSE2 +#define HAS_SCALEROWUP2_LINEAR_SSSE3 +#define HAS_SCALEROWUP2_BILINEAR_SSE2 +#define HAS_SCALEROWUP2_BILINEAR_SSSE3 +#define HAS_SCALEROWUP2_LINEAR_12_SSSE3 +#define HAS_SCALEROWUP2_BILINEAR_12_SSSE3 +#define HAS_SCALEROWUP2_LINEAR_16_SSE2 +#define HAS_SCALEROWUP2_BILINEAR_16_SSE2 +#define HAS_SCALEUVROWUP2_LINEAR_SSSE3 +#define HAS_SCALEUVROWUP2_BILINEAR_SSSE3 +#define HAS_SCALEUVROWUP2_LINEAR_16_SSE41 +#define HAS_SCALEUVROWUP2_BILINEAR_16_SSE41 #endif // The following are available for gcc/clang x86 platforms, but @@ -97,16 +97,16 @@ extern "C" { (defined(__x86_64__) || defined(__i386__)) && \ (defined(CLANG_HAS_AVX2) || defined(GCC_HAS_AVX2)) #define HAS_SCALEUVROWDOWN2BOX_AVX2 -#define HAS_SCALEROWUP2LINEAR_AVX2 -#define HAS_SCALEROWUP2BILINEAR_AVX2 -#define HAS_SCALEROWUP2LINEAR_12_AVX2 -#define HAS_SCALEROWUP2BILINEAR_12_AVX2 -#define HAS_SCALEROWUP2LINEAR_16_AVX2 -#define HAS_SCALEROWUP2BILINEAR_16_AVX2 -#define HAS_SCALEUVROWUP2LINEAR_AVX2 -#define 
HAS_SCALEUVROWUP2BILINEAR_AVX2 -#define HAS_SCALEUVROWUP2LINEAR_16_AVX2 -#define HAS_SCALEUVROWUP2BILINEAR_16_AVX2 +#define HAS_SCALEROWUP2_LINEAR_AVX2 +#define HAS_SCALEROWUP2_BILINEAR_AVX2 +#define HAS_SCALEROWUP2_LINEAR_12_AVX2 +#define HAS_SCALEROWUP2_BILINEAR_12_AVX2 +#define HAS_SCALEROWUP2_LINEAR_16_AVX2 +#define HAS_SCALEROWUP2_BILINEAR_16_AVX2 +#define HAS_SCALEUVROWUP2_LINEAR_AVX2 +#define HAS_SCALEUVROWUP2_BILINEAR_AVX2 +#define HAS_SCALEUVROWUP2_LINEAR_16_AVX2 +#define HAS_SCALEUVROWUP2_BILINEAR_16_AVX2 #endif // The following are available on all x86 platforms, but @@ -135,16 +135,16 @@ extern "C" { #define HAS_SCALEROWDOWN4_NEON #define HAS_SCALEUVROWDOWN2BOX_NEON #define HAS_SCALEUVROWDOWNEVEN_NEON -#define HAS_SCALEROWUP2LINEAR_NEON -#define HAS_SCALEROWUP2BILINEAR_NEON -#define HAS_SCALEROWUP2LINEAR_12_NEON -#define HAS_SCALEROWUP2BILINEAR_12_NEON -#define HAS_SCALEROWUP2LINEAR_16_NEON -#define HAS_SCALEROWUP2BILINEAR_16_NEON -#define HAS_SCALEUVROWUP2LINEAR_NEON -#define HAS_SCALEUVROWUP2BILINEAR_NEON -#define HAS_SCALEUVROWUP2LINEAR_16_NEON -#define HAS_SCALEUVROWUP2BILINEAR_16_NEON +#define HAS_SCALEROWUP2_LINEAR_NEON +#define HAS_SCALEROWUP2_BILINEAR_NEON +#define HAS_SCALEROWUP2_LINEAR_12_NEON +#define HAS_SCALEROWUP2_BILINEAR_12_NEON +#define HAS_SCALEROWUP2_LINEAR_16_NEON +#define HAS_SCALEROWUP2_BILINEAR_16_NEON +#define HAS_SCALEUVROWUP2_LINEAR_NEON +#define HAS_SCALEUVROWUP2_BILINEAR_NEON +#define HAS_SCALEUVROWUP2_LINEAR_16_NEON +#define HAS_SCALEUVROWUP2_BILINEAR_16_NEON #endif #if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa) @@ -160,22 +160,17 @@ extern "C" { #define HAS_SCALEROWDOWN4_MSA #endif -#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A) -#define HAS_FIXEDDIV1_MIPS -#define HAS_FIXEDDIV_MIPS -#define HAS_SCALEADDROW_16_MMI -#define HAS_SCALEADDROW_MMI -#define HAS_SCALEARGBCOLS_MMI -#define HAS_SCALEARGBCOLSUP2_MMI -#define HAS_SCALEARGBROWDOWN2_MMI -#define HAS_SCALEARGBROWDOWNEVEN_MMI -#define HAS_SCALECOLS_16_MMI -#define HAS_SCALECOLS_MMI -#define HAS_SCALEROWDOWN2_16_MMI -#define HAS_SCALEROWDOWN2_MMI -#define HAS_SCALEROWDOWN4_16_MMI -#define HAS_SCALEROWDOWN4_MMI -#define HAS_SCALEROWDOWN34_MMI +#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) +#define HAS_SCALEARGBROWDOWN2_LSX +#define HAS_SCALEARGBROWDOWNEVEN_LSX +#define HAS_SCALEROWDOWN2_LSX +#define HAS_SCALEROWDOWN4_LSX +#define HAS_SCALEROWDOWN38_LSX +#define HAS_SCALEFILTERCOLS_LSX +#define HAS_SCALEADDROW_LSX +#define HAS_SCALEARGBCOLS_LSX +#define HAS_SCALEARGBFILTERCOLS_LSX +#define HAS_SCALEROWDOWN34_LSX #endif // Scale ARGB vertically with bilinear interpolation. @@ -205,6 +200,20 @@ void ScalePlaneVertical_16(int src_height, int wpp, enum FilterMode filtering); +void ScalePlaneVertical_16To8(int src_height, + int dst_width, + int dst_height, + int src_stride, + int dst_stride, + const uint16_t* src_argb, + uint8_t* dst_argb, + int x, + int y, + int dy, + int wpp, + int scale, + enum FilterMode filtering); + // Simplify the filtering based on scale factors. 
enum FilterMode ScaleFilterReduce(int src_width, int src_height, @@ -683,11 +692,11 @@ void ScaleRowUp2_Bilinear_12_Any_SSSE3(const uint16_t* src_ptr, void ScaleRowUp2_Linear_16_Any_SSE2(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width); -void ScaleRowUp2_Bilinear_16_Any_SSSE3(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst_ptr, - ptrdiff_t dst_stride, - int dst_width); +void ScaleRowUp2_Bilinear_16_Any_SSE2(const uint16_t* src_ptr, + ptrdiff_t src_stride, + uint16_t* dst_ptr, + ptrdiff_t dst_stride, + int dst_width); void ScaleRowUp2_Linear_Any_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width); @@ -871,16 +880,6 @@ void ScaleARGBCols_Any_MSA(uint8_t* dst_ptr, int dst_width, int x, int dx); -void ScaleARGBCols_MMI(uint8_t* dst_argb, - const uint8_t* src_argb, - int dst_width, - int x, - int dx); -void ScaleARGBCols_Any_MMI(uint8_t* dst_ptr, - const uint8_t* src_ptr, - int dst_width, - int x, - int dx); // ARGB Row functions void ScaleARGBRowDown2_SSE2(const uint8_t* src_argb, @@ -919,15 +918,15 @@ void ScaleARGBRowDown2Box_MSA(const uint8_t* src_argb, ptrdiff_t src_stride, uint8_t* dst_argb, int dst_width); -void ScaleARGBRowDown2_MMI(const uint8_t* src_argb, +void ScaleARGBRowDown2_LSX(const uint8_t* src_argb, ptrdiff_t src_stride, uint8_t* dst_argb, int dst_width); -void ScaleARGBRowDown2Linear_MMI(const uint8_t* src_argb, +void ScaleARGBRowDown2Linear_LSX(const uint8_t* src_argb, ptrdiff_t src_stride, uint8_t* dst_argb, int dst_width); -void ScaleARGBRowDown2Box_MMI(const uint8_t* src_argb, +void ScaleARGBRowDown2Box_LSX(const uint8_t* src_argb, ptrdiff_t src_stride, uint8_t* dst_argb, int dst_width); @@ -967,15 +966,15 @@ void ScaleARGBRowDown2Box_Any_MSA(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleARGBRowDown2_Any_MMI(const uint8_t* src_ptr, +void ScaleARGBRowDown2_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleARGBRowDown2Linear_Any_MMI(const uint8_t* src_ptr, +void ScaleARGBRowDown2Linear_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleARGBRowDown2Box_Any_MMI(const uint8_t* src_ptr, +void ScaleARGBRowDown2Box_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); @@ -1009,12 +1008,12 @@ void ScaleARGBRowDownEvenBox_MSA(const uint8_t* src_argb, int src_stepx, uint8_t* dst_argb, int dst_width); -void ScaleARGBRowDownEven_MMI(const uint8_t* src_argb, +void ScaleARGBRowDownEven_LSX(const uint8_t* src_argb, ptrdiff_t src_stride, int32_t src_stepx, uint8_t* dst_argb, int dst_width); -void ScaleARGBRowDownEvenBox_MMI(const uint8_t* src_argb, +void ScaleARGBRowDownEvenBox_LSX(const uint8_t* src_argb, ptrdiff_t src_stride, int src_stepx, uint8_t* dst_argb, @@ -1049,12 +1048,12 @@ void ScaleARGBRowDownEvenBox_Any_MSA(const uint8_t* src_ptr, int src_stepx, uint8_t* dst_ptr, int dst_width); -void ScaleARGBRowDownEven_Any_MMI(const uint8_t* src_ptr, +void ScaleARGBRowDownEven_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, int32_t src_stepx, uint8_t* dst_ptr, int dst_width); -void ScaleARGBRowDownEvenBox_Any_MMI(const uint8_t* src_ptr, +void ScaleARGBRowDownEvenBox_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, int src_stepx, uint8_t* dst_ptr, @@ -1101,18 +1100,6 @@ void ScaleUVRowDown2Box_MSA(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_uv, int dst_width); -void ScaleUVRowDown2_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_uv, - 
int dst_width); -void ScaleUVRowDown2Linear_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_uv, - int dst_width); -void ScaleUVRowDown2Box_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_uv, - int dst_width); void ScaleUVRowDown2_Any_SSSE3(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -1153,18 +1140,6 @@ void ScaleUVRowDown2Box_Any_MSA(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleUVRowDown2_Any_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_ptr, - int dst_width); -void ScaleUVRowDown2Linear_Any_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_ptr, - int dst_width); -void ScaleUVRowDown2Box_Any_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_ptr, - int dst_width); void ScaleUVRowDownEven_SSSE3(const uint8_t* src_ptr, ptrdiff_t src_stride, int src_stepx, @@ -1195,16 +1170,6 @@ void ScaleUVRowDownEvenBox_MSA(const uint8_t* src_ptr, int src_stepx, uint8_t* dst_uv, int dst_width); -void ScaleUVRowDownEven_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - int32_t src_stepx, - uint8_t* dst_uv, - int dst_width); -void ScaleUVRowDownEvenBox_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - int src_stepx, - uint8_t* dst_uv, - int dst_width); void ScaleUVRowDownEven_Any_SSSE3(const uint8_t* src_ptr, ptrdiff_t src_stride, int src_stepx, @@ -1235,16 +1200,6 @@ void ScaleUVRowDownEvenBox_Any_MSA(const uint8_t* src_ptr, int src_stepx, uint8_t* dst_ptr, int dst_width); -void ScaleUVRowDownEven_Any_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - int32_t src_stepx, - uint8_t* dst_ptr, - int dst_width); -void ScaleUVRowDownEvenBox_Any_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - int src_stepx, - uint8_t* dst_ptr, - int dst_width); void ScaleUVRowUp2_Linear_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, @@ -1294,22 +1249,22 @@ void ScaleUVRowUp2_Bilinear_Any_NEON(const uint8_t* src_ptr, uint8_t* dst_ptr, ptrdiff_t dst_stride, int dst_width); -void ScaleUVRowUp2_Linear_16_SSE2(const uint16_t* src_ptr, - uint16_t* dst_ptr, - int dst_width); -void ScaleUVRowUp2_Bilinear_16_SSE2(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst_ptr, - ptrdiff_t dst_stride, - int dst_width); -void ScaleUVRowUp2_Linear_16_Any_SSE2(const uint16_t* src_ptr, - uint16_t* dst_ptr, - int dst_width); -void ScaleUVRowUp2_Bilinear_16_Any_SSE2(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst_ptr, - ptrdiff_t dst_stride, - int dst_width); +void ScaleUVRowUp2_Linear_16_SSE41(const uint16_t* src_ptr, + uint16_t* dst_ptr, + int dst_width); +void ScaleUVRowUp2_Bilinear_16_SSE41(const uint16_t* src_ptr, + ptrdiff_t src_stride, + uint16_t* dst_ptr, + ptrdiff_t dst_stride, + int dst_width); +void ScaleUVRowUp2_Linear_16_Any_SSE41(const uint16_t* src_ptr, + uint16_t* dst_ptr, + int dst_width); +void ScaleUVRowUp2_Bilinear_16_Any_SSE41(const uint16_t* src_ptr, + ptrdiff_t src_stride, + uint16_t* dst_ptr, + ptrdiff_t dst_stride, + int dst_width); void ScaleUVRowUp2_Linear_16_AVX2(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width); @@ -1561,10 +1516,6 @@ void ScaleRowDown34_MSA(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst, int dst_width); -void ScaleRowDown34_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst, - int dst_width); void ScaleRowDown34_0_Box_MSA(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* d, @@ -1618,10 +1569,6 @@ void ScaleRowDown34_Any_MSA(const uint8_t* src_ptr, ptrdiff_t 
src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleRowDown34_Any_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst_ptr, - int dst_width); void ScaleRowDown34_0_Box_Any_MSA(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -1631,93 +1578,129 @@ void ScaleRowDown34_1_Box_Any_MSA(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width); -void ScaleRowDown2_MMI(const uint8_t* src_ptr, +void ScaleRowDown2_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst, int dst_width); -void ScaleRowDown2_16_MMI(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst, - int dst_width); -void ScaleRowDown2Linear_MMI(const uint8_t* src_ptr, +void ScaleRowDown2Linear_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst, int dst_width); -void ScaleRowDown2Linear_16_MMI(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst, - int dst_width); -void ScaleRowDown2Box_MMI(const uint8_t* src_ptr, +void ScaleRowDown2Box_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst, int dst_width); -void ScaleRowDown2Box_16_MMI(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst, - int dst_width); -void ScaleRowDown2Box_Odd_MMI(const uint8_t* src_ptr, - ptrdiff_t src_stride, - uint8_t* dst, - int dst_width); -void ScaleRowDown4_MMI(const uint8_t* src_ptr, +void ScaleRowDown4_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst, int dst_width); -void ScaleRowDown4_16_MMI(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst, - int dst_width); -void ScaleRowDown4Box_MMI(const uint8_t* src_ptr, +void ScaleRowDown4Box_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst, int dst_width); -void ScaleRowDown4Box_16_MMI(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst, - int dst_width); -void ScaleAddRow_MMI(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width); -void ScaleAddRow_16_MMI(const uint16_t* src_ptr, - uint32_t* dst_ptr, - int src_width); -void ScaleColsUp2_MMI(uint8_t* dst_ptr, - const uint8_t* src_ptr, - int dst_width, - int x, - int dx); -void ScaleColsUp2_16_MMI(uint16_t* dst_ptr, - const uint16_t* src_ptr, +void ScaleRowDown38_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width); +void ScaleRowDown38_2_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleRowDown38_3_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleAddRow_LSX(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width); +void ScaleFilterCols_LSX(uint8_t* dst_ptr, + const uint8_t* src_ptr, int dst_width, int x, int dx); -void ScaleARGBColsUp2_MMI(uint8_t* dst_argb, - const uint8_t* src_argb, - int dst_width, - int x, - int dx); - -void ScaleRowDown2_Any_MMI(const uint8_t* src_ptr, +void ScaleARGBFilterCols_LSX(uint8_t* dst_argb, + const uint8_t* src_argb, + int dst_width, + int x, + int dx); +void ScaleARGBCols_LSX(uint8_t* dst_argb, + const uint8_t* src_argb, + int dst_width, + int x, + int dx); +void ScaleRowDown34_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width); +void ScaleRowDown34_0_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* d, + int dst_width); +void ScaleRowDown34_1_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* d, + int dst_width); +void ScaleRowDown2_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleRowDown2Linear_Any_MMI(const 
uint8_t* src_ptr, +void ScaleRowDown2Linear_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleRowDown2Box_Any_MMI(const uint8_t* src_ptr, +void ScaleRowDown2Box_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleRowDown4_Any_MMI(const uint8_t* src_ptr, +void ScaleRowDown4_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleRowDown4Box_Any_MMI(const uint8_t* src_ptr, +void ScaleRowDown4Box_Any_LSX(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width); -void ScaleAddRow_Any_MMI(const uint8_t* src_ptr, +void ScaleRowDown38_Any_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleRowDown38_2_Box_Any_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleRowDown38_3_Box_Any_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleAddRow_Any_LSX(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width); +void ScaleFilterCols_Any_LSX(uint8_t* dst_ptr, + const uint8_t* src_ptr, + int dst_width, + int x, + int dx); +void ScaleARGBCols_Any_LSX(uint8_t* dst_ptr, + const uint8_t* src_ptr, + int dst_width, + int x, + int dx); +void ScaleARGBFilterCols_Any_LSX(uint8_t* dst_ptr, + const uint8_t* src_ptr, + int dst_width, + int x, + int dx); +void ScaleRowDown34_Any_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleRowDown34_0_Box_Any_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); +void ScaleRowDown34_1_Box_Any_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width); + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/include/libyuv/version.h b/third-party/libyuv/third_party/libyuv/include/libyuv/version.h index 8b06777fcb..ca1dc13735 100644 --- a/third-party/libyuv/third_party/libyuv/include/libyuv/version.h +++ b/third-party/libyuv/third_party/libyuv/include/libyuv/version.h @@ -11,6 +11,6 @@ #ifndef INCLUDE_LIBYUV_VERSION_H_ #define INCLUDE_LIBYUV_VERSION_H_ -#define LIBYUV_VERSION 1789 +#define LIBYUV_VERSION 1850 #endif // INCLUDE_LIBYUV_VERSION_H_ diff --git a/third-party/libyuv/third_party/libyuv/infra/config/OWNERS b/third-party/libyuv/third_party/libyuv/infra/config/OWNERS new file mode 100644 index 0000000000..2c4f90a03c --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/OWNERS @@ -0,0 +1,3 @@ +fbarchard@chromium.org +mbonadei@chromium.org +jansson@google.com diff --git a/third-party/libyuv/third_party/libyuv/infra/config/PRESUBMIT.py b/third-party/libyuv/third_party/libyuv/infra/config/PRESUBMIT.py new file mode 100644 index 0000000000..f79e08ad6a --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/PRESUBMIT.py @@ -0,0 +1,13 @@ +# Copyright 2018 The PDFium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +USE_PYTHON3 = True + + +def CheckChangeOnUpload(input_api, output_api): + return input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api) + + +def CheckChangeOnCommit(input_api, output_api): + return input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api) diff --git a/third-party/libyuv/third_party/libyuv/infra/config/README.md b/third-party/libyuv/third_party/libyuv/infra/config/README.md new file mode 100644 index 0000000000..e5e3b5f818 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/README.md @@ -0,0 +1,2 @@ +This folder contains libyuv project-wide configurations +for chrome-infra services. diff --git a/third-party/libyuv/third_party/libyuv/infra/config/codereview.settings b/third-party/libyuv/third_party/libyuv/infra/config/codereview.settings new file mode 100644 index 0000000000..6d74227357 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/codereview.settings @@ -0,0 +1,6 @@ +# This file is used by gcl and git-cl to get repository specific information. +CODE_REVIEW_SERVER: codereview.chromium.org +PROJECT: libyuv +GERRIT_HOST: True +VIEW_VC: https://chromium.googlesource.com/libyuv/libyuv/+/ + diff --git a/third-party/libyuv/third_party/libyuv/infra/config/commit-queue.cfg b/third-party/libyuv/third_party/libyuv/infra/config/commit-queue.cfg new file mode 100644 index 0000000000..4a8d77f41d --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/commit-queue.cfg @@ -0,0 +1,143 @@ +# Auto-generated by lucicfg. +# Do not modify manually. +# +# For the schema of this file, see Config message: +# https://luci-config.appspot.com/schemas/projects:commit-queue.cfg + +cq_status_host: "chromium-cq-status.appspot.com" +submit_options { + max_burst: 4 + burst_delay { + seconds: 480 + } +} +config_groups { + name: "config" + gerrit { + url: "https://chromium-review.googlesource.com" + projects { + name: "libyuv/libyuv" + ref_regexp: "refs/heads/infra/config" + } + } + verifiers { + gerrit_cq_ability { + committer_list: "project-libyuv-committers" + dry_run_access_list: "project-libyuv-tryjob-access" + } + tryjob { + builders { + name: "libyuv/try/presubmit" + } + retry_config { + single_quota: 1 + global_quota: 2 + failure_weight: 1 + transient_failure_weight: 1 + timeout_weight: 2 + } + } + } +} +config_groups { + name: "master" + gerrit { + url: "https://chromium-review.googlesource.com" + projects { + name: "libyuv/libyuv" + ref_regexp: "refs/heads/main" + ref_regexp: "refs/heads/master" + } + } + verifiers { + gerrit_cq_ability { + committer_list: "project-libyuv-committers" + dry_run_access_list: "project-libyuv-tryjob-access" + } + tryjob { + builders { + name: "libyuv/try/android" + experiment_percentage: 100 + } + builders { + name: "libyuv/try/android_arm64" + experiment_percentage: 100 + } + builders { + name: "libyuv/try/android_rel" + experiment_percentage: 100 + } + builders { + name: "libyuv/try/android_x64" + } + builders { + name: "libyuv/try/android_x86" + } + builders { + name: "libyuv/try/ios_arm64" + } + builders { + name: "libyuv/try/ios_arm64_rel" + } + builders { + name: "libyuv/try/linux" + } + builders { + name: "libyuv/try/linux_asan" + } + builders { + name: "libyuv/try/linux_gcc" + experiment_percentage: 100 + } + builders { + name: "libyuv/try/linux_msan" + } + builders { + name: "libyuv/try/linux_rel" + } + builders { + name: "libyuv/try/linux_tsan2" + } + builders { + name: "libyuv/try/linux_ubsan" + } + builders { + name: "libyuv/try/linux_ubsan_vptr" + } + builders { + 
name: "libyuv/try/mac" + } + builders { + name: "libyuv/try/mac_asan" + } + builders { + name: "libyuv/try/mac_rel" + } + builders { + name: "libyuv/try/win" + } + builders { + name: "libyuv/try/win_clang" + } + builders { + name: "libyuv/try/win_clang_rel" + } + builders { + name: "libyuv/try/win_rel" + } + builders { + name: "libyuv/try/win_x64_clang_rel" + } + builders { + name: "libyuv/try/win_x64_rel" + } + retry_config { + single_quota: 1 + global_quota: 2 + failure_weight: 1 + transient_failure_weight: 1 + timeout_weight: 2 + } + } + } +} diff --git a/third-party/libyuv/third_party/libyuv/infra/config/cr-buildbucket.cfg b/third-party/libyuv/third_party/libyuv/infra/config/cr-buildbucket.cfg new file mode 100644 index 0000000000..50ea625a94 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/cr-buildbucket.cfg @@ -0,0 +1,1963 @@ +# Auto-generated by lucicfg. +# Do not modify manually. +# +# For the schema of this file, see BuildbucketCfg message: +# https://luci-config.appspot.com/schemas/projects:buildbucket.cfg + +buckets { + name: "ci" + acls { + role: WRITER + group: "project-libyuv-admins" + } + acls { + group: "all" + } + swarming { + builders { + name: "Android ARM64 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": 
true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android Tester ARM32 Debug (Nexus 5X)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "device_type:bullhead" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android Tester ARM32 Release (Nexus 5X)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "device_type:bullhead" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android Tester ARM64 Debug (Nexus 5X)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "device_type:bullhead" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android32 x86 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: 
"infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Android64 x64 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux Asan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux MSan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + 
name: "Linux Tsan v2" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux UBSan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux UBSan vptr" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux32 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' 
"recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux32 Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux64 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Linux64 Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Mac Asan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + 
' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Mac64 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Mac64 Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win32 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win32 Debug (Clang)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: 
"refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win32 Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win32 Release (Clang)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win64 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win64 Debug (Clang)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: 
"vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win64 Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "Win64 Release (Clang)" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "iOS ARM64 Debug" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: 
"libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "iOS ARM64 Release" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.ci" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-trusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "client.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + } +} +buckets { + name: "cron" + acls { + role: WRITER + group: "project-libyuv-admins" + } + acls { + group: "all" + } + swarming { + builders { + name: "DEPS Autoroller" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Linux" + dimensions: "pool:luci.webrtc.cron" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "recipe": "libyuv/roll_deps"' + '}' + execution_timeout_secs: 7200 + build_numbers: YES + service_account: "libyuv-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + } +} +buckets { + name: "try" + acls { + role: WRITER + group: "project-libyuv-admins" + } + acls { + group: "all" + } + acls { + role: SCHEDULER + group: "project-libyuv-tryjob-access" + } + acls { + role: SCHEDULER + group: "service-account-cq" + } + swarming { + builders { + name: "android" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "device_type:bullhead" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "android_arm64" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "device_type:bullhead" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": 
"chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "android_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "device_type:bullhead" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "android_x64" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "android_x86" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "ios_arm64" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' 
"server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "ios_arm64_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux_asan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux_gcc" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe 
{ + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux_msan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux_tsan2" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + 
value: 100 + } + } + builders { + name: "linux_ubsan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "linux_ubsan_vptr" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "mac" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "mac_asan" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 
10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "mac_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cpu:x86-64" + dimensions: "os:Mac-10.15" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "presubmit" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Ubuntu-18.04" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": true,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "run_presubmit",' + ' "repo_name": "libyuv",' + ' "runhooks": true' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "win" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": false,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "win_clang" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": false,' + ' "server_host": "goma.chromium.org",' + ' 
"use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "win_clang_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": false,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "win_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": false,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "win_x64_clang_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: "pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": false,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + builders { + name: "win_x64_rel" + swarming_host: "chromium-swarm.appspot.com" + swarming_tags: "vpython:native-python-wrapper" + dimensions: "cores:8" + dimensions: "cpu:x86-64" + dimensions: "os:Windows-10" + dimensions: 
"pool:luci.flex.try" + exe { + cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build" + cipd_version: "refs/heads/main" + cmd: "luciexe" + } + properties: + '{' + ' "$build/goma": {' + ' "enable_ats": false,' + ' "server_host": "goma.chromium.org",' + ' "use_luci_auth": true' + ' },' + ' "$build/reclient": {' + ' "instance": "rbe-webrtc-untrusted",' + ' "metrics_project": "chromium-reclient-metrics"' + ' },' + ' "builder_group": "tryserver.libyuv",' + ' "recipe": "libyuv/libyuv"' + '}' + execution_timeout_secs: 10800 + build_numbers: YES + service_account: "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + experiments { + key: "luci.recipes.use_python3" + value: 100 + } + } + } +} diff --git a/third-party/libyuv/third_party/libyuv/infra/config/luci-logdog.cfg b/third-party/libyuv/third_party/libyuv/infra/config/luci-logdog.cfg new file mode 100644 index 0000000000..adc75bef49 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/luci-logdog.cfg @@ -0,0 +1,9 @@ +# Auto-generated by lucicfg. +# Do not modify manually. +# +# For the schema of this file, see ProjectConfig message: +# https://luci-config.appspot.com/schemas/projects:luci-logdog.cfg + +reader_auth_groups: "all" +writer_auth_groups: "luci-logdog-chromium-writers" +archive_gs_bucket: "chromium-luci-logdog" diff --git a/third-party/libyuv/third_party/libyuv/infra/config/luci-milo.cfg b/third-party/libyuv/third_party/libyuv/infra/config/luci-milo.cfg new file mode 100644 index 0000000000..baf786f21e --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/luci-milo.cfg @@ -0,0 +1,246 @@ +# Auto-generated by lucicfg. +# Do not modify manually. +# +# For the schema of this file, see Project message: +# https://luci-config.appspot.com/schemas/projects:luci-milo.cfg + +consoles { + id: "main" + name: "libyuv Main Console" + repo_url: "https://chromium.googlesource.com/libyuv/libyuv" + refs: "regexp:refs/heads/main" + manifest_name: "REVISION" + builders { + name: "buildbucket/luci.libyuv.ci/Android ARM64 Debug" + category: "Android|Builder" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android Debug" + category: "Android|Builder" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android Release" + category: "Android|Builder" + short_name: "rel" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android32 x86 Debug" + category: "Android|Builder|x86" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android64 x64 Debug" + category: "Android|Builder|x64" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android Tester ARM32 Debug (Nexus 5X)" + category: "Android|Tester|ARM 32" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android Tester ARM32 Release (Nexus 5X)" + category: "Android|Tester|ARM 32" + short_name: "rel" + } + builders { + name: "buildbucket/luci.libyuv.ci/Android Tester ARM64 Debug (Nexus 5X)" + category: "Android|Tester|ARM 64" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux Asan" + category: "Linux" + short_name: "asan" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux MSan" + category: "Linux" + short_name: "msan" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux Tsan v2" + category: "Linux" + short_name: "tsan" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux UBSan" + category: "Linux|UBSan" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux UBSan vptr" + 
category: "Linux|UBSan" + short_name: "vptr" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux32 Debug" + category: "Linux|32" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux32 Release" + category: "Linux|32" + short_name: "rel" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux64 Debug" + category: "Linux|64" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Linux64 Release" + category: "Linux|64" + short_name: "rel" + } + builders { + name: "buildbucket/luci.libyuv.ci/Mac Asan" + category: "Mac" + short_name: "asan" + } + builders { + name: "buildbucket/luci.libyuv.ci/Mac64 Debug" + category: "Mac" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Mac64 Release" + category: "Mac" + short_name: "rel" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win32 Debug" + category: "Win|32|Debug" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win32 Debug (Clang)" + category: "Win|32|Debug" + short_name: "clg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win32 Release" + category: "Win|32|Release" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win32 Release (Clang)" + category: "Win|32|Release" + short_name: "clg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win64 Debug" + category: "Win|64|Debug" + short_name: "clg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win64 Debug (Clang)" + category: "Win|64|Debug" + short_name: "clg" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win64 Release" + category: "Win|64|Release" + } + builders { + name: "buildbucket/luci.libyuv.ci/Win64 Release (Clang)" + category: "Win|64|Release" + short_name: "clg" + } + builders { + name: "buildbucket/luci.libyuv.ci/iOS ARM64 Debug" + category: "iOS|ARM64" + short_name: "dbg" + } + builders { + name: "buildbucket/luci.libyuv.ci/iOS ARM64 Release" + category: "iOS|ARM64" + short_name: "rel" + } + include_experimental_builds: true +} +consoles { + id: "cron" + name: "Cron" + builders { + name: "buildbucket/luci.libyuv.cron/DEPS Autoroller" + } + builder_view_only: true +} +consoles { + id: "try" + name: "libyuv Try Builders" + builders { + name: "buildbucket/luci.libyuv.try/android" + } + builders { + name: "buildbucket/luci.libyuv.try/android_arm64" + } + builders { + name: "buildbucket/luci.libyuv.try/android_rel" + } + builders { + name: "buildbucket/luci.libyuv.try/android_x64" + } + builders { + name: "buildbucket/luci.libyuv.try/android_x86" + } + builders { + name: "buildbucket/luci.libyuv.try/ios_arm64" + } + builders { + name: "buildbucket/luci.libyuv.try/ios_arm64_rel" + } + builders { + name: "buildbucket/luci.libyuv.try/linux" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_asan" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_gcc" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_msan" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_rel" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_tsan2" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_ubsan" + } + builders { + name: "buildbucket/luci.libyuv.try/linux_ubsan_vptr" + } + builders { + name: "buildbucket/luci.libyuv.try/mac" + } + builders { + name: "buildbucket/luci.libyuv.try/mac_asan" + } + builders { + name: "buildbucket/luci.libyuv.try/mac_rel" + } + builders { + name: "buildbucket/luci.libyuv.try/win" + } + builders { + name: "buildbucket/luci.libyuv.try/win_clang" + } + builders { + name: "buildbucket/luci.libyuv.try/win_clang_rel" + } + builders { + 
name: "buildbucket/luci.libyuv.try/win_rel" + } + builders { + name: "buildbucket/luci.libyuv.try/win_x64_clang_rel" + } + builders { + name: "buildbucket/luci.libyuv.try/win_x64_rel" + } + builder_view_only: true +} +logo_url: "https://storage.googleapis.com/chrome-infra-public/logo/libyuv-logo.png" diff --git a/third-party/libyuv/third_party/libyuv/infra/config/luci-scheduler.cfg b/third-party/libyuv/third_party/libyuv/infra/config/luci-scheduler.cfg new file mode 100644 index 0000000000..0ec5dd0e52 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/luci-scheduler.cfg @@ -0,0 +1,385 @@ +# Auto-generated by lucicfg. +# Do not modify manually. +# +# For the schema of this file, see ProjectConfig message: +# https://luci-config.appspot.com/schemas/projects:luci-scheduler.cfg + +job { + id: "Android ARM64 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android ARM64 Debug" + } +} +job { + id: "Android Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android Debug" + } +} +job { + id: "Android Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android Release" + } +} +job { + id: "Android Tester ARM32 Debug (Nexus 5X)" + realm: "ci" + acls { + role: TRIGGERER + granted_to: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + } + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android Tester ARM32 Debug (Nexus 5X)" + } +} +job { + id: "Android Tester ARM32 Release (Nexus 5X)" + realm: "ci" + acls { + role: TRIGGERER + granted_to: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + } + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android Tester ARM32 Release (Nexus 5X)" + } +} +job { + id: "Android Tester ARM64 Debug (Nexus 5X)" + realm: "ci" + acls { + role: TRIGGERER + granted_to: "libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + } + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android Tester ARM64 Debug (Nexus 5X)" + } +} +job { + id: "Android32 x86 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android32 x86 Debug" + } +} +job { + id: "Android64 x64 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Android64 x64 Debug" + } +} +job { + id: "DEPS Autoroller" + realm: "cron" + schedule: "0 14 * * *" + acl_sets: "cron" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "cron" + builder: "DEPS Autoroller" + } +} +job { + id: "Linux Asan" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux Asan" + } +} +job { + id: "Linux MSan" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux MSan" + } +} +job { + id: "Linux Tsan v2" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux Tsan v2" + } +} +job { + id: "Linux UBSan" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux UBSan" + } +} +job { + id: "Linux UBSan vptr" + realm: "ci" + acl_sets: "ci" + 
buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux UBSan vptr" + } +} +job { + id: "Linux32 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux32 Debug" + } +} +job { + id: "Linux32 Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux32 Release" + } +} +job { + id: "Linux64 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux64 Debug" + } +} +job { + id: "Linux64 Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Linux64 Release" + } +} +job { + id: "Mac Asan" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Mac Asan" + } +} +job { + id: "Mac64 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Mac64 Debug" + } +} +job { + id: "Mac64 Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Mac64 Release" + } +} +job { + id: "Win32 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win32 Debug" + } +} +job { + id: "Win32 Debug (Clang)" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win32 Debug (Clang)" + } +} +job { + id: "Win32 Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win32 Release" + } +} +job { + id: "Win32 Release (Clang)" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win32 Release (Clang)" + } +} +job { + id: "Win64 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win64 Debug" + } +} +job { + id: "Win64 Debug (Clang)" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win64 Debug (Clang)" + } +} +job { + id: "Win64 Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win64 Release" + } +} +job { + id: "Win64 Release (Clang)" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "Win64 Release (Clang)" + } +} +job { + id: "iOS ARM64 Debug" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "iOS ARM64 Debug" + } +} +job { + id: "iOS ARM64 Release" + realm: "ci" + acl_sets: "ci" + buildbucket { + server: "cr-buildbucket.appspot.com" + bucket: "ci" + builder: "iOS ARM64 Release" + } +} +trigger { + id: "master-gitiles-trigger" + realm: "ci" + acl_sets: "ci" + triggers: "Android ARM64 Debug" + triggers: "Android Debug" + triggers: "Android Release" + triggers: "Android32 x86 Debug" + triggers: "Android64 x64 Debug" + triggers: "Linux Asan" + triggers: "Linux MSan" + triggers: "Linux Tsan v2" + triggers: "Linux UBSan" + triggers: "Linux UBSan vptr" + triggers: "Linux32 Debug" + triggers: "Linux32 Release" + triggers: "Linux64 Debug" + triggers: "Linux64 Release" + triggers: "Mac Asan" + triggers: "Mac64 Debug" + triggers: "Mac64 Release" + triggers: "Win32 Debug" + triggers: "Win32 
Debug (Clang)" + triggers: "Win32 Release" + triggers: "Win32 Release (Clang)" + triggers: "Win64 Debug" + triggers: "Win64 Debug (Clang)" + triggers: "Win64 Release" + triggers: "Win64 Release (Clang)" + triggers: "iOS ARM64 Debug" + triggers: "iOS ARM64 Release" + gitiles { + repo: "https://chromium.googlesource.com/libyuv/libyuv" + refs: "regexp:refs/heads/main" + } +} +acl_sets { + name: "ci" + acls { + role: OWNER + granted_to: "group:project-libyuv-admins" + } + acls { + granted_to: "group:all" + } +} +acl_sets { + name: "cron" + acls { + role: OWNER + granted_to: "group:project-libyuv-admins" + } + acls { + granted_to: "group:all" + } +} diff --git a/third-party/libyuv/third_party/libyuv/infra/config/main.star b/third-party/libyuv/third_party/libyuv/infra/config/main.star new file mode 100755 index 0000000000..b722b114b4 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/main.star @@ -0,0 +1,377 @@ +#!/usr/bin/env lucicfg +# https://chromium.googlesource.com/infra/luci/luci-go/+/master/lucicfg/doc/ + +"""LUCI project configuration for libyuv CQ and CI.""" + +lucicfg.check_version("1.30.9") + +LIBYUV_GIT = "https://chromium.googlesource.com/libyuv/libyuv" +LIBYUV_GERRIT = "https://chromium-review.googlesource.com/libyuv/libyuv" + +GOMA_BACKEND_RBE_PROD = { + "server_host": "goma.chromium.org", + "use_luci_auth": True, +} + +GOMA_BACKEND_RBE_ATS_PROD = { + "server_host": "goma.chromium.org", + "use_luci_auth": True, + "enable_ats": True, +} + +# Disable ATS on Windows CQ/try. +GOMA_BACKEND_RBE_NO_ATS_PROD = { + "server_host": "goma.chromium.org", + "use_luci_auth": True, + "enable_ats": False, +} + +RECLIENT_CI = { + "instance": "rbe-webrtc-trusted", + "metrics_project": "chromium-reclient-metrics", +} + +RECLIENT_CQ = { + "instance": "rbe-webrtc-untrusted", + "metrics_project": "chromium-reclient-metrics", +} + +# Use LUCI Scheduler BBv2 names and add Scheduler realms configs. +lucicfg.enable_experiment("crbug.com/1182002") + +luci.builder.defaults.experiments.set( + { + "luci.recipes.use_python3": 100, + }, +) + +lucicfg.config( + lint_checks = ["default"], + config_dir = ".", + tracked_files = [ + "commit-queue.cfg", + "cr-buildbucket.cfg", + "luci-logdog.cfg", + "luci-milo.cfg", + "luci-scheduler.cfg", + "project.cfg", + "realms.cfg", + ], +) + +# Generates project.cfg + +luci.project( + name = "libyuv", + buildbucket = "cr-buildbucket.appspot.com", + logdog = "luci-logdog.appspot.com", + milo = "luci-milo.appspot.com", + notify = "luci-notify.appspot.com", + scheduler = "luci-scheduler.appspot.com", + swarming = "chromium-swarm.appspot.com", + acls = [ + acl.entry(acl.PROJECT_CONFIGS_READER, groups = ["all"]), + acl.entry(acl.LOGDOG_READER, groups = ["all"]), + acl.entry(acl.LOGDOG_WRITER, groups = ["luci-logdog-chromium-writers"]), + acl.entry(acl.SCHEDULER_READER, groups = ["all"]), + acl.entry(acl.SCHEDULER_OWNER, groups = ["project-libyuv-admins"]), + acl.entry(acl.BUILDBUCKET_READER, groups = ["all"]), + acl.entry(acl.BUILDBUCKET_OWNER, groups = ["project-libyuv-admins"]), + ], + bindings = [ + luci.binding( + roles = "role/swarming.taskTriggerer", # for LED tasks. 
+ groups = "project-libyuv-admins", + ), + luci.binding( + roles = "role/configs.validator", + users = "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com", + ), + ], +) + +# Generates luci-logdog.cfg + +luci.logdog( + gs_bucket = "chromium-luci-logdog", +) + +# Generates luci-scheduler.cfg + +luci.gitiles_poller( + name = "master-gitiles-trigger", + bucket = "ci", + repo = LIBYUV_GIT, +) + +# Generates luci-milo.cfg + +luci.milo( + logo = "https://storage.googleapis.com/chrome-infra-public/logo/libyuv-logo.png", +) + +def libyuv_ci_view(name, category, short_name): + return luci.console_view_entry( + console_view = "main", + builder = name, + category = category, + short_name = short_name, + ) + +def libyuv_try_view(name): + return luci.list_view_entry( + list_view = "try", + builder = name, + ) + +luci.console_view( + name = "main", + title = "libyuv Main Console", + include_experimental_builds = True, + repo = LIBYUV_GIT, +) + +luci.list_view( + name = "cron", + title = "Cron", + entries = ["DEPS Autoroller"], +) + +luci.list_view( + name = "try", + title = "libyuv Try Builders", +) + +# Generates commit-queue.cfg + +def libyuv_try_job_verifier(name, cq_group, experiment_percentage): + return luci.cq_tryjob_verifier( + builder = name, + cq_group = cq_group, + experiment_percentage = experiment_percentage, + ) + +luci.cq( + status_host = "chromium-cq-status.appspot.com", + submit_max_burst = 4, + submit_burst_delay = 8 * time.minute, +) + +luci.cq_group( + name = "master", + watch = [ + cq.refset( + repo = LIBYUV_GERRIT, + refs = ["refs/heads/main", "refs/heads/master"], + ), + ], + acls = [ + acl.entry(acl.CQ_COMMITTER, groups = ["project-libyuv-committers"]), + acl.entry(acl.CQ_DRY_RUNNER, groups = ["project-libyuv-tryjob-access"]), + ], + retry_config = cq.RETRY_ALL_FAILURES, + cancel_stale_tryjobs = True, +) + +luci.cq_group( + name = "config", + watch = [ + cq.refset( + repo = LIBYUV_GERRIT, + refs = ["refs/heads/infra/config"], + ), + ], + acls = [ + acl.entry(acl.CQ_COMMITTER, groups = ["project-libyuv-committers"]), + acl.entry(acl.CQ_DRY_RUNNER, groups = ["project-libyuv-tryjob-access"]), + ], + retry_config = cq.RETRY_ALL_FAILURES, + cancel_stale_tryjobs = True, +) + +# Generates cr-buildbucket.cfg + +luci.bucket( + name = "ci", +) +luci.bucket( + name = "try", + acls = [ + acl.entry(acl.BUILDBUCKET_TRIGGERER, groups = [ + "project-libyuv-tryjob-access", + "service-account-cq", + ]), + ], +) +luci.bucket( + name = "cron", +) + +def get_os_dimensions(os): + if os == "android": + return {"device_type": "bullhead"} + if os == "ios" or os == "mac": + return {"os": "Mac-10.15", "cpu": "x86-64"} + elif os == "win": + return {"os": "Windows-10", "cores": "8", "cpu": "x86-64"} + elif os == "linux": + return {"os": "Ubuntu-18.04", "cores": "8", "cpu": "x86-64"} + return {} + +def get_os_properties(os, try_builder = False): + if os == "android": + return {"$build/goma": GOMA_BACKEND_RBE_PROD} + elif os in ("ios", "mac"): + return {"$build/goma": GOMA_BACKEND_RBE_PROD} + elif os == "win" and try_builder: + return {"$build/goma": GOMA_BACKEND_RBE_NO_ATS_PROD} + elif os == "win": + return {"$build/goma": GOMA_BACKEND_RBE_ATS_PROD} + elif os == "linux": + return {"$build/goma": GOMA_BACKEND_RBE_ATS_PROD} + return {} + +def libyuv_ci_builder(name, dimensions, properties, triggered_by): + return luci.builder( + name = name, + dimensions = dimensions, + properties = properties, + bucket = "ci", + service_account = 
"libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com", + triggered_by = triggered_by, + swarming_tags = ["vpython:native-python-wrapper"], + execution_timeout = 180 * time.minute, + build_numbers = True, + executable = luci.recipe( + name = "libyuv/libyuv", + cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build", + use_python3 = True, + ), + ) + +def libyuv_try_builder(name, dimensions, properties, recipe_name = "libyuv/libyuv"): + return luci.builder( + name = name, + dimensions = dimensions, + properties = properties, + bucket = "try", + service_account = "libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com", + swarming_tags = ["vpython:native-python-wrapper"], + execution_timeout = 180 * time.minute, + build_numbers = True, + executable = luci.recipe( + name = recipe_name, + cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build", + use_python3 = True, + ), + ) + +def ci_builder(name, os, category, short_name = None): + dimensions = get_os_dimensions(os) + properties = get_os_properties(os) + properties["$build/reclient"] = RECLIENT_CI + + dimensions["pool"] = "luci.flex.ci" + properties["builder_group"] = "client.libyuv" + + triggered_by = ["master-gitiles-trigger" if os != "android" else "Android Debug"] + libyuv_ci_view(name, category, short_name) + return libyuv_ci_builder(name, dimensions, properties, triggered_by) + +def try_builder(name, os, experiment_percentage = None): + dimensions = get_os_dimensions(os) + properties = get_os_properties(os, try_builder = True) + properties["$build/reclient"] = RECLIENT_CQ + + dimensions["pool"] = "luci.flex.try" + properties["builder_group"] = "tryserver.libyuv" + + if name == "presubmit": + recipe_name = "run_presubmit" + properties["repo_name"] = "libyuv" + properties["runhooks"] = True + libyuv_try_job_verifier(name, "config", experiment_percentage) + return libyuv_try_builder(name, dimensions, properties, recipe_name) + + libyuv_try_job_verifier(name, "master", experiment_percentage) + libyuv_try_view(name) + return libyuv_try_builder(name, dimensions, properties) + +luci.builder( + name = "DEPS Autoroller", + bucket = "cron", + service_account = "libyuv-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com", + dimensions = { + "pool": "luci.webrtc.cron", + "os": "Linux", + "cpu": "x86-64", + }, + swarming_tags = ["vpython:native-python-wrapper"], + execution_timeout = 120 * time.minute, + build_numbers = True, + schedule = "0 14 * * *", # Every 2 hours. 
+ executable = luci.recipe( + name = "libyuv/roll_deps", + cipd_package = "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build", + use_python3 = True, + ), +) + +ci_builder("Android ARM64 Debug", "linux", "Android|Builder", "dbg") +ci_builder("Android Debug", "linux", "Android|Builder", "dbg") +ci_builder("Android Release", "linux", "Android|Builder", "rel") +ci_builder("Android32 x86 Debug", "linux", "Android|Builder|x86", "dbg") +ci_builder("Android64 x64 Debug", "linux", "Android|Builder|x64", "dbg") +ci_builder("Android Tester ARM32 Debug (Nexus 5X)", "android", "Android|Tester|ARM 32", "dbg") +ci_builder("Android Tester ARM32 Release (Nexus 5X)", "android", "Android|Tester|ARM 32", "rel") +ci_builder("Android Tester ARM64 Debug (Nexus 5X)", "android", "Android|Tester|ARM 64", "dbg") +ci_builder("Linux Asan", "linux", "Linux", "asan") +ci_builder("Linux MSan", "linux", "Linux", "msan") +ci_builder("Linux Tsan v2", "linux", "Linux", "tsan") +ci_builder("Linux UBSan", "linux", "Linux|UBSan") +ci_builder("Linux UBSan vptr", "linux", "Linux|UBSan", "vptr") +ci_builder("Linux32 Debug", "linux", "Linux|32", "dbg") +ci_builder("Linux32 Release", "linux", "Linux|32", "rel") +ci_builder("Linux64 Debug", "linux", "Linux|64", "dbg") +ci_builder("Linux64 Release", "linux", "Linux|64", "rel") +ci_builder("Mac Asan", "mac", "Mac", "asan") +ci_builder("Mac64 Debug", "mac", "Mac", "dbg") +ci_builder("Mac64 Release", "mac", "Mac", "rel") +ci_builder("Win32 Debug", "win", "Win|32|Debug") +ci_builder("Win32 Debug (Clang)", "win", "Win|32|Debug", "clg") +ci_builder("Win32 Release", "win", "Win|32|Release") +ci_builder("Win32 Release (Clang)", "win", "Win|32|Release", "clg") +ci_builder("Win64 Debug", "win", "Win|64|Debug", "clg") +ci_builder("Win64 Debug (Clang)", "win", "Win|64|Debug", "clg") +ci_builder("Win64 Release", "win", "Win|64|Release") +ci_builder("Win64 Release (Clang)", "win", "Win|64|Release", "clg") +ci_builder("iOS ARM64 Debug", "ios", "iOS|ARM64", "dbg") +ci_builder("iOS ARM64 Release", "ios", "iOS|ARM64", "rel") + +# TODO(crbug.com/1242847): make this not experimental. +try_builder("android", "android", experiment_percentage = 100) +try_builder("android_arm64", "android", experiment_percentage = 100) +try_builder("android_rel", "android", experiment_percentage = 100) + +try_builder("android_x64", "linux") +try_builder("android_x86", "linux") +try_builder("ios_arm64", "ios") +try_builder("ios_arm64_rel", "ios") +try_builder("linux", "linux") +try_builder("linux_asan", "linux") +try_builder("linux_gcc", "linux", experiment_percentage = 100) +try_builder("linux_msan", "linux") +try_builder("linux_rel", "linux") +try_builder("linux_tsan2", "linux") +try_builder("linux_ubsan", "linux") +try_builder("linux_ubsan_vptr", "linux") +try_builder("mac", "mac") +try_builder("mac_asan", "mac") +try_builder("mac_rel", "mac") +try_builder("win", "win") +try_builder("win_clang", "win") +try_builder("win_clang_rel", "win") +try_builder("win_rel", "win") +try_builder("win_x64_clang_rel", "win") +try_builder("win_x64_rel", "win") +try_builder("presubmit", "linux") diff --git a/third-party/libyuv/third_party/libyuv/infra/config/project.cfg b/third-party/libyuv/third_party/libyuv/infra/config/project.cfg new file mode 100644 index 0000000000..52797c1c8a --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/project.cfg @@ -0,0 +1,15 @@ +# Auto-generated by lucicfg. +# Do not modify manually. 
+# +# For the schema of this file, see ProjectCfg message: +# https://luci-config.appspot.com/schemas/projects:project.cfg + +name: "libyuv" +access: "group:all" +lucicfg { + version: "1.32.1" + package_dir: "." + config_dir: "." + entry_point: "main.star" + experiments: "crbug.com/1182002" +} diff --git a/third-party/libyuv/third_party/libyuv/infra/config/realms.cfg b/third-party/libyuv/third_party/libyuv/infra/config/realms.cfg new file mode 100644 index 0000000000..16ffaac90f --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/infra/config/realms.cfg @@ -0,0 +1,83 @@ +# Auto-generated by lucicfg. +# Do not modify manually. +# +# For the schema of this file, see RealmsCfg message: +# https://luci-config.appspot.com/schemas/projects:realms.cfg + +realms { + name: "@root" + bindings { + role: "role/buildbucket.owner" + principals: "group:project-libyuv-admins" + } + bindings { + role: "role/buildbucket.reader" + principals: "group:all" + } + bindings { + role: "role/configs.reader" + principals: "group:all" + } + bindings { + role: "role/configs.validator" + principals: "user:libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + } + bindings { + role: "role/logdog.reader" + principals: "group:all" + } + bindings { + role: "role/logdog.writer" + principals: "group:luci-logdog-chromium-writers" + } + bindings { + role: "role/scheduler.owner" + principals: "group:project-libyuv-admins" + } + bindings { + role: "role/scheduler.reader" + principals: "group:all" + } + bindings { + role: "role/swarming.taskTriggerer" + principals: "group:project-libyuv-admins" + } +} +realms { + name: "ci" + bindings { + role: "role/buildbucket.builderServiceAccount" + principals: "user:libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + } + bindings { + role: "role/scheduler.triggerer" + principals: "user:libyuv-ci-builder@chops-service-accounts.iam.gserviceaccount.com" + conditions { + restrict { + attribute: "scheduler.job.name" + values: "Android Tester ARM32 Debug (Nexus 5X)" + values: "Android Tester ARM32 Release (Nexus 5X)" + values: "Android Tester ARM64 Debug (Nexus 5X)" + } + } + } +} +realms { + name: "cron" + bindings { + role: "role/buildbucket.builderServiceAccount" + principals: "user:libyuv-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com" + } +} +realms { + name: "try" + bindings { + role: "role/buildbucket.builderServiceAccount" + principals: "user:libyuv-try-builder@chops-service-accounts.iam.gserviceaccount.com" + } + bindings { + role: "role/buildbucket.triggerer" + principals: "group:project-libyuv-tryjob-access" + principals: "group:service-account-cq" + } +} diff --git a/third-party/libyuv/third_party/libyuv/libyuv.gni b/third-party/libyuv/third_party/libyuv/libyuv.gni index 8df40ba2d7..852f08ca9d 100644 --- a/third-party/libyuv/third_party/libyuv/libyuv.gni +++ b/third-party/libyuv/third_party/libyuv/libyuv.gni @@ -6,9 +6,9 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
-import("//build_overrides/build.gni") import("//build/config/arm.gni") import("//build/config/mips.gni") +import("//build_overrides/build.gni") declare_args() { libyuv_include_tests = !build_with_chromium diff --git a/third-party/libyuv/third_party/libyuv/libyuv.gyp b/third-party/libyuv/third_party/libyuv/libyuv.gyp new file mode 100644 index 0000000000..f73a1a4b74 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/libyuv.gyp @@ -0,0 +1,162 @@ +# Copyright 2011 The LibYuv Project Authors. All rights reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +{ + 'includes': [ + 'libyuv.gypi', + ], + # Make sure that if we are being compiled to an xcodeproj, nothing tries to + # include a .pch. + 'xcode_settings': { + 'GCC_PREFIX_HEADER': '', + 'GCC_PRECOMPILE_PREFIX_HEADER': 'NO', + }, + 'variables': { + 'use_system_libjpeg%': 0, + # Can be enabled if your jpeg has GYP support. + 'libyuv_disable_jpeg%': 1, + # 'chromium_code' treats libyuv as internal and increases warning level. + 'chromium_code': 1, + # clang compiler default variable usable by other apps that include libyuv. + 'clang%': 0, + # Link-Time Optimizations. + 'use_lto%': 0, + 'mips_msa%': 0, # Default to msa off. + 'build_neon': 0, + 'build_msa': 0, + 'conditions': [ + ['(target_arch == "armv7" or target_arch == "armv7s" or \ + (target_arch == "arm" and arm_version >= 7) or target_arch == "arm64")\ + and (arm_neon == 1 or arm_neon_optional == 1)', { + 'build_neon': 1, + }], + ['(target_arch == "mipsel" or target_arch == "mips64el")\ + and (mips_msa == 1)', + { + 'build_msa': 1, + }], + ], + }, + + 'targets': [ + { + 'target_name': 'libyuv', + # Change type to 'shared_library' to build .so or .dll files. + 'type': 'static_library', + 'variables': { + 'optimize': 'max', # enable O2 and ltcg. + }, + # Allows libyuv.a redistributable library without external dependencies. + 'standalone_static_library': 1, + 'conditions': [ + # Disable -Wunused-parameter + ['clang == 1', { + 'cflags': [ + '-Wno-unused-parameter', + ], + }], + ['build_neon != 0', { + 'defines': [ + 'LIBYUV_NEON', + ], + 'cflags!': [ + '-mfpu=vfp', + '-mfpu=vfpv3', + '-mfpu=vfpv3-d16', + # '-mthumb', # arm32 not thumb + ], + 'conditions': [ + # Disable LTO in libyuv_neon target due to gcc 4.9 compiler bug. + ['clang == 0 and use_lto == 1', { + 'cflags!': [ + '-flto', + '-ffat-lto-objects', + ], + }], + # arm64 does not need -mfpu=neon option as neon is not optional + ['target_arch != "arm64"', { + 'cflags': [ + '-mfpu=neon', + # '-marm', # arm32 not thumb + ], + }], + ], + }], + ['build_msa != 0', { + 'defines': [ + 'LIBYUV_MSA', + ], + }], + ['OS != "ios" and libyuv_disable_jpeg != 1', { + 'defines': [ + 'HAVE_JPEG' + ], + 'conditions': [ + # Caveat system jpeg support may not support motion jpeg + [ 'use_system_libjpeg == 1', { + 'dependencies': [ + '<(DEPTH)/third_party/libjpeg/libjpeg.gyp:libjpeg', + ], + }, { + 'dependencies': [ + '<(DEPTH)/third_party/libjpeg_turbo/libjpeg.gyp:libjpeg', + ], + }], + [ 'use_system_libjpeg == 1', { + 'link_settings': { + 'libraries': [ + '-ljpeg', + ], + } + }], + ], + }], + ], #conditions + 'defines': [ + # Enable the following 3 macros to turn off assembly for specified CPU. 
+ # 'LIBYUV_DISABLE_X86', + # 'LIBYUV_DISABLE_NEON', + # 'LIBYUV_DISABLE_DSPR2', + # Enable the following macro to build libyuv as a shared library (dll). + # 'LIBYUV_USING_SHARED_LIBRARY', + # TODO(fbarchard): Make these into gyp defines. + ], + 'include_dirs': [ + 'include', + '.', + ], + 'direct_dependent_settings': { + 'include_dirs': [ + 'include', + '.', + ], + 'conditions': [ + ['OS == "android" and target_arch == "arm64"', { + 'ldflags': [ + '-Wl,--dynamic-linker,/system/bin/linker64', + ], + }], + ['OS == "android" and target_arch != "arm64"', { + 'ldflags': [ + '-Wl,--dynamic-linker,/system/bin/linker', + ], + }], + ], #conditions + }, + 'sources': [ + '<@(libyuv_sources)', + ], + }, + ], # targets. +} + +# Local Variables: +# tab-width:2 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=2 shiftwidth=2: diff --git a/third-party/libyuv/third_party/libyuv/libyuv.gypi b/third-party/libyuv/third_party/libyuv/libyuv.gypi new file mode 100644 index 0000000000..48936aa7b0 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/libyuv.gypi @@ -0,0 +1,85 @@ +# Copyright 2014 The LibYuv Project Authors. All rights reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +{ + 'variables': { + 'libyuv_sources': [ + # includes. + 'include/libyuv.h', + 'include/libyuv/basic_types.h', + 'include/libyuv/compare.h', + 'include/libyuv/convert.h', + 'include/libyuv/convert_argb.h', + 'include/libyuv/convert_from.h', + 'include/libyuv/convert_from_argb.h', + 'include/libyuv/cpu_id.h', + 'include/libyuv/macros_msa.h', + 'include/libyuv/mjpeg_decoder.h', + 'include/libyuv/planar_functions.h', + 'include/libyuv/rotate.h', + 'include/libyuv/rotate_argb.h', + 'include/libyuv/rotate_row.h', + 'include/libyuv/row.h', + 'include/libyuv/scale.h', + 'include/libyuv/scale_argb.h', + 'include/libyuv/scale_rgb.h', + 'include/libyuv/scale_row.h', + 'include/libyuv/scale_uv.h', + 'include/libyuv/version.h', + 'include/libyuv/video_common.h', + + # sources. 
+ 'source/compare.cc', + 'source/compare_common.cc', + 'source/compare_gcc.cc', + 'source/compare_msa.cc', + 'source/compare_neon.cc', + 'source/compare_neon64.cc', + 'source/compare_win.cc', + 'source/convert.cc', + 'source/convert_argb.cc', + 'source/convert_from.cc', + 'source/convert_from_argb.cc', + 'source/convert_jpeg.cc', + 'source/convert_to_argb.cc', + 'source/convert_to_i420.cc', + 'source/cpu_id.cc', + 'source/mjpeg_decoder.cc', + 'source/mjpeg_validate.cc', + 'source/planar_functions.cc', + 'source/rotate.cc', + 'source/rotate_any.cc', + 'source/rotate_argb.cc', + 'source/rotate_common.cc', + 'source/rotate_gcc.cc', + 'source/rotate_msa.cc', + 'source/rotate_neon.cc', + 'source/rotate_neon64.cc', + 'source/rotate_win.cc', + 'source/row_any.cc', + 'source/row_common.cc', + 'source/row_gcc.cc', + 'source/row_msa.cc', + 'source/row_neon.cc', + 'source/row_neon64.cc', + 'source/row_win.cc', + 'source/scale.cc', + 'source/scale_any.cc', + 'source/scale_argb.cc', + 'source/scale_common.cc', + 'source/scale_gcc.cc', + 'source/scale_msa.cc', + 'source/scale_neon.cc', + 'source/scale_neon64.cc', + 'source/scale_rgb.cc', + 'source/scale_uv.cc', + 'source/scale_win.cc', + 'source/video_common.cc', + ], + } +} diff --git a/third-party/libyuv/third_party/libyuv/linux.mk b/third-party/libyuv/third_party/libyuv/linux.mk index f5e73ea497..b541b47c16 100644 --- a/third-party/libyuv/third_party/libyuv/linux.mk +++ b/third-party/libyuv/third_party/libyuv/linux.mk @@ -13,7 +13,6 @@ LOCAL_OBJ_FILES := \ source/compare.o \ source/compare_common.o \ source/compare_gcc.o \ - source/compare_mmi.o \ source/compare_msa.o \ source/compare_neon.o \ source/compare_neon64.o \ @@ -34,7 +33,6 @@ LOCAL_OBJ_FILES := \ source/rotate_argb.o \ source/rotate_common.o \ source/rotate_gcc.o \ - source/rotate_mmi.o \ source/rotate_msa.o \ source/rotate_neon.o \ source/rotate_neon64.o \ @@ -42,7 +40,6 @@ LOCAL_OBJ_FILES := \ source/row_any.o \ source/row_common.o \ source/row_gcc.o \ - source/row_mmi.o \ source/row_msa.o \ source/row_neon.o \ source/row_neon64.o \ @@ -52,10 +49,10 @@ LOCAL_OBJ_FILES := \ source/scale_argb.o \ source/scale_common.o \ source/scale_gcc.o \ - source/scale_mmi.o \ source/scale_msa.o \ source/scale_neon.o \ source/scale_neon64.o \ + source/scale_rgb.o \ source/scale_uv.o \ source/scale_win.o \ source/video_common.o diff --git a/third-party/libyuv/third_party/libyuv/source/compare.cc b/third-party/libyuv/third_party/libyuv/source/compare.cc index e93aba1b53..d4713b605e 100644 --- a/third-party/libyuv/third_party/libyuv/source/compare.cc +++ b/third-party/libyuv/third_party/libyuv/source/compare.cc @@ -149,11 +149,6 @@ uint64_t ComputeHammingDistance(const uint8_t* src_a, HammingDistance = HammingDistance_AVX2; } #endif -#if defined(HAS_HAMMINGDISTANCE_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - HammingDistance = HammingDistance_MMI; - } -#endif #if defined(HAS_HAMMINGDISTANCE_MSA) if (TestCpuFlag(kCpuHasMSA)) { HammingDistance = HammingDistance_MSA; @@ -211,11 +206,6 @@ uint64_t ComputeSumSquareError(const uint8_t* src_a, SumSquareError = SumSquareError_AVX2; } #endif -#if defined(HAS_SUMSQUAREERROR_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SumSquareError = SumSquareError_MMI; - } -#endif #if defined(HAS_SUMSQUAREERROR_MSA) if (TestCpuFlag(kCpuHasMSA)) { SumSquareError = SumSquareError_MSA; diff --git a/third-party/libyuv/third_party/libyuv/source/convert.cc b/third-party/libyuv/third_party/libyuv/source/convert.cc index 69f7fb6e01..ad0edd1f24 100644 --- 
a/third-party/libyuv/third_party/libyuv/source/convert.cc +++ b/third-party/libyuv/third_party/libyuv/source/convert.cc @@ -15,8 +15,9 @@ #include "libyuv/planar_functions.h" #include "libyuv/rotate.h" #include "libyuv/row.h" -#include "libyuv/scale.h" // For ScalePlane() -#include "libyuv/scale_uv.h" // For UVScale() +#include "libyuv/scale.h" // For ScalePlane() +#include "libyuv/scale_row.h" // For FixedDiv +#include "libyuv/scale_uv.h" // For UVScale() #ifdef __cplusplus namespace libyuv { @@ -83,7 +84,8 @@ int I420Copy(const uint8_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -125,7 +127,8 @@ int I010Copy(const uint16_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -169,7 +172,8 @@ static int Planar16bitTo8bit(const uint16_t* src_y, int uv_width = SUBSAMPLE(width, subsample_x, subsample_x); int uv_height = SUBSAMPLE(height, subsample_y, subsample_y); int scale = 1 << (24 - depth); - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -217,6 +221,55 @@ int I010ToI420(const uint16_t* src_y, 1, 10); } +LIBYUV_API +int I210ToI420(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height) { + const int depth = 10; + const int scale = 1 << (24 - depth); + + if (width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + src_y = src_y + (height - 1) * src_stride_y; + src_u = src_u + (height - 1) * src_stride_u; + src_v = src_v + (height - 1) * src_stride_v; + src_stride_y = -src_stride_y; + src_stride_u = -src_stride_u; + src_stride_v = -src_stride_v; + } + + { + const int uv_width = SUBSAMPLE(width, 1, 1); + const int uv_height = SUBSAMPLE(height, 1, 1); + const int dy = FixedDiv(height, uv_height); + + Convert16To8Plane(src_y, src_stride_y, dst_y, dst_stride_y, scale, width, + height); + ScalePlaneVertical_16To8(height, uv_width, uv_height, src_stride_u, + dst_stride_u, src_u, dst_u, 0, 32768, dy, + /*bpp=*/1, scale, kFilterBilinear); + ScalePlaneVertical_16To8(height, uv_width, uv_height, src_stride_v, + dst_stride_v, src_v, dst_v, 0, 32768, dy, + /*bpp=*/1, scale, kFilterBilinear); + } + return 0; +} + LIBYUV_API int I210ToI422(const uint16_t* src_y, int src_stride_y, @@ -523,6 +576,48 @@ int I422ToI420(const uint8_t* src_y, dst_v, dst_stride_v, width, height, src_uv_width, height); } +LIBYUV_API +int I422ToI210(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int width, + int height) { + int halfwidth = (width + 1) >> 1; + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + src_y = src_y + (height - 1) * src_stride_y; + src_u = src_u + (height - 1) * src_stride_u; + src_v = src_v + (height - 1) * src_stride_v; + src_stride_y = -src_stride_y; + src_stride_u = -src_stride_u; + src_stride_v = -src_stride_v; + } + + // Convert Y plane. + Convert8To16Plane(src_y, src_stride_y, dst_y, dst_stride_y, 1024, width, + height); + // Convert UV planes. + Convert8To16Plane(src_u, src_stride_u, dst_u, dst_stride_u, 1024, halfwidth, + height); + Convert8To16Plane(src_v, src_stride_v, dst_v, dst_stride_v, 1024, halfwidth, + height); + return 0; +} + // TODO(fbarchard): Implement row conversion. LIBYUV_API int I422ToNV21(const uint8_t* src_y, @@ -564,6 +659,79 @@ int I422ToNV21(const uint8_t* src_y, return 0; } +LIBYUV_API +int MM21ToNV12(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_uv, + int dst_stride_uv, + int width, + int height) { + if (!src_uv || !dst_uv || width <= 0) { + return -1; + } + + int sign = height < 0 ? -1 : 1; + + if (dst_y) { + DetilePlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height, 32); + } + DetilePlane(src_uv, src_stride_uv, dst_uv, dst_stride_uv, (width + 1) & ~1, + (height + sign) / 2, 16); + + return 0; +} + +LIBYUV_API +int MM21ToI420(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height) { + int sign = height < 0 ? 
-1 : 1; + + if (!src_uv || !dst_u || !dst_v || width <= 0) { + return -1; + } + + if (dst_y) { + DetilePlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height, 32); + } + DetileSplitUVPlane(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, + dst_stride_v, (width + 1) & ~1, (height + sign) / 2, 16); + + return 0; +} + +LIBYUV_API +int MM21ToYUY2(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_yuy2, + int dst_stride_yuy2, + int width, + int height) { + if (!src_y || !src_uv || !dst_yuy2 || width <= 0) { + return -1; + } + + DetileToYUY2(src_y, src_stride_y, src_uv, src_stride_uv, dst_yuy2, + dst_stride_yuy2, width, height, 32); + + return 0; +} + #ifdef I422TONV21_ROW_VERSION // Unittest fails for this version. // 422 chroma is 1/2 width, 1x height @@ -628,14 +796,6 @@ int I422ToNV21(const uint8_t* src_y, } } #endif -#if defined(HAS_MERGEUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeUVRow = MergeUVRow_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - MergeUVRow = MergeUVRow_MMI; - } - } -#endif #if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MergeUVRow = MergeUVRow_Any_MSA; @@ -644,6 +804,14 @@ int I422ToNV21(const uint8_t* src_y, } } #endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow = MergeUVRow_Any_LSX; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow = MergeUVRow_LSX; + } + } +#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -668,14 +836,6 @@ int I422ToNV21(const uint8_t* src_y, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -684,6 +844,14 @@ int I422ToNV21(const uint8_t* src_y, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif if (dst_y) { CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, halfwidth, height); @@ -747,8 +915,7 @@ int I444ToNV12(const uint8_t* src_y, int dst_stride_uv, int width, int height) { - if (!src_y || !src_u || !src_v || !dst_y || !dst_uv || width <= 0 || - height == 0) { + if (!src_y || !src_u || !src_v || !dst_uv || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. 
@@ -1073,18 +1240,6 @@ int YUY2ToI420(const uint8_t* src_yuy2, } } #endif -#if defined(HAS_YUY2TOYROW_MMI) && defined(HAS_YUY2TOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - YUY2ToYRow = YUY2ToYRow_Any_MMI; - YUY2ToUVRow = YUY2ToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - YUY2ToYRow = YUY2ToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - YUY2ToUVRow = YUY2ToUVRow_MMI; - } - } - } -#endif #if defined(HAS_YUY2TOYROW_MSA) && defined(HAS_YUY2TOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { YUY2ToYRow = YUY2ToYRow_Any_MSA; @@ -1095,6 +1250,16 @@ int YUY2ToI420(const uint8_t* src_yuy2, } } #endif +#if defined(HAS_YUY2TOYROW_LASX) && defined(HAS_YUY2TOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + YUY2ToYRow = YUY2ToYRow_Any_LASX; + YUY2ToUVRow = YUY2ToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + YUY2ToYRow = YUY2ToYRow_LASX; + YUY2ToUVRow = YUY2ToUVRow_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { YUY2ToUVRow(src_yuy2, src_stride_yuy2, dst_u, dst_v, width); @@ -1166,16 +1331,6 @@ int UYVYToI420(const uint8_t* src_uyvy, } } #endif -#if defined(HAS_UYVYTOYROW_MMI) && defined(HAS_UYVYTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - UYVYToYRow = UYVYToYRow_Any_MMI; - UYVYToUVRow = UYVYToUVRow_Any_MMI; - if (IS_ALIGNED(width, 16)) { - UYVYToYRow = UYVYToYRow_MMI; - UYVYToUVRow = UYVYToUVRow_MMI; - } - } -#endif #if defined(HAS_UYVYTOYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { UYVYToYRow = UYVYToYRow_Any_MSA; @@ -1186,6 +1341,16 @@ int UYVYToI420(const uint8_t* src_uyvy, } } #endif +#if defined(HAS_UYVYTOYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + UYVYToYRow = UYVYToYRow_Any_LASX; + UYVYToUVRow = UYVYToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + UYVYToYRow = UYVYToYRow_LASX; + UYVYToUVRow = UYVYToUVRow_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { UYVYToUVRow(src_uyvy, src_stride_uyvy, dst_u, dst_v, width); @@ -1368,47 +1533,51 @@ int ARGBToI420(const uint8_t* src_argb, src_argb = src_argb + (height - 1) * src_stride_argb; src_stride_argb = -src_stride_argb; } -#if defined(HAS_ARGBTOYROW_NEON) && defined(HAS_ARGBTOUVROW_NEON) +#if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - ARGBToUVRow = ARGBToUVRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_NEON; - } } } #endif -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBToUVRow = ARGBToUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_NEON; + } + } +#endif +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - ARGBToUVRow = 
ARGBToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; } } #endif @@ -1424,6 +1593,16 @@ int ARGBToI420(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { ARGBToUVRow(src_argb, src_stride_argb, dst_u, dst_v, width); @@ -1468,20 +1647,10 @@ int BGRAToI420(const uint8_t* src_bgra, src_bgra = src_bgra + (height - 1) * src_stride_bgra; src_stride_bgra = -src_stride_bgra; } -#if defined(HAS_BGRATOYROW_SSSE3) && defined(HAS_BGRATOUVROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - BGRAToUVRow = BGRAToUVRow_Any_SSSE3; - BGRAToYRow = BGRAToYRow_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - BGRAToUVRow = BGRAToUVRow_SSSE3; - BGRAToYRow = BGRAToYRow_SSSE3; - } - } -#endif #if defined(HAS_BGRATOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { BGRAToYRow = BGRAToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { BGRAToYRow = BGRAToYRow_NEON; } } @@ -1494,15 +1663,35 @@ int BGRAToI420(const uint8_t* src_bgra, } } #endif -#if defined(HAS_BGRATOYROW_MMI) && defined(HAS_BGRATOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - BGRAToYRow = BGRAToYRow_Any_MMI; - BGRAToUVRow = BGRAToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - BGRAToYRow = BGRAToYRow_MMI; - } +#if defined(HAS_BGRATOYROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + BGRAToYRow = BGRAToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - BGRAToUVRow = BGRAToUVRow_MMI; + BGRAToYRow = BGRAToYRow_SSSE3; + } + } +#endif +#if defined(HAS_BGRATOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + BGRAToUVRow = BGRAToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + BGRAToUVRow = BGRAToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_BGRATOYROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + BGRAToYRow = BGRAToYRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + BGRAToYRow = BGRAToYRow_AVX2; + } + } +#endif +#if defined(HAS_BGRATOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + BGRAToUVRow = BGRAToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + BGRAToUVRow = BGRAToUVRow_AVX2; } } #endif @@ -1512,10 +1701,22 @@ int BGRAToI420(const uint8_t* src_bgra, BGRAToUVRow = BGRAToUVRow_Any_MSA; if (IS_ALIGNED(width, 16)) { BGRAToYRow = BGRAToYRow_MSA; + } + if (IS_ALIGNED(width, 32)) { BGRAToUVRow = BGRAToUVRow_MSA; } } #endif +#if defined(HAS_BGRATOYROW_LASX) && defined(HAS_BGRATOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + BGRAToYRow = BGRAToYRow_Any_LASX; + BGRAToUVRow = BGRAToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + BGRAToYRow = BGRAToYRow_LASX; + BGRAToUVRow = BGRAToUVRow_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { BGRAToUVRow(src_bgra, src_stride_bgra, dst_u, dst_v, width); @@ -1560,30 +1761,42 @@ int ABGRToI420(const uint8_t* src_abgr, src_abgr = src_abgr + (height - 1) * src_stride_abgr; src_stride_abgr = -src_stride_abgr; } -#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3) +#if defined(HAS_ABGRTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ABGRToUVRow = ABGRToUVRow_Any_SSSE3; ABGRToYRow = ABGRToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - 
ABGRToUVRow = ABGRToUVRow_SSSE3; ABGRToYRow = ABGRToYRow_SSSE3; } } #endif -#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2) +#if defined(HAS_ABGRTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToUVRow = ABGRToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToUVRow = ABGRToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ABGRToUVRow = ABGRToUVRow_Any_AVX2; ABGRToYRow = ABGRToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ABGRToUVRow = ABGRToUVRow_AVX2; ABGRToYRow = ABGRToYRow_AVX2; } } #endif +#if defined(HAS_ABGRTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToUVRow = ABGRToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToUVRow = ABGRToUVRow_AVX2; + } + } +#endif #if defined(HAS_ABGRTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ABGRToYRow = ABGRToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ABGRToYRow = ABGRToYRow_NEON; } } @@ -1596,18 +1809,6 @@ int ABGRToI420(const uint8_t* src_abgr, } } #endif -#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ABGRToYRow = ABGRToYRow_Any_MMI; - ABGRToUVRow = ABGRToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ABGRToYRow = ABGRToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ABGRToUVRow = ABGRToUVRow_MMI; - } - } -#endif #if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ABGRToYRow = ABGRToYRow_Any_MSA; @@ -1618,6 +1819,16 @@ int ABGRToI420(const uint8_t* src_abgr, } } #endif +#if defined(HAS_ABGRTOYROW_LSX) && defined(HAS_ABGRTOUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ABGRToYRow = ABGRToYRow_Any_LSX; + ABGRToUVRow = ABGRToUVRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ABGRToYRow = ABGRToYRow_LSX; + ABGRToUVRow = ABGRToUVRow_LSX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { ABGRToUVRow(src_abgr, src_stride_abgr, dst_u, dst_v, width); @@ -1662,20 +1873,26 @@ int RGBAToI420(const uint8_t* src_rgba, src_rgba = src_rgba + (height - 1) * src_stride_rgba; src_stride_rgba = -src_stride_rgba; } -#if defined(HAS_RGBATOYROW_SSSE3) && defined(HAS_RGBATOUVROW_SSSE3) +#if defined(HAS_RGBATOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - RGBAToUVRow = RGBAToUVRow_Any_SSSE3; RGBAToYRow = RGBAToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - RGBAToUVRow = RGBAToUVRow_SSSE3; RGBAToYRow = RGBAToYRow_SSSE3; } } #endif +#if defined(HAS_RGBATOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + RGBAToUVRow = RGBAToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + RGBAToUVRow = RGBAToUVRow_SSSE3; + } + } +#endif #if defined(HAS_RGBATOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RGBAToYRow = RGBAToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RGBAToYRow = RGBAToYRow_NEON; } } @@ -1688,18 +1905,6 @@ int RGBAToI420(const uint8_t* src_rgba, } } #endif -#if defined(HAS_RGBATOYROW_MMI) && defined(HAS_RGBATOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGBAToYRow = RGBAToYRow_Any_MMI; - RGBAToUVRow = RGBAToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RGBAToYRow = RGBAToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - RGBAToUVRow = RGBAToUVRow_MMI; - } - } -#endif #if defined(HAS_RGBATOYROW_MSA) && defined(HAS_RGBATOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RGBAToYRow = RGBAToYRow_Any_MSA; @@ -1710,6 +1915,16 @@ int RGBAToI420(const uint8_t* src_rgba, } } #endif +#if defined(HAS_RGBATOYROW_LSX) && defined(HAS_RGBATOUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RGBAToYRow = RGBAToYRow_Any_LSX; + RGBAToUVRow = RGBAToUVRow_Any_LSX; + if 
(IS_ALIGNED(width, 16)) { + RGBAToYRow = RGBAToYRow_LSX; + RGBAToUVRow = RGBAToUVRow_LSX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { RGBAToUVRow(src_rgba, src_stride_rgba, dst_u, dst_v, width); @@ -1727,6 +1942,12 @@ int RGBAToI420(const uint8_t* src_rgba, return 0; } +// Enabled if 1 pass is available +#if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \ + defined(HAS_RGB24TOYROW_LSX)) +#define HAS_RGB24TOYROW +#endif + // Convert RGB24 to I420. LIBYUV_API int RGB24ToI420(const uint8_t* src_rgb24, @@ -1740,8 +1961,7 @@ int RGB24ToI420(const uint8_t* src_rgb24, int width, int height) { int y; -#if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \ - defined(HAS_RGB24TOYROW_MMI)) +#if defined(HAS_RGB24TOYROW) void (*RGB24ToUVRow)(const uint8_t* src_rgb24, int src_stride_rgb24, uint8_t* dst_u, uint8_t* dst_v, int width) = RGB24ToUVRow_C; @@ -1766,29 +1986,16 @@ int RGB24ToI420(const uint8_t* src_rgb24, src_stride_rgb24 = -src_stride_rgb24; } +#if defined(HAS_RGB24TOYROW) + // Neon version does direct RGB24 to YUV. #if defined(HAS_RGB24TOYROW_NEON) && defined(HAS_RGB24TOUVROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RGB24ToUVRow = RGB24ToUVRow_Any_NEON; RGB24ToYRow = RGB24ToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RGB24ToYRow = RGB24ToYRow_NEON; - if (IS_ALIGNED(width, 16)) { - RGB24ToUVRow = RGB24ToUVRow_NEON; - } - } - } -// MMI and MSA version does direct RGB24 to YUV. -#elif (defined(HAS_RGB24TOYROW_MMI) || defined(HAS_RGB24TOYROW_MSA)) -#if defined(HAS_RGB24TOYROW_MMI) && defined(HAS_RGB24TOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGB24ToUVRow = RGB24ToUVRow_Any_MMI; - RGB24ToYRow = RGB24ToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RGB24ToYRow = RGB24ToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - RGB24ToUVRow = RGB24ToUVRow_MMI; - } + RGB24ToUVRow = RGB24ToUVRow_NEON; } } #endif @@ -1802,16 +2009,30 @@ int RGB24ToI420(const uint8_t* src_rgb24, } } #endif -// Other platforms do intermediate conversion from RGB24 to ARGB. -#else -#if defined(HAS_RGB24TOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - RGB24ToARGBRow = RGB24ToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - RGB24ToARGBRow = RGB24ToARGBRow_NEON; +#if defined(HAS_RGB24TOYROW_LSX) && defined(HAS_RGB24TOUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RGB24ToUVRow = RGB24ToUVRow_Any_LSX; + RGB24ToYRow = RGB24ToYRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RGB24ToYRow = RGB24ToYRow_LSX; + RGB24ToUVRow = RGB24ToUVRow_LSX; } } #endif +#if defined(HAS_RGB24TOYROW_LASX) && defined(HAS_RGB24TOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + RGB24ToUVRow = RGB24ToUVRow_Any_LASX; + RGB24ToYRow = RGB24ToYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + RGB24ToYRow = RGB24ToYRow_LASX; + RGB24ToUVRow = RGB24ToUVRow_LASX; + } + } +#endif + +// Other platforms do intermediate conversion from RGB24 to ARGB. 
+#else // HAS_RGB24TOYROW + #if defined(HAS_RGB24TOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3; @@ -1820,60 +2041,58 @@ int RGB24ToI420(const uint8_t* src_rgb24, } } #endif -#if defined(HAS_ARGBTOYROW_NEON) && defined(HAS_ARGBTOUVROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUVRow = ARGBToUVRow_Any_NEON; - ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_NEON; - } - } - } -#endif -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif +#endif // HAS_RGB24TOYROW { -#if !(defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \ - defined(HAS_RGB24TOYROW_MMI)) +#if !defined(HAS_RGB24TOYROW) // Allocate 2 rows of ARGB. - const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { -#if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \ - defined(HAS_RGB24TOYROW_MMI)) +#if defined(HAS_RGB24TOYROW) RGB24ToUVRow(src_rgb24, src_stride_rgb24, dst_u, dst_v, width); RGB24ToYRow(src_rgb24, dst_y, width); RGB24ToYRow(src_rgb24 + src_stride_rgb24, dst_y + dst_stride_y, width); #else RGB24ToARGBRow(src_rgb24, row, width); - RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + kRowSize, width); - ARGBToUVRow(row, kRowSize, dst_u, dst_v, width); + RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + row_size, width); + ARGBToUVRow(row, row_size, dst_u, dst_v, width); ARGBToYRow(row, dst_y, width); - ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYRow(row + row_size, dst_y + dst_stride_y, width); #endif src_rgb24 += src_stride_rgb24 * 2; dst_y += dst_stride_y * 2; @@ -1881,8 +2100,7 @@ int RGB24ToI420(const uint8_t* src_rgb24, dst_v += dst_stride_v; } if (height & 1) { -#if (defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \ - defined(HAS_RGB24TOYROW_MMI)) +#if defined(HAS_RGB24TOYROW) RGB24ToUVRow(src_rgb24, 0, dst_u, dst_v, width); RGB24ToYRow(src_rgb24, dst_y, width); #else @@ -1891,15 +2109,19 @@ int RGB24ToI420(const uint8_t* src_rgb24, ARGBToYRow(row, dst_y, width); #endif } -#if !(defined(HAS_RGB24TOYROW_NEON) || defined(HAS_RGB24TOYROW_MSA) || \ - defined(HAS_RGB24TOYROW_MMI)) +#if !defined(HAS_RGB24TOYROW) free_aligned_buffer_64(row); #endif } return 0; } +#undef HAS_RGB24TOYROW + +// Enabled if 1 pass is available +#if defined(HAS_RGB24TOYJROW_NEON) || defined(HAS_RGB24TOYJROW_MSA) +#define HAS_RGB24TOYJROW 
+#endif -// TODO(fbarchard): Use Matrix version to implement I420 and J420. // Convert RGB24 to J420. LIBYUV_API int RGB24ToJ420(const uint8_t* src_rgb24, @@ -1913,8 +2135,7 @@ int RGB24ToJ420(const uint8_t* src_rgb24, int width, int height) { int y; -#if (defined(HAS_RGB24TOYJROW_NEON) && defined(HAS_RGB24TOUVJROW_NEON)) || \ - defined(HAS_RGB24TOYJROW_MSA) || defined(HAS_RGB24TOYJROW_MMI) +#if defined(HAS_RGB24TOYJROW) void (*RGB24ToUVJRow)(const uint8_t* src_rgb24, int src_stride_rgb24, uint8_t* dst_u, uint8_t* dst_v, int width) = RGB24ToUVJRow_C; @@ -1939,29 +2160,16 @@ int RGB24ToJ420(const uint8_t* src_rgb24, src_stride_rgb24 = -src_stride_rgb24; } +#if defined(HAS_RGB24TOYJROW) + // Neon version does direct RGB24 to YUV. #if defined(HAS_RGB24TOYJROW_NEON) && defined(HAS_RGB24TOUVJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RGB24ToUVJRow = RGB24ToUVJRow_Any_NEON; RGB24ToYJRow = RGB24ToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RGB24ToYJRow = RGB24ToYJRow_NEON; - if (IS_ALIGNED(width, 16)) { - RGB24ToUVJRow = RGB24ToUVJRow_NEON; - } - } - } -// MMI and MSA version does direct RGB24 to YUV. -#elif (defined(HAS_RGB24TOYJROW_MMI) || defined(HAS_RGB24TOYJROW_MSA)) -#if defined(HAS_RGB24TOYJROW_MMI) && defined(HAS_RGB24TOUVJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGB24ToUVJRow = RGB24ToUVJRow_Any_MMI; - RGB24ToYJRow = RGB24ToYJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RGB24ToYJRow = RGB24ToYJRow_MMI; - if (IS_ALIGNED(width, 16)) { - RGB24ToUVJRow = RGB24ToUVJRow_MMI; - } + RGB24ToUVJRow = RGB24ToUVJRow_NEON; } } #endif @@ -1975,15 +2183,10 @@ int RGB24ToJ420(const uint8_t* src_rgb24, } } #endif -#else -#if defined(HAS_RGB24TOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - RGB24ToARGBRow = RGB24ToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - RGB24ToARGBRow = RGB24ToARGBRow_NEON; - } - } -#endif + +// Other platforms do intermediate conversion from RGB24 to ARGB. 
+#else // HAS_RGB24TOYJROW + #if defined(HAS_RGB24TOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3; @@ -1992,60 +2195,58 @@ int RGB24ToJ420(const uint8_t* src_rgb24, } } #endif -#if defined(HAS_ARGBTOYJROW_NEON) && defined(HAS_ARGBTOUVJROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUVJRow = ARGBToUVJRow_Any_NEON; - ARGBToYJRow = ARGBToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_NEON; - } - } - } -#endif -#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3) +#if defined(HAS_ARGBTOYJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; ARGBToYJRow = ARGBToYJRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_SSSE3; ARGBToYJRow = ARGBToYJRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYJROW_AVX2) && defined(HAS_ARGBTOUVJROW_AVX2) +#if defined(HAS_ARGBTOYJROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; ARGBToYJRow = ARGBToYJRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVJRow = ARGBToUVJRow_AVX2; ARGBToYJRow = ARGBToYJRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVJRow = ARGBToUVJRow_SSSE3; + } + } #endif +#if defined(HAS_ARGBTOUVJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVJRow = ARGBToUVJRow_AVX2; + } + } +#endif +#endif // HAS_RGB24TOYJROW { -#if !((defined(HAS_RGB24TOYJROW_NEON) && defined(HAS_RGB24TOUVJROW_NEON)) || \ - defined(HAS_RGB24TOYJROW_MSA) || defined(HAS_RGB24TOYJROW_MMI)) +#if !defined(HAS_RGB24TOYJROW) // Allocate 2 rows of ARGB. 
- const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { -#if ((defined(HAS_RGB24TOYJROW_NEON) && defined(HAS_RGB24TOUVJROW_NEON)) || \ - defined(HAS_RGB24TOYJROW_MSA) || defined(HAS_RGB24TOYJROW_MMI)) +#if defined(HAS_RGB24TOYJROW) RGB24ToUVJRow(src_rgb24, src_stride_rgb24, dst_u, dst_v, width); RGB24ToYJRow(src_rgb24, dst_y, width); RGB24ToYJRow(src_rgb24 + src_stride_rgb24, dst_y + dst_stride_y, width); #else RGB24ToARGBRow(src_rgb24, row, width); - RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + kRowSize, width); - ARGBToUVJRow(row, kRowSize, dst_u, dst_v, width); + RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + row_size, width); + ARGBToUVJRow(row, row_size, dst_u, dst_v, width); ARGBToYJRow(row, dst_y, width); - ARGBToYJRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYJRow(row + row_size, dst_y + dst_stride_y, width); #endif src_rgb24 += src_stride_rgb24 * 2; dst_y += dst_stride_y * 2; @@ -2053,8 +2254,7 @@ int RGB24ToJ420(const uint8_t* src_rgb24, dst_v += dst_stride_v; } if (height & 1) { -#if ((defined(HAS_RGB24TOYJROW_NEON) && defined(HAS_RGB24TOUVJROW_NEON)) || \ - defined(HAS_RGB24TOYJROW_MSA) || defined(HAS_RGB24TOYJROW_MMI)) +#if defined(HAS_RGB24TOYJROW) RGB24ToUVJRow(src_rgb24, 0, dst_u, dst_v, width); RGB24ToYJRow(src_rgb24, dst_y, width); #else @@ -2063,13 +2263,19 @@ int RGB24ToJ420(const uint8_t* src_rgb24, ARGBToYJRow(row, dst_y, width); #endif } -#if !((defined(HAS_RGB24TOYJROW_NEON) && defined(HAS_RGB24TOUVJROW_NEON)) || \ - defined(HAS_RGB24TOYJROW_MSA) || defined(HAS_RGB24TOYJROW_MMI)) +#if !defined(HAS_RGB24TOYJROW) free_aligned_buffer_64(row); #endif } return 0; } +#undef HAS_RGB24TOYJROW + +// Enabled if 1 pass is available +#if (defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \ + defined(HAS_RAWTOYROW_LSX)) +#define HAS_RAWTOYROW +#endif // Convert RAW to I420. LIBYUV_API @@ -2084,8 +2290,7 @@ int RAWToI420(const uint8_t* src_raw, int width, int height) { int y; -#if (defined(HAS_RAWTOYROW_NEON) && defined(HAS_RAWTOUVROW_NEON)) || \ - defined(HAS_RAWTOYROW_MSA) || defined(HAS_RAWTOYROW_MMI) +#if defined(HAS_RAWTOYROW) void (*RAWToUVRow)(const uint8_t* src_raw, int src_stride_raw, uint8_t* dst_u, uint8_t* dst_v, int width) = RAWToUVRow_C; void (*RAWToYRow)(const uint8_t* src_raw, uint8_t* dst_y, int width) = @@ -2109,29 +2314,16 @@ int RAWToI420(const uint8_t* src_raw, src_stride_raw = -src_stride_raw; } +#if defined(HAS_RAWTOYROW) + // Neon version does direct RAW to YUV. #if defined(HAS_RAWTOYROW_NEON) && defined(HAS_RAWTOUVROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RAWToUVRow = RAWToUVRow_Any_NEON; RAWToYRow = RAWToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RAWToYRow = RAWToYRow_NEON; - if (IS_ALIGNED(width, 16)) { - RAWToUVRow = RAWToUVRow_NEON; - } - } - } -// MMI and MSA version does direct RAW to YUV. 
-#elif (defined(HAS_RAWTOYROW_MMI) || defined(HAS_RAWTOYROW_MSA)) -#if defined(HAS_RAWTOYROW_MMI) && defined(HAS_RAWTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RAWToUVRow = RAWToUVRow_Any_MMI; - RAWToYRow = RAWToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RAWToYRow = RAWToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - RAWToUVRow = RAWToUVRow_MMI; - } + RAWToUVRow = RAWToUVRow_NEON; } } #endif @@ -2145,28 +2337,30 @@ int RAWToI420(const uint8_t* src_raw, } } #endif +#if defined(HAS_RAWTOYROW_LSX) && defined(HAS_RAWTOUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RAWToUVRow = RAWToUVRow_Any_LSX; + RAWToYRow = RAWToYRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RAWToYRow = RAWToYRow_LSX; + RAWToUVRow = RAWToUVRow_LSX; + } + } +#endif +#if defined(HAS_RAWTOYROW_LASX) && defined(HAS_RAWTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + RAWToUVRow = RAWToUVRow_Any_LASX; + RAWToYRow = RAWToYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + RAWToYRow = RAWToYRow_LASX; + RAWToUVRow = RAWToUVRow_LASX; + } + } +#endif + // Other platforms do intermediate conversion from RAW to ARGB. -#else -#if defined(HAS_RAWTOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - RAWToARGBRow = RAWToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - RAWToARGBRow = RAWToARGBRow_NEON; - } - } -#endif -#if defined(HAS_ARGBTOYROW_NEON) && defined(HAS_ARGBTOUVROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUVRow = ARGBToUVRow_Any_NEON; - ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_NEON; - } - } - } -#endif +#else // HAS_RAWTOYROW + #if defined(HAS_RAWTOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { RAWToARGBRow = RAWToARGBRow_Any_SSSE3; @@ -2175,48 +2369,58 @@ int RAWToI420(const uint8_t* src_raw, } } #endif -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif +#endif // HAS_RAWTOYROW { -#if !(defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \ - defined(HAS_RAWTOYROW_MMI)) +#if !defined(HAS_RAWTOYROW) // Allocate 2 rows of ARGB. 
- const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { -#if (defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \ - defined(HAS_RAWTOYROW_MMI)) +#if defined(HAS_RAWTOYROW) RAWToUVRow(src_raw, src_stride_raw, dst_u, dst_v, width); RAWToYRow(src_raw, dst_y, width); RAWToYRow(src_raw + src_stride_raw, dst_y + dst_stride_y, width); #else RAWToARGBRow(src_raw, row, width); - RAWToARGBRow(src_raw + src_stride_raw, row + kRowSize, width); - ARGBToUVRow(row, kRowSize, dst_u, dst_v, width); + RAWToARGBRow(src_raw + src_stride_raw, row + row_size, width); + ARGBToUVRow(row, row_size, dst_u, dst_v, width); ARGBToYRow(row, dst_y, width); - ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYRow(row + row_size, dst_y + dst_stride_y, width); #endif src_raw += src_stride_raw * 2; dst_y += dst_stride_y * 2; @@ -2224,8 +2428,7 @@ int RAWToI420(const uint8_t* src_raw, dst_v += dst_stride_v; } if (height & 1) { -#if (defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \ - defined(HAS_RAWTOYROW_MMI)) +#if defined(HAS_RAWTOYROW) RAWToUVRow(src_raw, 0, dst_u, dst_v, width); RAWToYRow(src_raw, dst_y, width); #else @@ -2234,15 +2437,19 @@ int RAWToI420(const uint8_t* src_raw, ARGBToYRow(row, dst_y, width); #endif } -#if !(defined(HAS_RAWTOYROW_NEON) || defined(HAS_RAWTOYROW_MSA) || \ - defined(HAS_RAWTOYROW_MMI)) +#if !defined(HAS_RAWTOYROW) free_aligned_buffer_64(row); #endif } return 0; } +#undef HAS_RAWTOYROW + +// Enabled if 1 pass is available +#if defined(HAS_RAWTOYJROW_NEON) || defined(HAS_RAWTOYJROW_MSA) +#define HAS_RAWTOYJROW +#endif -// TODO(fbarchard): Use Matrix version to implement I420 and J420. // Convert RAW to J420. LIBYUV_API int RAWToJ420(const uint8_t* src_raw, @@ -2256,8 +2463,7 @@ int RAWToJ420(const uint8_t* src_raw, int width, int height) { int y; -#if (defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON)) || \ - defined(HAS_RAWTOYJROW_MSA) || defined(HAS_RAWTOYJROW_MMI) +#if defined(HAS_RAWTOYJROW) void (*RAWToUVJRow)(const uint8_t* src_raw, int src_stride_raw, uint8_t* dst_u, uint8_t* dst_v, int width) = RAWToUVJRow_C; @@ -2282,29 +2488,16 @@ int RAWToJ420(const uint8_t* src_raw, src_stride_raw = -src_stride_raw; } +#if defined(HAS_RAWTOYJROW) + // Neon version does direct RAW to YUV. #if defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RAWToUVJRow = RAWToUVJRow_Any_NEON; RAWToYJRow = RAWToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RAWToYJRow = RAWToYJRow_NEON; - if (IS_ALIGNED(width, 16)) { - RAWToUVJRow = RAWToUVJRow_NEON; - } - } - } -// MMI and MSA version does direct RAW to YUV. 
-#elif (defined(HAS_RAWTOYJROW_MMI) || defined(HAS_RAWTOYJROW_MSA)) -#if defined(HAS_RAWTOYJROW_MMI) && defined(HAS_RAWTOUVJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RAWToUVJRow = RAWToUVJRow_Any_MMI; - RAWToYJRow = RAWToYJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RAWToYJRow = RAWToYJRow_MMI; - if (IS_ALIGNED(width, 16)) { - RAWToUVJRow = RAWToUVJRow_MMI; - } + RAWToUVJRow = RAWToUVJRow_NEON; } } #endif @@ -2318,27 +2511,10 @@ int RAWToJ420(const uint8_t* src_raw, } } #endif -#else -#if defined(HAS_RAWTOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - RAWToARGBRow = RAWToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - RAWToARGBRow = RAWToARGBRow_NEON; - } - } -#endif -#if defined(HAS_ARGBTOYJROW_NEON) && defined(HAS_ARGBTOUVJROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUVJRow = ARGBToUVJRow_Any_NEON; - ARGBToYJRow = ARGBToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_NEON; - } - } - } -#endif + +// Other platforms do intermediate conversion from RAW to ARGB. +#else // HAS_RAWTOYJROW + #if defined(HAS_RAWTOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { RAWToARGBRow = RAWToARGBRow_Any_SSSE3; @@ -2347,48 +2523,58 @@ int RAWToJ420(const uint8_t* src_raw, } } #endif -#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3) +#if defined(HAS_ARGBTOYJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; ARGBToYJRow = ARGBToYJRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_SSSE3; ARGBToYJRow = ARGBToYJRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYJROW_AVX2) && defined(HAS_ARGBTOUVJROW_AVX2) +#if defined(HAS_ARGBTOYJROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; ARGBToYJRow = ARGBToYJRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVJRow = ARGBToUVJRow_AVX2; ARGBToYJRow = ARGBToYJRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVJRow = ARGBToUVJRow_SSSE3; + } + } #endif +#if defined(HAS_ARGBTOUVJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVJRow = ARGBToUVJRow_AVX2; + } + } +#endif +#endif // HAS_RAWTOYJROW { -#if !((defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON)) || \ - defined(HAS_RAWTOYJROW_MSA) || defined(HAS_RAWTOYJROW_MMI)) +#if !defined(HAS_RAWTOYJROW) // Allocate 2 rows of ARGB. 
- const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { -#if ((defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON)) || \ - defined(HAS_RAWTOYJROW_MSA) || defined(HAS_RAWTOYJROW_MMI)) +#if defined(HAS_RAWTOYJROW) RAWToUVJRow(src_raw, src_stride_raw, dst_u, dst_v, width); RAWToYJRow(src_raw, dst_y, width); RAWToYJRow(src_raw + src_stride_raw, dst_y + dst_stride_y, width); #else RAWToARGBRow(src_raw, row, width); - RAWToARGBRow(src_raw + src_stride_raw, row + kRowSize, width); - ARGBToUVJRow(row, kRowSize, dst_u, dst_v, width); + RAWToARGBRow(src_raw + src_stride_raw, row + row_size, width); + ARGBToUVJRow(row, row_size, dst_u, dst_v, width); ARGBToYJRow(row, dst_y, width); - ARGBToYJRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYJRow(row + row_size, dst_y + dst_stride_y, width); #endif src_raw += src_stride_raw * 2; dst_y += dst_stride_y * 2; @@ -2396,8 +2582,7 @@ int RAWToJ420(const uint8_t* src_raw, dst_v += dst_stride_v; } if (height & 1) { -#if ((defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON)) || \ - defined(HAS_RAWTOYJROW_MSA) || defined(HAS_RAWTOYJROW_MMI)) +#if defined(HAS_RAWTOYJROW) RAWToUVJRow(src_raw, 0, dst_u, dst_v, width); RAWToYJRow(src_raw, dst_y, width); #else @@ -2406,13 +2591,13 @@ int RAWToJ420(const uint8_t* src_raw, ARGBToYJRow(row, dst_y, width); #endif } -#if !((defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON)) || \ - defined(HAS_RAWTOYJROW_MSA) || defined(HAS_RAWTOYJROW_MMI)) +#if !defined(HAS_RAWTOYJROW) free_aligned_buffer_64(row); #endif } return 0; } +#undef HAS_RAWTOYJROW // Convert RGB565 to I420. LIBYUV_API @@ -2428,7 +2613,7 @@ int RGB565ToI420(const uint8_t* src_rgb565, int height) { int y; #if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \ - defined(HAS_RGB565TOYROW_MMI)) + defined(HAS_RGB565TOYROW_LSX) || defined(HAS_RGB565TOYROW_LASX)) void (*RGB565ToUVRow)(const uint8_t* src_rgb565, int src_stride_rgb565, uint8_t* dst_u, uint8_t* dst_v, int width) = RGB565ToUVRow_C; @@ -2465,20 +2650,9 @@ int RGB565ToI420(const uint8_t* src_rgb565, } } } -// MMI and MSA version does direct RGB565 to YUV. -#elif (defined(HAS_RGB565TOYROW_MMI) || defined(HAS_RGB565TOYROW_MSA)) -#if defined(HAS_RGB565TOYROW_MMI) && defined(HAS_RGB565TOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGB565ToUVRow = RGB565ToUVRow_Any_MMI; - RGB565ToYRow = RGB565ToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RGB565ToYRow = RGB565ToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - RGB565ToUVRow = RGB565ToUVRow_MMI; - } - } - } -#endif +// MSA version does direct RGB565 to YUV. 
+#elif (defined(HAS_RGB565TOYROW_MSA) || defined(HAS_RGB565TOYROW_LSX) || \ + defined(HAS_RGB565TOYROW_LASX)) #if defined(HAS_RGB565TOYROW_MSA) && defined(HAS_RGB565TOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RGB565ToUVRow = RGB565ToUVRow_Any_MSA; @@ -2489,6 +2663,26 @@ int RGB565ToI420(const uint8_t* src_rgb565, } } #endif +#if defined(HAS_RGB565TOYROW_LSX) && defined(HAS_RGB565TOUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RGB565ToUVRow = RGB565ToUVRow_Any_LSX; + RGB565ToYRow = RGB565ToYRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RGB565ToYRow = RGB565ToYRow_LSX; + RGB565ToUVRow = RGB565ToUVRow_LSX; + } + } +#endif +#if defined(HAS_RGB565TOYROW_LASX) && defined(HAS_RGB565TOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + RGB565ToUVRow = RGB565ToUVRow_Any_LASX; + RGB565ToYRow = RGB565ToYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + RGB565ToYRow = RGB565ToYRow_LASX; + RGB565ToUVRow = RGB565ToUVRow_LASX; + } + } +#endif // Other platforms do intermediate conversion from RGB565 to ARGB. #else #if defined(HAS_RGB565TOARGBROW_SSE2) @@ -2507,46 +2701,58 @@ int RGB565ToI420(const uint8_t* src_rgb565, } } #endif -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif #endif { #if !(defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \ - defined(HAS_RGB565TOYROW_MMI)) + defined(HAS_RGB565TOYROW_LSX) || defined(HAS_RGB565TOYROW_LASX)) // Allocate 2 rows of ARGB. 
- const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { #if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \ - defined(HAS_RGB565TOYROW_MMI)) + defined(HAS_RGB565TOYROW_LSX) || defined(HAS_RGB565TOYROW_LASX)) RGB565ToUVRow(src_rgb565, src_stride_rgb565, dst_u, dst_v, width); RGB565ToYRow(src_rgb565, dst_y, width); RGB565ToYRow(src_rgb565 + src_stride_rgb565, dst_y + dst_stride_y, width); #else RGB565ToARGBRow(src_rgb565, row, width); - RGB565ToARGBRow(src_rgb565 + src_stride_rgb565, row + kRowSize, width); - ARGBToUVRow(row, kRowSize, dst_u, dst_v, width); + RGB565ToARGBRow(src_rgb565 + src_stride_rgb565, row + row_size, width); + ARGBToUVRow(row, row_size, dst_u, dst_v, width); ARGBToYRow(row, dst_y, width); - ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYRow(row + row_size, dst_y + dst_stride_y, width); #endif src_rgb565 += src_stride_rgb565 * 2; dst_y += dst_stride_y * 2; @@ -2555,7 +2761,7 @@ int RGB565ToI420(const uint8_t* src_rgb565, } if (height & 1) { #if (defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \ - defined(HAS_RGB565TOYROW_MMI)) + defined(HAS_RGB565TOYROW_LSX) || defined(HAS_RGB565TOYROW_LASX)) RGB565ToUVRow(src_rgb565, 0, dst_u, dst_v, width); RGB565ToYRow(src_rgb565, dst_y, width); #else @@ -2565,7 +2771,7 @@ int RGB565ToI420(const uint8_t* src_rgb565, #endif } #if !(defined(HAS_RGB565TOYROW_NEON) || defined(HAS_RGB565TOYROW_MSA) || \ - defined(HAS_RGB565TOYROW_MMI)) + defined(HAS_RGB565TOYROW_LSX) || defined(HAS_RGB565TOYROW_LASX)) free_aligned_buffer_64(row); #endif } @@ -2586,7 +2792,7 @@ int ARGB1555ToI420(const uint8_t* src_argb1555, int height) { int y; #if (defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \ - defined(HAS_ARGB1555TOYROW_MMI)) + defined(HAS_ARGB1555TOYROW_LSX) || defined(HAS_ARGB1555TOYROW_LASX)) void (*ARGB1555ToUVRow)(const uint8_t* src_argb1555, int src_stride_argb1555, uint8_t* dst_u, uint8_t* dst_v, int width) = ARGB1555ToUVRow_C; @@ -2624,20 +2830,9 @@ int ARGB1555ToI420(const uint8_t* src_argb1555, } } } -// MMI and MSA version does direct ARGB1555 to YUV. -#elif (defined(HAS_ARGB1555TOYROW_MMI) || defined(HAS_ARGB1555TOYROW_MSA)) -#if defined(HAS_ARGB1555TOYROW_MMI) && defined(HAS_ARGB1555TOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGB1555ToUVRow = ARGB1555ToUVRow_Any_MMI; - ARGB1555ToYRow = ARGB1555ToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGB1555ToYRow = ARGB1555ToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - ARGB1555ToUVRow = ARGB1555ToUVRow_MMI; - } - } - } -#endif +// MSA version does direct ARGB1555 to YUV. 
+#elif (defined(HAS_ARGB1555TOYROW_MSA) || defined(HAS_ARGB1555TOYROW_LSX) || \ + defined(HAS_ARGB1555TOYROW_LASX)) #if defined(HAS_ARGB1555TOYROW_MSA) && defined(HAS_ARGB1555TOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGB1555ToUVRow = ARGB1555ToUVRow_Any_MSA; @@ -2648,6 +2843,26 @@ int ARGB1555ToI420(const uint8_t* src_argb1555, } } #endif +#if defined(HAS_ARGB1555TOYROW_LSX) && defined(HAS_ARGB1555TOUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGB1555ToUVRow = ARGB1555ToUVRow_Any_LSX; + ARGB1555ToYRow = ARGB1555ToYRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ARGB1555ToYRow = ARGB1555ToYRow_LSX; + ARGB1555ToUVRow = ARGB1555ToUVRow_LSX; + } + } +#endif +#if defined(HAS_ARGB1555TOYROW_LASX) && defined(HAS_ARGB1555TOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGB1555ToUVRow = ARGB1555ToUVRow_Any_LASX; + ARGB1555ToYRow = ARGB1555ToYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGB1555ToYRow = ARGB1555ToYRow_LASX; + ARGB1555ToUVRow = ARGB1555ToUVRow_LASX; + } + } +#endif // Other platforms do intermediate conversion from ARGB1555 to ARGB. #else #if defined(HAS_ARGB1555TOARGBROW_SSE2) @@ -2666,49 +2881,61 @@ int ARGB1555ToI420(const uint8_t* src_argb1555, } } #endif -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif #endif { #if !(defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \ - defined(HAS_ARGB1555TOYROW_MMI)) + defined(HAS_ARGB1555TOYROW_LSX) || defined(HAS_ARGB1555TOYROW_LASX)) // Allocate 2 rows of ARGB. 
- const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { #if (defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \ - defined(HAS_ARGB1555TOYROW_MMI)) + defined(HAS_ARGB1555TOYROW_LSX) || defined(HAS_ARGB1555TOYROW_LASX)) ARGB1555ToUVRow(src_argb1555, src_stride_argb1555, dst_u, dst_v, width); ARGB1555ToYRow(src_argb1555, dst_y, width); ARGB1555ToYRow(src_argb1555 + src_stride_argb1555, dst_y + dst_stride_y, width); #else ARGB1555ToARGBRow(src_argb1555, row, width); - ARGB1555ToARGBRow(src_argb1555 + src_stride_argb1555, row + kRowSize, + ARGB1555ToARGBRow(src_argb1555 + src_stride_argb1555, row + row_size, width); - ARGBToUVRow(row, kRowSize, dst_u, dst_v, width); + ARGBToUVRow(row, row_size, dst_u, dst_v, width); ARGBToYRow(row, dst_y, width); - ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYRow(row + row_size, dst_y + dst_stride_y, width); #endif src_argb1555 += src_stride_argb1555 * 2; dst_y += dst_stride_y * 2; @@ -2717,7 +2944,7 @@ int ARGB1555ToI420(const uint8_t* src_argb1555, } if (height & 1) { #if (defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \ - defined(HAS_ARGB1555TOYROW_MMI)) + defined(HAS_ARGB1555TOYROW_LSX) || defined(HAS_ARGB1555TOYROW_LASX)) ARGB1555ToUVRow(src_argb1555, 0, dst_u, dst_v, width); ARGB1555ToYRow(src_argb1555, dst_y, width); #else @@ -2727,7 +2954,7 @@ int ARGB1555ToI420(const uint8_t* src_argb1555, #endif } #if !(defined(HAS_ARGB1555TOYROW_NEON) || defined(HAS_ARGB1555TOYROW_MSA) || \ - defined(HAS_ARGB1555TOYROW_MMI)) + defined(HAS_ARGB1555TOYROW_LSX) || defined(HAS_ARGB1555TOYROW_LASX)) free_aligned_buffer_64(row); #endif } @@ -2747,7 +2974,7 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, int width, int height) { int y; -#if (defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI)) +#if defined(HAS_ARGB4444TOYROW_NEON) void (*ARGB4444ToUVRow)(const uint8_t* src_argb4444, int src_stride_argb4444, uint8_t* dst_u, uint8_t* dst_v, int width) = ARGB4444ToUVRow_C; @@ -2785,17 +3012,6 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, } } } -#elif defined(HAS_ARGB4444TOYROW_MMI) && defined(HAS_ARGB4444TOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGB4444ToUVRow = ARGB4444ToUVRow_Any_MMI; - ARGB4444ToYRow = ARGB4444ToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGB4444ToYRow = ARGB4444ToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - ARGB4444ToUVRow = ARGB4444ToUVRow_MMI; - } - } - } // Other platforms do intermediate conversion from ARGB4444 to ARGB. 
#else #if defined(HAS_ARGB4444TOARGBROW_SSE2) @@ -2822,35 +3038,51 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, } } #endif -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGB4444TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_ARGB4444TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_LASX; + } + } +#endif +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToUVRow = ARGBToUVRow_Any_MMI; - ARGBToYRow = ARGBToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; - } +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; } } #endif @@ -2866,28 +3098,38 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif #endif { -#if !(defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI)) +#if !(defined(HAS_ARGB4444TOYROW_NEON)) // Allocate 2 rows of ARGB. 
- const int kRowSize = (width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); #endif for (y = 0; y < height - 1; y += 2) { -#if (defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI)) +#if defined(HAS_ARGB4444TOYROW_NEON) ARGB4444ToUVRow(src_argb4444, src_stride_argb4444, dst_u, dst_v, width); ARGB4444ToYRow(src_argb4444, dst_y, width); ARGB4444ToYRow(src_argb4444 + src_stride_argb4444, dst_y + dst_stride_y, width); #else ARGB4444ToARGBRow(src_argb4444, row, width); - ARGB4444ToARGBRow(src_argb4444 + src_stride_argb4444, row + kRowSize, + ARGB4444ToARGBRow(src_argb4444 + src_stride_argb4444, row + row_size, width); - ARGBToUVRow(row, kRowSize, dst_u, dst_v, width); + ARGBToUVRow(row, row_size, dst_u, dst_v, width); ARGBToYRow(row, dst_y, width); - ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width); + ARGBToYRow(row + row_size, dst_y + dst_stride_y, width); #endif src_argb4444 += src_stride_argb4444 * 2; dst_y += dst_stride_y * 2; @@ -2895,7 +3137,7 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, dst_v += dst_stride_v; } if (height & 1) { -#if (defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI)) +#if defined(HAS_ARGB4444TOYROW_NEON) ARGB4444ToUVRow(src_argb4444, 0, dst_u, dst_v, width); ARGB4444ToYRow(src_argb4444, dst_y, width); #else @@ -2904,7 +3146,7 @@ int ARGB4444ToI420(const uint8_t* src_argb4444, ARGBToYRow(row, dst_y, width); #endif } -#if !(defined(HAS_ARGB4444TOYROW_NEON) || defined(HAS_ARGB4444TOYROW_MMI)) +#if !(defined(HAS_ARGB4444TOYROW_NEON)) free_aligned_buffer_64(row); #endif } @@ -2955,19 +3197,11 @@ int RGB24ToJ400(const uint8_t* src_rgb24, #if defined(HAS_RGB24TOYJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RGB24ToYJRow = RGB24ToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RGB24ToYJRow = RGB24ToYJRow_NEON; } } #endif -#if defined(HAS_RGB24TOYJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGB24ToYJRow = RGB24ToYJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RGB24ToYJRow = RGB24ToYJRow_MMI; - } - } -#endif #if defined(HAS_RGB24TOYJROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RGB24ToYJRow = RGB24ToYJRow_Any_MSA; @@ -2999,6 +3233,7 @@ int RAWToJ400(const uint8_t* src_raw, if (!src_raw || !dst_yj || width <= 0 || height == 0) { return -1; } + if (height < 0) { height = -height; src_raw = src_raw + (height - 1) * src_stride_raw; @@ -3010,6 +3245,7 @@ int RAWToJ400(const uint8_t* src_raw, height = 1; src_stride_raw = dst_stride_yj = 0; } + #if defined(HAS_RAWTOYJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { RAWToYJRow = RAWToYJRow_Any_SSSE3; @@ -3029,19 +3265,11 @@ int RAWToJ400(const uint8_t* src_raw, #if defined(HAS_RAWTOYJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { RAWToYJRow = RAWToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { RAWToYJRow = RAWToYJRow_NEON; } } #endif -#if defined(HAS_RAWTOYJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RAWToYJRow = RAWToYJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RAWToYJRow = RAWToYJRow_MMI; - } - } -#endif #if defined(HAS_RAWTOYJROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RAWToYJRow = RAWToYJRow_Any_MSA; @@ -3059,18 +3287,6 @@ int RAWToJ400(const uint8_t* src_raw, return 0; } -static void SplitPixels(const uint8_t* src_u, - int src_pixel_stride_uv, - uint8_t* dst_u, - int width) { - int i; - for (i = 0; i < width; ++i) { - *dst_u = *src_u; - ++dst_u; - src_u += src_pixel_stride_uv; - } -} - // Convert Android420 to I420. 
LIBYUV_API int Android420ToI420(const uint8_t* src_y, @@ -3088,58 +3304,10 @@ int Android420ToI420(const uint8_t* src_y, int dst_stride_v, int width, int height) { - int y; - const ptrdiff_t vu_off = src_v - src_u; - int halfwidth = (width + 1) >> 1; - int halfheight = (height + 1) >> 1; - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - halfheight = (height + 1) >> 1; - src_y = src_y + (height - 1) * src_stride_y; - src_u = src_u + (halfheight - 1) * src_stride_u; - src_v = src_v + (halfheight - 1) * src_stride_v; - src_stride_y = -src_stride_y; - src_stride_u = -src_stride_u; - src_stride_v = -src_stride_v; - } - - if (dst_y) { - CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); - } - - // Copy UV planes as is - I420 - if (src_pixel_stride_uv == 1) { - CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight); - CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight); - return 0; - // Split UV planes - NV21 - } - if (src_pixel_stride_uv == 2 && vu_off == -1 && - src_stride_u == src_stride_v) { - SplitUVPlane(src_v, src_stride_v, dst_v, dst_stride_v, dst_u, dst_stride_u, - halfwidth, halfheight); - return 0; - // Split UV planes - NV12 - } - if (src_pixel_stride_uv == 2 && vu_off == 1 && src_stride_u == src_stride_v) { - SplitUVPlane(src_u, src_stride_u, dst_u, dst_stride_u, dst_v, dst_stride_v, - halfwidth, halfheight); - return 0; - } - - for (y = 0; y < halfheight; ++y) { - SplitPixels(src_u, src_pixel_stride_uv, dst_u, halfwidth); - SplitPixels(src_v, src_pixel_stride_uv, dst_v, halfwidth); - src_u += src_stride_u; - src_v += src_stride_v; - dst_u += dst_stride_u; - dst_v += dst_stride_v; - } - return 0; + return Android420ToI420Rotate(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, src_pixel_stride_uv, dst_y, + dst_stride_y, dst_u, dst_stride_u, dst_v, + dst_stride_v, width, height, kRotate0); } #ifdef __cplusplus diff --git a/third-party/libyuv/third_party/libyuv/source/convert_argb.cc b/third-party/libyuv/third_party/libyuv/source/convert_argb.cc index d8f7b27738..64425c5967 100644 --- a/third-party/libyuv/third_party/libyuv/source/convert_argb.cc +++ b/third-party/libyuv/third_party/libyuv/source/convert_argb.cc @@ -10,6 +10,9 @@ #include "libyuv/convert_argb.h" +#include <assert.h> + +#include "libyuv/convert_from_argb.h" +#include "libyuv/cpu_id.h" #ifdef HAVE_JPEG #include "libyuv/mjpeg_decoder.h" @@ -17,6 +20,7 @@ #include "libyuv/planar_functions.h" // For CopyPlane and ARGBShuffle.
#include "libyuv/rotate_argb.h" #include "libyuv/row.h" +#include "libyuv/scale_row.h" // For ScaleRowUp2_Linear and ScaleRowUp2_Bilinear #include "libyuv/video_common.h" #ifdef __cplusplus @@ -65,6 +69,7 @@ int I420ToARGBMatrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -90,6 +95,15 @@ int I420ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGBROW_AVX512BW) + if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == + (kCpuHasAVX512BW | kCpuHasAVX512VL)) { + I422ToARGBRow = I422ToARGBRow_Any_AVX512BW; + if (IS_ALIGNED(width, 32)) { + I422ToARGBRow = I422ToARGBRow_AVX512BW; + } + } +#endif #if defined(HAS_I422TOARGBROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { I422ToARGBRow = I422ToARGBRow_Any_NEON; @@ -98,14 +112,6 @@ int I420ToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToARGBRow = I422ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToARGBRow = I422ToARGBRow_MMI; - } - } -#endif #if defined(HAS_I422TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToARGBRow = I422ToARGBRow_Any_MSA; @@ -114,6 +120,14 @@ int I420ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToARGBRow = I422ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToARGBRow = I422ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToARGBRow(src_y, src_u, src_v, dst_argb, yuvconstants, width); @@ -289,6 +303,7 @@ int I422ToARGBMatrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -321,6 +336,15 @@ int I422ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGBROW_AVX512BW) + if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == + (kCpuHasAVX512BW | kCpuHasAVX512VL)) { + I422ToARGBRow = I422ToARGBRow_Any_AVX512BW; + if (IS_ALIGNED(width, 32)) { + I422ToARGBRow = I422ToARGBRow_AVX512BW; + } + } +#endif #if defined(HAS_I422TOARGBROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { I422ToARGBRow = I422ToARGBRow_Any_NEON; @@ -329,14 +353,6 @@ int I422ToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToARGBRow = I422ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToARGBRow = I422ToARGBRow_MMI; - } - } -#endif #if defined(HAS_I422TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToARGBRow = I422ToARGBRow_Any_MSA; @@ -345,6 +361,14 @@ int I422ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToARGBRow = I422ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToARGBRow = I422ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToARGBRow(src_y, src_u, src_v, dst_argb, yuvconstants, width); @@ -518,6 +542,7 @@ int I444ToARGBMatrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I444ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -558,14 +583,6 @@ int I444ToARGBMatrix(const uint8_t* src_y, } } #endif -#if 
defined(HAS_I444TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I444ToARGBRow = I444ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I444ToARGBRow = I444ToARGBRow_MMI; - } - } -#endif #if defined(HAS_I444TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I444ToARGBRow = I444ToARGBRow_Any_MSA; @@ -574,6 +591,14 @@ int I444ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I444TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + I444ToARGBRow = I444ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + I444ToARGBRow = I444ToARGBRow_LSX; + } + } +#endif for (y = 0; y < height; ++y) { I444ToARGBRow(src_y, src_u, src_v, dst_argb, yuvconstants, width); @@ -729,6 +754,128 @@ int U444ToABGR(const uint8_t* src_y, width, height); } +// Convert I444 to RGB24 with matrix. +LIBYUV_API +int I444ToRGB24Matrix(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I444ToRGB24Row)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I444ToRGB24Row_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_rgb24 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_rgb24 = dst_rgb24 + (height - 1) * dst_stride_rgb24; + dst_stride_rgb24 = -dst_stride_rgb24; + } + // Coalesce rows. + if (src_stride_y == width && src_stride_u == width && src_stride_v == width && + dst_stride_rgb24 == width * 3) { + width *= height; + height = 1; + src_stride_y = src_stride_u = src_stride_v = dst_stride_rgb24 = 0; + } +#if defined(HAS_I444TORGB24ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444ToRGB24Row = I444ToRGB24Row_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + I444ToRGB24Row = I444ToRGB24Row_SSSE3; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444ToRGB24Row = I444ToRGB24Row_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + I444ToRGB24Row = I444ToRGB24Row_AVX2; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444ToRGB24Row = I444ToRGB24Row_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444ToRGB24Row = I444ToRGB24Row_NEON; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I444ToRGB24Row = I444ToRGB24Row_Any_MSA; + if (IS_ALIGNED(width, 8)) { + I444ToRGB24Row = I444ToRGB24Row_MSA; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + I444ToRGB24Row = I444ToRGB24Row_Any_LSX; + if (IS_ALIGNED(width, 16)) { + I444ToRGB24Row = I444ToRGB24Row_LSX; + } + } +#endif + + for (y = 0; y < height; ++y) { + I444ToRGB24Row(src_y, src_u, src_v, dst_rgb24, yuvconstants, width); + dst_rgb24 += dst_stride_rgb24; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + return 0; +} + +// Convert I444 to RGB24. +LIBYUV_API +int I444ToRGB24(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + int width, + int height) { + return I444ToRGB24Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_rgb24, dst_stride_rgb24, + &kYuvI601Constants, width, height); +} + +// Convert I444 to RAW. 
+LIBYUV_API +int I444ToRAW(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_raw, + int dst_stride_raw, + int width, + int height) { + return I444ToRGB24Matrix(src_y, src_stride_y, src_v, + src_stride_v, // Swap U and V + src_u, src_stride_u, dst_raw, dst_stride_raw, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + // Convert 10 bit YUV to ARGB with matrix. // TODO(fbarchard): Consider passing scale multiplier to I210ToARGB to // multiply 10 bit yuv into high bits to allow any number of bits. @@ -749,6 +896,7 @@ int I010ToAR30Matrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I210ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -908,6 +1056,7 @@ int I012ToAR30Matrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I212ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -965,6 +1114,7 @@ int I210ToAR30Matrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I210ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -1119,6 +1269,7 @@ int I410ToAR30Matrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I410ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -1172,6 +1323,7 @@ int I010ToARGBMatrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I210ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -1335,6 +1487,7 @@ int I012ToARGBMatrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I212ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -1390,6 +1543,7 @@ int I210ToARGBMatrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I210ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -1550,6 +1704,7 @@ int I410ToARGBMatrix(const uint16_t* src_y, const uint16_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I410ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -1599,6 +1754,7 @@ int P010ToARGBMatrix(const uint16_t* src_y, void (*P210ToARGBRow)( const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = P210ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -1649,6 +1805,7 @@ int P210ToARGBMatrix(const uint16_t* src_y, void (*P210ToARGBRow)( const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = P210ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_argb || width 
<= 0 || height == 0) { return -1; } @@ -1697,6 +1854,7 @@ int P010ToAR30Matrix(const uint16_t* src_y, void (*P210ToAR30Row)( const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = P210ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -1747,6 +1905,7 @@ int P210ToAR30Matrix(const uint16_t* src_y, void (*P210ToAR30Row)( const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = P210ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -1805,7 +1964,9 @@ int I420AlphaToARGBMatrix(const uint8_t* src_y, int width) = I422AlphaToARGBRow_C; void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = ARGBAttenuateRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -1838,14 +1999,6 @@ int I420AlphaToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422ALPHATOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422AlphaToARGBRow = I422AlphaToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422AlphaToARGBRow = I422AlphaToARGBRow_MMI; - } - } -#endif #if defined(HAS_I422ALPHATOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422AlphaToARGBRow = I422AlphaToARGBRow_Any_MSA; @@ -1854,6 +2007,14 @@ int I420AlphaToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422ALPHATOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_LASX; + } + } +#endif #if defined(HAS_ARGBATTENUATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; @@ -1878,14 +2039,6 @@ int I420AlphaToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -1936,7 +2089,9 @@ int I422AlphaToARGBMatrix(const uint8_t* src_y, int width) = I422AlphaToARGBRow_C; void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = ARGBAttenuateRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. 
@@ -1969,14 +2124,6 @@ int I422AlphaToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422ALPHATOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422AlphaToARGBRow = I422AlphaToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422AlphaToARGBRow = I422AlphaToARGBRow_MMI; - } - } -#endif #if defined(HAS_I422ALPHATOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422AlphaToARGBRow = I422AlphaToARGBRow_Any_MSA; @@ -1985,6 +2132,14 @@ int I422AlphaToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422ALPHATOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_LASX; + } + } +#endif #if defined(HAS_ARGBATTENUATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; @@ -2009,14 +2164,6 @@ int I422AlphaToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -2065,7 +2212,9 @@ int I444AlphaToARGBMatrix(const uint8_t* src_y, int width) = I444AlphaToARGBRow_C; void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = ARGBAttenuateRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -2098,14 +2247,6 @@ int I444AlphaToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I444ALPHATOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I444AlphaToARGBRow = I444AlphaToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I444AlphaToARGBRow = I444AlphaToARGBRow_MMI; - } - } -#endif #if defined(HAS_I444ALPHATOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I444AlphaToARGBRow = I444AlphaToARGBRow_Any_MSA; @@ -2138,14 +2279,6 @@ int I444AlphaToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -2323,7 +2456,9 @@ int I010AlphaToARGBMatrix(const uint16_t* src_y, int width) = I210AlphaToARGBRow_C; void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = ARGBAttenuateRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. 
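// Editor's note (not part of the patch): two changes recur throughout the
// hunks in this file. First, the MIPS MMI row-dispatch blocks are removed
// and LoongArch LSX (128-bit) and LASX (256-bit) blocks are added, each
// guarded by its HAS_*_LSX / HAS_*_LASX macro plus a TestCpuFlag(kCpuHasLSX)
// or TestCpuFlag(kCpuHasLASX) check. Second, every *Matrix converter now
// asserts that yuvconstants is non-null, and the alpha converters also
// reject a null src_a plane in their argument checks, returning -1.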
@@ -2372,14 +2507,6 @@ int I010AlphaToARGBMatrix(const uint16_t* src_y, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -2430,7 +2557,9 @@ int I210AlphaToARGBMatrix(const uint16_t* src_y, int width) = I210AlphaToARGBRow_C; void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = ARGBAttenuateRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -2479,14 +2608,6 @@ int I210AlphaToARGBMatrix(const uint16_t* src_y, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -2535,7 +2656,9 @@ int I410AlphaToARGBMatrix(const uint16_t* src_y, int width) = I410AlphaToARGBRow_C; void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) = ARGBAttenuateRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -2584,14 +2707,6 @@ int I410AlphaToARGBMatrix(const uint16_t* src_y, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -2629,6 +2744,7 @@ int I400ToARGBMatrix(const uint8_t* src_y, void (*I400ToARGBRow)(const uint8_t* y_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I400ToARGBRow_C; + assert(yuvconstants); if (!src_y || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -2668,14 +2784,6 @@ int I400ToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I400TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I400ToARGBRow = I400ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - I400ToARGBRow = I400ToARGBRow_MMI; - } - } -#endif #if defined(HAS_I400TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I400ToARGBRow = I400ToARGBRow_Any_MSA; @@ -2684,6 +2792,14 @@ int I400ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I400TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + I400ToARGBRow = I400ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + I400ToARGBRow = I400ToARGBRow_LSX; + } + } +#endif for (y = 0; y < height; ++y) { I400ToARGBRow(src_y, dst_argb, yuvconstants, width); @@ -2755,14 +2871,6 @@ int J400ToARGB(const uint8_t* src_y, } } #endif -#if defined(HAS_J400TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - J400ToARGBRow = J400ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - J400ToARGBRow = J400ToARGBRow_MMI; - } - } -#endif #if defined(HAS_J400TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { J400ToARGBRow = J400ToARGBRow_Any_MSA; @@ -2770,6 +2878,14 @@ int 
J400ToARGB(const uint8_t* src_y, J400ToARGBRow = J400ToARGBRow_MSA; } } +#endif +#if defined(HAS_J400TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + J400ToARGBRow = J400ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + J400ToARGBRow = J400ToARGBRow_LSX; + } + } #endif for (y = 0; y < height; ++y) { J400ToARGBRow(src_y, dst_argb, width); @@ -2909,14 +3025,6 @@ int RGB24ToARGB(const uint8_t* src_rgb24, } } #endif -#if defined(HAS_RGB24TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGB24ToARGBRow = RGB24ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - RGB24ToARGBRow = RGB24ToARGBRow_MMI; - } - } -#endif #if defined(HAS_RGB24TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RGB24ToARGBRow = RGB24ToARGBRow_Any_MSA; @@ -2925,6 +3033,22 @@ int RGB24ToARGB(const uint8_t* src_rgb24, } } #endif +#if defined(HAS_RGB24TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RGB24ToARGBRow = RGB24ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RGB24ToARGBRow = RGB24ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_RGB24TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + RGB24ToARGBRow = RGB24ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + RGB24ToARGBRow = RGB24ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { RGB24ToARGBRow(src_rgb24, dst_argb, width); @@ -2976,14 +3100,6 @@ int RAWToARGB(const uint8_t* src_raw, } } #endif -#if defined(HAS_RAWTOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RAWToARGBRow = RAWToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - RAWToARGBRow = RAWToARGBRow_MMI; - } - } -#endif #if defined(HAS_RAWTOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RAWToARGBRow = RAWToARGBRow_Any_MSA; @@ -2992,6 +3108,22 @@ int RAWToARGB(const uint8_t* src_raw, } } #endif +#if defined(HAS_RAWTOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RAWToARGBRow = RAWToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RAWToARGBRow = RAWToARGBRow_LSX; + } + } +#endif +#if defined(HAS_RAWTOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + RAWToARGBRow = RAWToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + RAWToARGBRow = RAWToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { RAWToARGBRow(src_raw, dst_argb, width); @@ -3102,14 +3234,6 @@ int RGB565ToARGB(const uint8_t* src_rgb565, } } #endif -#if defined(HAS_RGB565TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGB565ToARGBRow = RGB565ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - RGB565ToARGBRow = RGB565ToARGBRow_MMI; - } - } -#endif #if defined(HAS_RGB565TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RGB565ToARGBRow = RGB565ToARGBRow_Any_MSA; @@ -3118,6 +3242,22 @@ int RGB565ToARGB(const uint8_t* src_rgb565, } } #endif +#if defined(HAS_RGB565TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RGB565ToARGBRow = RGB565ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RGB565ToARGBRow = RGB565ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_RGB565TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + RGB565ToARGBRow = RGB565ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + RGB565ToARGBRow = RGB565ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { RGB565ToARGBRow(src_rgb565, dst_argb, width); @@ -3177,14 +3317,6 @@ int ARGB1555ToARGB(const uint8_t* src_argb1555, } } #endif -#if defined(HAS_ARGB1555TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGB1555ToARGBRow = ARGB1555ToARGBRow_MMI; - } - } -#endif #if defined(HAS_ARGB1555TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_MSA; @@ -3193,6 
+3325,22 @@ int ARGB1555ToARGB(const uint8_t* src_argb1555, } } #endif +#if defined(HAS_ARGB1555TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ARGB1555ToARGBRow = ARGB1555ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_ARGB1555TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGB1555ToARGBRow = ARGB1555ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGB1555ToARGBRow(src_argb1555, dst_argb, width); @@ -3252,14 +3400,6 @@ int ARGB4444ToARGB(const uint8_t* src_argb4444, } } #endif -#if defined(HAS_ARGB4444TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGB4444ToARGBRow = ARGB4444ToARGBRow_MMI; - } - } -#endif #if defined(HAS_ARGB4444TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_MSA; @@ -3268,6 +3408,22 @@ int ARGB4444ToARGB(const uint8_t* src_argb4444, } } #endif +#if defined(HAS_ARGB4444TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_ARGB4444TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGB4444ToARGBRow = ARGB4444ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGB4444ToARGBRow(src_argb4444, dst_argb, width); @@ -3506,6 +3662,7 @@ int NV12ToARGBMatrix(const uint8_t* src_y, void (*NV12ToARGBRow)( const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = NV12ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -3539,14 +3696,6 @@ int NV12ToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_NV12TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - NV12ToARGBRow = NV12ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - NV12ToARGBRow = NV12ToARGBRow_MMI; - } - } -#endif #if defined(HAS_NV12TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { NV12ToARGBRow = NV12ToARGBRow_Any_MSA; @@ -3555,6 +3704,22 @@ int NV12ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_NV12TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + NV12ToARGBRow = NV12ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 8)) { + NV12ToARGBRow = NV12ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_NV12TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + NV12ToARGBRow = NV12ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + NV12ToARGBRow = NV12ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { NV12ToARGBRow(src_y, src_uv, dst_argb, yuvconstants, width); @@ -3582,6 +3747,7 @@ int NV21ToARGBMatrix(const uint8_t* src_y, void (*NV21ToARGBRow)( const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = NV21ToARGBRow_C; + assert(yuvconstants); if (!src_y || !src_vu || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -3615,14 +3781,6 @@ int NV21ToARGBMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_NV21TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - NV21ToARGBRow = NV21ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - NV21ToARGBRow = NV21ToARGBRow_MMI; - } - } -#endif #if defined(HAS_NV21TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { NV21ToARGBRow = NV21ToARGBRow_Any_MSA; 
@@ -3631,6 +3789,22 @@ int NV21ToARGBMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_NV21TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + NV21ToARGBRow = NV21ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 8)) { + NV21ToARGBRow = NV21ToARGBRow_LSX; + } + } +#endif +#if defined(HAS_NV21TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + NV21ToARGBRow = NV21ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + NV21ToARGBRow = NV21ToARGBRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { NV21ToARGBRow(src_y, src_vu, dst_argb, yuvconstants, width); @@ -3717,6 +3891,7 @@ int NV12ToRGB24Matrix(const uint8_t* src_y, void (*NV12ToRGB24Row)( const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = NV12ToRGB24Row_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_rgb24 || width <= 0 || height == 0) { return -1; } @@ -3750,14 +3925,6 @@ int NV12ToRGB24Matrix(const uint8_t* src_y, } } #endif -#if defined(HAS_NV12TORGB24ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - NV12ToRGB24Row = NV12ToRGB24Row_Any_MMI; - if (IS_ALIGNED(width, 8)) { - NV12ToRGB24Row = NV12ToRGB24Row_MMI; - } - } -#endif for (y = 0; y < height; ++y) { NV12ToRGB24Row(src_y, src_uv, dst_rgb24, yuvconstants, width); @@ -3785,6 +3952,7 @@ int NV21ToRGB24Matrix(const uint8_t* src_y, void (*NV21ToRGB24Row)( const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = NV21ToRGB24Row_C; + assert(yuvconstants); if (!src_y || !src_vu || !dst_rgb24 || width <= 0 || height == 0) { return -1; } @@ -3818,14 +3986,6 @@ int NV21ToRGB24Matrix(const uint8_t* src_y, } } #endif -#if defined(HAS_NV21TORGB24ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - NV21ToRGB24Row = NV21ToRGB24Row_Any_MMI; - if (IS_ALIGNED(width, 8)) { - NV21ToRGB24Row = NV21ToRGB24Row_MMI; - } - } -#endif for (y = 0; y < height; ++y) { NV21ToRGB24Row(src_y, src_vu, dst_rgb24, yuvconstants, width); @@ -3925,6 +4085,14 @@ int NV21ToYUV24(const uint8_t* src_y, } } #endif +#if defined(HAS_NV21TOYUV24ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + NV21ToYUV24Row = NV21ToYUV24Row_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + NV21ToYUV24Row = NV21ToYUV24Row_SSSE3; + } + } +#endif #if defined(HAS_NV21TOYUV24ROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { NV21ToYUV24Row = NV21ToYUV24Row_Any_AVX2; @@ -3995,14 +4163,6 @@ int YUY2ToARGB(const uint8_t* src_yuy2, } } #endif -#if defined(HAS_YUY2TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - YUY2ToARGBRow = YUY2ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - YUY2ToARGBRow = YUY2ToARGBRow_MMI; - } - } -#endif #if defined(HAS_YUY2TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { YUY2ToARGBRow = YUY2ToARGBRow_Any_MSA; @@ -4010,6 +4170,14 @@ int YUY2ToARGB(const uint8_t* src_yuy2, YUY2ToARGBRow = YUY2ToARGBRow_MSA; } } +#endif +#if defined(HAS_YUY2TOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + YUY2ToARGBRow = YUY2ToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 8)) { + YUY2ToARGBRow = YUY2ToARGBRow_LSX; + } + } #endif for (y = 0; y < height; ++y) { YUY2ToARGBRow(src_yuy2, dst_argb, &kYuvI601Constants, width); @@ -4070,14 +4238,6 @@ int UYVYToARGB(const uint8_t* src_uyvy, } } #endif -#if defined(HAS_UYVYTOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - UYVYToARGBRow = UYVYToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - UYVYToARGBRow = UYVYToARGBRow_MMI; - } - } -#endif #if defined(HAS_UYVYTOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { UYVYToARGBRow = UYVYToARGBRow_Any_MSA; @@ -4085,6 +4245,14 @@ int UYVYToARGB(const uint8_t* src_uyvy, 
UYVYToARGBRow = UYVYToARGBRow_MSA; } } +#endif +#if defined(HAS_UYVYTOARGBROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + UYVYToARGBRow = UYVYToARGBRow_Any_LSX; + if (IS_ALIGNED(width, 8)) { + UYVYToARGBRow = UYVYToARGBRow_LSX; + } + } #endif for (y = 0; y < height; ++y) { UYVYToARGBRow(src_uyvy, dst_argb, &kYuvI601Constants, width); @@ -4127,6 +4295,7 @@ int Android420ToARGBMatrix(const uint8_t* src_y, const ptrdiff_t vu_off = src_v - src_u; int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { return -1; } @@ -4227,6 +4396,7 @@ int I422ToRGBAMatrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToRGBARow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_rgba || width <= 0 || height == 0) { return -1; } @@ -4260,14 +4430,6 @@ int I422ToRGBAMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TORGBAROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToRGBARow = I422ToRGBARow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToRGBARow = I422ToRGBARow_MMI; - } - } -#endif #if defined(HAS_I422TORGBAROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToRGBARow = I422ToRGBARow_Any_MSA; @@ -4276,6 +4438,14 @@ int I422ToRGBAMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TORGBAROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToRGBARow = I422ToRGBARow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToRGBARow = I422ToRGBARow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToRGBARow(src_y, src_u, src_v, dst_rgba, yuvconstants, width); @@ -4338,6 +4508,7 @@ int NV12ToRGB565Matrix(const uint8_t* src_y, void (*NV12ToRGB565Row)( const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = NV12ToRGB565Row_C; + assert(yuvconstants); if (!src_y || !src_uv || !dst_rgb565 || width <= 0 || height == 0) { return -1; } @@ -4371,14 +4542,6 @@ int NV12ToRGB565Matrix(const uint8_t* src_y, } } #endif -#if defined(HAS_NV12TORGB565ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - NV12ToRGB565Row = NV12ToRGB565Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - NV12ToRGB565Row = NV12ToRGB565Row_MMI; - } - } -#endif #if defined(HAS_NV12TORGB565ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { NV12ToRGB565Row = NV12ToRGB565Row_Any_MSA; @@ -4387,6 +4550,22 @@ int NV12ToRGB565Matrix(const uint8_t* src_y, } } #endif +#if defined(HAS_NV12TORGB565ROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + NV12ToRGB565Row = NV12ToRGB565Row_Any_LSX; + if (IS_ALIGNED(width, 8)) { + NV12ToRGB565Row = NV12ToRGB565Row_LSX; + } + } +#endif +#if defined(HAS_NV12TORGB565ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + NV12ToRGB565Row = NV12ToRGB565Row_Any_LASX; + if (IS_ALIGNED(width, 16)) { + NV12ToRGB565Row = NV12ToRGB565Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { NV12ToRGB565Row(src_y, src_uv, dst_rgb565, yuvconstants, width); @@ -4432,6 +4611,7 @@ int I420ToRGBAMatrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToRGBARow_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_rgba || width <= 0 || height == 0) { return -1; } @@ -4465,14 +4645,6 @@ int I420ToRGBAMatrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TORGBAROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToRGBARow = I422ToRGBARow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToRGBARow = I422ToRGBARow_MMI; - } - } -#endif #if 
defined(HAS_I422TORGBAROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToRGBARow = I422ToRGBARow_Any_MSA; @@ -4481,6 +4653,14 @@ int I420ToRGBAMatrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TORGBAROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToRGBARow = I422ToRGBARow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToRGBARow = I422ToRGBARow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToRGBARow(src_y, src_u, src_v, dst_rgba, yuvconstants, width); @@ -4548,6 +4728,7 @@ int I420ToRGB24Matrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToRGB24Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_rgb24 || width <= 0 || height == 0) { return -1; } @@ -4581,14 +4762,6 @@ int I420ToRGB24Matrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TORGB24ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToRGB24Row = I422ToRGB24Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToRGB24Row = I422ToRGB24Row_MMI; - } - } -#endif #if defined(HAS_I422TORGB24ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToRGB24Row = I422ToRGB24Row_Any_MSA; @@ -4597,6 +4770,14 @@ int I420ToRGB24Matrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TORGB24ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToRGB24Row = I422ToRGB24Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToRGB24Row = I422ToRGB24Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToRGB24Row(src_y, src_u, src_v, dst_rgb24, yuvconstants, width); @@ -4718,6 +4899,121 @@ int H420ToRAW(const uint8_t* src_y, width, height); } +// Convert I422 to RGB24 with matrix. +LIBYUV_API +int I422ToRGB24Matrix(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I422ToRGB24Row)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I422ToRGB24Row_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_rgb24 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_rgb24 = dst_rgb24 + (height - 1) * dst_stride_rgb24; + dst_stride_rgb24 = -dst_stride_rgb24; + } +#if defined(HAS_I422TORGB24ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I422ToRGB24Row = I422ToRGB24Row_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + I422ToRGB24Row = I422ToRGB24Row_SSSE3; + } + } +#endif +#if defined(HAS_I422TORGB24ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I422ToRGB24Row = I422ToRGB24Row_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + I422ToRGB24Row = I422ToRGB24Row_AVX2; + } + } +#endif +#if defined(HAS_I422TORGB24ROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I422ToRGB24Row = I422ToRGB24Row_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I422ToRGB24Row = I422ToRGB24Row_NEON; + } + } +#endif +#if defined(HAS_I422TORGB24ROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I422ToRGB24Row = I422ToRGB24Row_Any_MSA; + if (IS_ALIGNED(width, 16)) { + I422ToRGB24Row = I422ToRGB24Row_MSA; + } + } +#endif +#if defined(HAS_I422TORGB24ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToRGB24Row = I422ToRGB24Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToRGB24Row = I422ToRGB24Row_LASX; + } + } +#endif + + for (y = 0; y < height; ++y) { + I422ToRGB24Row(src_y, src_u, src_v, dst_rgb24, yuvconstants, width); + dst_rgb24 += dst_stride_rgb24; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + return 0; +} + +// Convert I422 to RGB24. +LIBYUV_API +int I422ToRGB24(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + int width, + int height) { + return I422ToRGB24Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_rgb24, dst_stride_rgb24, + &kYuvI601Constants, width, height); +} + +// Convert I422 to RAW. +LIBYUV_API +int I422ToRAW(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_raw, + int dst_stride_raw, + int width, + int height) { + return I422ToRGB24Matrix(src_y, src_stride_y, src_v, + src_stride_v, // Swap U and V + src_u, src_stride_u, dst_raw, dst_stride_raw, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + // Convert I420 to ARGB1555. 
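// Editor's note (not part of the patch): the hunk above adds public
// I422ToRGB24Matrix / I422ToRGB24 / I422ToRAW entry points that mirror the
// I444ToRGB24 / I444ToRAW family added earlier in this file. A minimal
// caller-side sketch follows, assuming the matching declarations are exported
// from the public headers; the libyuv.h include, the ConvertI422Sample helper
// name, the buffer sizes and the BT.709 matrix choice are illustrative and
// not taken from the patch.

#include <cstdint>
#include <vector>

#include "libyuv.h"

int ConvertI422Sample() {
  // Hypothetical 4:2:2 input: full-resolution Y, half-width U and V planes.
  const int width = 320;
  const int height = 240;
  const int half_width = (width + 1) / 2;
  std::vector<uint8_t> y(static_cast<size_t>(width) * height);
  std::vector<uint8_t> u(static_cast<size_t>(half_width) * height);
  std::vector<uint8_t> v(static_cast<size_t>(half_width) * height);
  std::vector<uint8_t> rgb24(static_cast<size_t>(width) * height * 3);
  std::vector<uint8_t> raw(rgb24.size());

  // BT.601 defaults via the thin wrappers added by this patch.
  int r = libyuv::I422ToRGB24(y.data(), width, u.data(), half_width, v.data(),
                              half_width, rgb24.data(), width * 3, width,
                              height);
  if (r == 0) {
    // RAW is the 24-bit format with the opposite byte order; the wrapper gets
    // it by swapping U/V and using the YVU matrix, as the patch comments note.
    r = libyuv::I422ToRAW(y.data(), width, u.data(), half_width, v.data(),
                          half_width, raw.data(), width * 3, width, height);
  }
  if (r == 0) {
    // An explicit matrix, e.g. BT.709, goes through the Matrix variant.
    r = libyuv::I422ToRGB24Matrix(y.data(), width, u.data(), half_width,
                                  v.data(), half_width, rgb24.data(),
                                  width * 3, &libyuv::kYuvH709Constants, width,
                                  height);
  }
  return r;  // 0 on success, -1 on invalid arguments.
}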
LIBYUV_API int I420ToARGB1555(const uint8_t* src_y, @@ -4769,14 +5065,6 @@ int I420ToARGB1555(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOARGB1555ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToARGB1555Row = I422ToARGB1555Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToARGB1555Row = I422ToARGB1555Row_MMI; - } - } -#endif #if defined(HAS_I422TOARGB1555ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToARGB1555Row = I422ToARGB1555Row_Any_MSA; @@ -4785,6 +5073,14 @@ int I420ToARGB1555(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGB1555ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToARGB1555Row = I422ToARGB1555Row_Any_LASX; + if (IS_ALIGNED(width, 8)) { + I422ToARGB1555Row = I422ToARGB1555Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToARGB1555Row(src_y, src_u, src_v, dst_argb1555, &kYuvI601Constants, @@ -4850,14 +5146,6 @@ int I420ToARGB4444(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOARGB4444ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToARGB4444Row = I422ToARGB4444Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToARGB4444Row = I422ToARGB4444Row_MMI; - } - } -#endif #if defined(HAS_I422TOARGB4444ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToARGB4444Row = I422ToARGB4444Row_Any_MSA; @@ -4866,6 +5154,14 @@ int I420ToARGB4444(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGB4444ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToARGB4444Row = I422ToARGB4444Row_Any_LASX; + if (IS_ALIGNED(width, 8)) { + I422ToARGB4444Row = I422ToARGB4444Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToARGB4444Row(src_y, src_u, src_v, dst_argb4444, &kYuvI601Constants, @@ -4898,6 +5194,7 @@ int I420ToRGB565Matrix(const uint8_t* src_y, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToRGB565Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_rgb565 || width <= 0 || height == 0) { return -1; } @@ -4931,14 +5228,6 @@ int I420ToRGB565Matrix(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TORGB565ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToRGB565Row = I422ToRGB565Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToRGB565Row = I422ToRGB565Row_MMI; - } - } -#endif #if defined(HAS_I422TORGB565ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToRGB565Row = I422ToRGB565Row_Any_MSA; @@ -4947,6 +5236,14 @@ int I420ToRGB565Matrix(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TORGB565ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToRGB565Row = I422ToRGB565Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToRGB565Row = I422ToRGB565Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, yuvconstants, width); @@ -5011,23 +5308,25 @@ int H420ToRGB565(const uint8_t* src_y, &kYuvH709Constants, width, height); } -// Convert I422 to RGB565. +// Convert I422 to RGB565 with specified color matrix. 
LIBYUV_API -int I422ToRGB565(const uint8_t* src_y, - int src_stride_y, - const uint8_t* src_u, - int src_stride_u, - const uint8_t* src_v, - int src_stride_v, - uint8_t* dst_rgb565, - int dst_stride_rgb565, - int width, - int height) { +int I422ToRGB565Matrix(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb565, + int dst_stride_rgb565, + const struct YuvConstants* yuvconstants, + int width, + int height) { int y; void (*I422ToRGB565Row)(const uint8_t* y_buf, const uint8_t* u_buf, const uint8_t* v_buf, uint8_t* rgb_buf, const struct YuvConstants* yuvconstants, int width) = I422ToRGB565Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_rgb565 || width <= 0 || height == 0) { return -1; } @@ -5069,9 +5368,17 @@ int I422ToRGB565(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TORGB565ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToRGB565Row = I422ToRGB565Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToRGB565Row = I422ToRGB565Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { - I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, &kYuvI601Constants, width); + I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, yuvconstants, width); dst_rgb565 += dst_stride_rgb565; src_y += src_stride_y; src_u += src_stride_u; @@ -5080,6 +5387,23 @@ int I422ToRGB565(const uint8_t* src_y, return 0; } +// Convert I422 to RGB565. +LIBYUV_API +int I422ToRGB565(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb565, + int dst_stride_rgb565, + int width, + int height) { + return I422ToRGB565Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_rgb565, dst_stride_rgb565, + &kYuvI601Constants, width, height); +} + // Ordered 8x8 dither for 888 to 565. Values from 0 to 7. 
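// Editor's note (not part of the patch): I422ToRGB565 is now a thin BT.601
// wrapper over the new I422ToRGB565Matrix, so callers can select the color
// matrix for 4:2:2 to RGB565 themselves. A hedged sketch, assuming the Matrix
// variant is exported in the public headers; the function name, buffers and
// BT.709 choice are illustrative.

#include <cstdint>
#include <vector>

#include "libyuv.h"

int ConvertI422ToRGB565_709(const uint8_t* y, const uint8_t* u,
                            const uint8_t* v, int width, int height,
                            std::vector<uint8_t>* out) {
  const int half_width = (width + 1) / 2;
  out->resize(static_cast<size_t>(width) * height * 2);  // 16-bit pixels.
  return libyuv::I422ToRGB565Matrix(y, width, u, half_width, v, half_width,
                                    out->data(), width * 2,
                                    &libyuv::kYuvH709Constants, width, height);
}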
static const uint8_t kDither565_4x4[16] = { 0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2, @@ -5134,6 +5458,15 @@ int I420ToRGB565Dither(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGBROW_AVX512BW) + if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == + (kCpuHasAVX512BW | kCpuHasAVX512VL)) { + I422ToARGBRow = I422ToARGBRow_Any_AVX512BW; + if (IS_ALIGNED(width, 32)) { + I422ToARGBRow = I422ToARGBRow_AVX512BW; + } + } +#endif #if defined(HAS_I422TOARGBROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { I422ToARGBRow = I422ToARGBRow_Any_NEON; @@ -5142,14 +5475,6 @@ int I420ToRGB565Dither(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToARGBRow = I422ToARGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - I422ToARGBRow = I422ToARGBRow_MMI; - } - } -#endif #if defined(HAS_I422TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToARGBRow = I422ToARGBRow_Any_MSA; @@ -5158,6 +5483,14 @@ int I420ToRGB565Dither(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToARGBRow = I422ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToARGBRow = I422ToARGBRow_LASX; + } + } +#endif #if defined(HAS_ARGBTORGB565DITHERROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2; @@ -5182,14 +5515,6 @@ int I420ToRGB565Dither(const uint8_t* src_y, } } #endif -#if defined(HAS_ARGBTORGB565DITHERROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MMI; - } - } -#endif #if defined(HAS_ARGBTORGB565DITHERROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA; @@ -5197,6 +5522,14 @@ int I420ToRGB565Dither(const uint8_t* src_y, ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MSA; } } +#endif +#if defined(HAS_ARGBTORGB565DITHERROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_LASX; + } + } #endif { // Allocate a row of argb. @@ -5237,6 +5570,7 @@ int I420ToAR30Matrix(const uint8_t* src_y, const struct YuvConstants* yuvconstants, int width) = I422ToAR30Row_C; + assert(yuvconstants); if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { return -1; } @@ -5344,6 +5678,2310 @@ int H420ToAB30(const uint8_t* src_y, &kYvuH709Constants, width, height); } +static int I420ToARGBMatrixBilinear(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I444ToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I444ToARGBRow_C; + void (*Scale2RowUp_Bilinear)(const uint8_t* src_ptr, ptrdiff_t src_stride, + uint8_t* dst_ptr, ptrdiff_t dst_stride, + int dst_width) = ScaleRowUp2_Bilinear_Any_C; + void (*ScaleRowUp2_Linear)(const uint8_t* src_ptr, uint8_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
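// Editor's note (not part of the patch): I420ToARGBMatrixBilinear upsamples
// the half-resolution U and V planes to full resolution and then reuses the
// I444ToARGBRow kernel. The first output row (and the last one when height is
// even) uses the horizontal-only ScaleRowUp2_Linear kernel; interior rows are
// produced in pairs by Scale2RowUp_Bilinear, which fills two temp rows per
// step. Each temp row is width rounded up to a multiple of 32, e.g. width 100
// gives (100 + 31) & ~31 = 128 bytes per row and 4 * 128 = 512 bytes in the
// align_buffer_64 allocation below.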
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I444TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444ToARGBRow = I444ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I444ToARGBRow = I444ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I444TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444ToARGBRow = I444ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I444ToARGBRow = I444ToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I444TOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444ToARGBRow = I444ToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444ToARGBRow = I444ToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I444TOARGBROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I444ToARGBRow = I444ToARGBRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + I444ToARGBRow = I444ToARGBRow_MSA; + } + } +#endif +#if defined(HAS_I444TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I444ToARGBRow = I444ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I444ToARGBRow = I444ToARGBRow_LASX; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSE2; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSSE3; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSSE3; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_AVX2; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_AVX2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_NEON; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_NEON; + } +#endif + + // alloc 4 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 4); + uint8_t* temp_u_1 = row; + uint8_t* temp_u_2 = row + row_size; + uint8_t* temp_v_1 = row + row_size * 2; + uint8_t* temp_v_2 = row + row_size * 3; + + ScaleRowUp2_Linear(src_u, temp_u_1, width); + ScaleRowUp2_Linear(src_v, temp_v_1, width); + I444ToARGBRow(src_y, temp_u_1, temp_v_1, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear(src_u, src_stride_u, temp_u_1, row_size, width); + Scale2RowUp_Bilinear(src_v, src_stride_v, temp_v_1, row_size, width); + I444ToARGBRow(src_y, temp_u_1, temp_v_1, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + I444ToARGBRow(src_y, temp_u_2, temp_v_2, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + if (!(height & 1)) { + ScaleRowUp2_Linear(src_u, temp_u_1, width); + ScaleRowUp2_Linear(src_v, temp_v_1, width); + I444ToARGBRow(src_y, temp_u_1, temp_v_1, dst_argb, yuvconstants, width); + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I422ToARGBMatrixLinear(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I444ToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, uint8_t* 
rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I444ToARGBRow_C; + void (*ScaleRowUp2_Linear)(const uint8_t* src_ptr, uint8_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I444TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444ToARGBRow = I444ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I444ToARGBRow = I444ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I444TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444ToARGBRow = I444ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I444ToARGBRow = I444ToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I444TOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444ToARGBRow = I444ToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444ToARGBRow = I444ToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I444TOARGBROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I444ToARGBRow = I444ToARGBRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + I444ToARGBRow = I444ToARGBRow_MSA; + } + } +#endif +#if defined(HAS_I444TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I444ToARGBRow = I444ToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I444ToARGBRow = I444ToARGBRow_LASX; + } + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSSE3; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_AVX2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 2); + uint8_t* temp_u = row; + uint8_t* temp_v = row + row_size; + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear(src_u, temp_u, width); + ScaleRowUp2_Linear(src_v, temp_v, width); + I444ToARGBRow(src_y, temp_u, temp_v, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I420ToRGB24MatrixBilinear(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I444ToRGB24Row)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I444ToRGB24Row_C; + void (*Scale2RowUp_Bilinear)(const uint8_t* src_ptr, ptrdiff_t src_stride, + uint8_t* dst_ptr, ptrdiff_t dst_stride, + int dst_width) = ScaleRowUp2_Bilinear_Any_C; + void (*ScaleRowUp2_Linear)(const uint8_t* src_ptr, uint8_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_rgb24 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_rgb24 = dst_rgb24 + (height - 1) * dst_stride_rgb24; + dst_stride_rgb24 = -dst_stride_rgb24; + } +#if defined(HAS_I444TORGB24ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444ToRGB24Row = I444ToRGB24Row_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + I444ToRGB24Row = I444ToRGB24Row_SSSE3; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444ToRGB24Row = I444ToRGB24Row_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + I444ToRGB24Row = I444ToRGB24Row_AVX2; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444ToRGB24Row = I444ToRGB24Row_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444ToRGB24Row = I444ToRGB24Row_NEON; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I444ToRGB24Row = I444ToRGB24Row_Any_MSA; + if (IS_ALIGNED(width, 8)) { + I444ToRGB24Row = I444ToRGB24Row_MSA; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I444ToRGB24Row = I444ToRGB24Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I444ToRGB24Row = I444ToRGB24Row_LASX; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSE2; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSSE3; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSSE3; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_AVX2; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_AVX2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_NEON; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_NEON; + } +#endif + + // alloc 4 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 4); + uint8_t* temp_u_1 = row; + uint8_t* temp_u_2 = row + row_size; + uint8_t* temp_v_1 = row + row_size * 2; + uint8_t* temp_v_2 = row + row_size * 3; + + ScaleRowUp2_Linear(src_u, temp_u_1, width); + ScaleRowUp2_Linear(src_v, temp_v_1, width); + I444ToRGB24Row(src_y, temp_u_1, temp_v_1, dst_rgb24, yuvconstants, width); + dst_rgb24 += dst_stride_rgb24; + src_y += src_stride_y; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear(src_u, src_stride_u, temp_u_1, row_size, width); + Scale2RowUp_Bilinear(src_v, src_stride_v, temp_v_1, row_size, width); + I444ToRGB24Row(src_y, temp_u_1, temp_v_1, dst_rgb24, yuvconstants, width); + dst_rgb24 += dst_stride_rgb24; + src_y += src_stride_y; + I444ToRGB24Row(src_y, temp_u_2, temp_v_2, dst_rgb24, yuvconstants, width); + dst_rgb24 += dst_stride_rgb24; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + if (!(height & 1)) { + ScaleRowUp2_Linear(src_u, temp_u_1, width); + ScaleRowUp2_Linear(src_v, temp_v_1, width); + I444ToRGB24Row(src_y, temp_u_1, temp_v_1, dst_rgb24, yuvconstants, width); + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I010ToAR30MatrixBilinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I410ToAR30Row)(const uint16_t* y_buf, const 
uint16_t* u_buf, + const uint16_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I410ToAR30Row_C; + void (*Scale2RowUp_Bilinear_12)( + const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, + ptrdiff_t dst_stride, int dst_width) = ScaleRowUp2_Bilinear_16_Any_C; + void (*ScaleRowUp2_Linear_12)(const uint16_t* src_ptr, uint16_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_ar30 = dst_ar30 + (height - 1) * dst_stride_ar30; + dst_stride_ar30 = -dst_stride_ar30; + } +#if defined(HAS_I410TOAR30ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I410ToAR30Row = I410ToAR30Row_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I410ToAR30Row = I410ToAR30Row_SSSE3; + } + } +#endif +#if defined(HAS_I410TOAR30ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I410ToAR30Row = I410ToAR30Row_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I410ToAR30Row = I410ToAR30Row_AVX2; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_SSSE3; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_SSSE3; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_AVX2; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_AVX2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_NEON; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_NEON; + } +#endif + + // alloc 4 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 4 * sizeof(uint16_t)); + uint16_t* temp_u_1 = (uint16_t*)(row); + uint16_t* temp_u_2 = (uint16_t*)(row) + row_size; + uint16_t* temp_v_1 = (uint16_t*)(row) + row_size * 2; + uint16_t* temp_v_2 = (uint16_t*)(row) + row_size * 3; + + ScaleRowUp2_Linear_12(src_u, temp_u_1, width); + ScaleRowUp2_Linear_12(src_v, temp_v_1, width); + I410ToAR30Row(src_y, temp_u_1, temp_v_1, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear_12(src_u, src_stride_u, temp_u_1, row_size, width); + Scale2RowUp_Bilinear_12(src_v, src_stride_v, temp_v_1, row_size, width); + I410ToAR30Row(src_y, temp_u_1, temp_v_1, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + I410ToAR30Row(src_y, temp_u_2, temp_v_2, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + if (!(height & 1)) { + ScaleRowUp2_Linear_12(src_u, temp_u_1, width); + ScaleRowUp2_Linear_12(src_v, temp_v_1, width); + I410ToAR30Row(src_y, temp_u_1, temp_v_1, dst_ar30, yuvconstants, width); + } + + free_aligned_buffer_64(row); + + return 0; +} + +static int I210ToAR30MatrixLinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I410ToAR30Row)(const uint16_t* y_buf, const uint16_t* u_buf, + const uint16_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* 
yuvconstants, int width) = + I410ToAR30Row_C; + void (*ScaleRowUp2_Linear_12)(const uint16_t* src_ptr, uint16_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_ar30 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_ar30 = dst_ar30 + (height - 1) * dst_stride_ar30; + dst_stride_ar30 = -dst_stride_ar30; + } +#if defined(HAS_I410TOAR30ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I410ToAR30Row = I410ToAR30Row_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I410ToAR30Row = I410ToAR30Row_SSSE3; + } + } +#endif +#if defined(HAS_I410TOAR30ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I410ToAR30Row = I410ToAR30Row_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I410ToAR30Row = I410ToAR30Row_AVX2; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_LINEAR_12_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_SSSE3; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_12_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_AVX2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_12_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); + uint16_t* temp_u = (uint16_t*)(row); + uint16_t* temp_v = (uint16_t*)(row) + row_size; + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear_12(src_u, temp_u, width); + ScaleRowUp2_Linear_12(src_v, temp_v, width); + I410ToAR30Row(src_y, temp_u, temp_v, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + free_aligned_buffer_64(row); + return 0; +} + +static int I010ToARGBMatrixBilinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I410ToARGBRow)(const uint16_t* y_buf, const uint16_t* u_buf, + const uint16_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I410ToARGBRow_C; + void (*Scale2RowUp_Bilinear_12)( + const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, + ptrdiff_t dst_stride, int dst_width) = ScaleRowUp2_Bilinear_16_Any_C; + void (*ScaleRowUp2_Linear_12)(const uint16_t* src_ptr, uint16_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
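// Editor's note (not part of the patch): the 10-bit helpers in this block
// follow the same shape as the 8-bit ones above but operate on uint16_t
// samples: chroma is upsampled with the ScaleRowUp2_*_12/_16 kernels into
// uint16_t temp rows (row_size * 4 * sizeof(uint16_t) bytes) and each output
// row is rendered with the I410ToARGBRow or I410ToAR30Row kernel.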
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I410TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I410ToARGBRow = I410ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I410ToARGBRow = I410ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I410TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I410ToARGBRow = I410ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I410ToARGBRow = I410ToARGBRow_AVX2; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_SSSE3; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_SSSE3; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_AVX2; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_AVX2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_NEON; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_NEON; + } +#endif + + // alloc 4 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 4 * sizeof(uint16_t)); + uint16_t* temp_u_1 = (uint16_t*)(row); + uint16_t* temp_u_2 = (uint16_t*)(row) + row_size; + uint16_t* temp_v_1 = (uint16_t*)(row) + row_size * 2; + uint16_t* temp_v_2 = (uint16_t*)(row) + row_size * 3; + + ScaleRowUp2_Linear_12(src_u, temp_u_1, width); + ScaleRowUp2_Linear_12(src_v, temp_v_1, width); + I410ToARGBRow(src_y, temp_u_1, temp_v_1, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear_12(src_u, src_stride_u, temp_u_1, row_size, width); + Scale2RowUp_Bilinear_12(src_v, src_stride_v, temp_v_1, row_size, width); + I410ToARGBRow(src_y, temp_u_1, temp_v_1, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + I410ToARGBRow(src_y, temp_u_2, temp_v_2, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + if (!(height & 1)) { + ScaleRowUp2_Linear_12(src_u, temp_u_1, width); + ScaleRowUp2_Linear_12(src_v, temp_v_1, width); + I410ToARGBRow(src_y, temp_u_1, temp_v_1, dst_argb, yuvconstants, width); + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I210ToARGBMatrixLinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I410ToARGBRow)(const uint16_t* y_buf, const uint16_t* u_buf, + const uint16_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I410ToARGBRow_C; + void (*ScaleRowUp2_Linear_12)(const uint16_t* src_ptr, uint16_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_argb || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I410TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I410ToARGBRow = I410ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I410ToARGBRow = I410ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I410TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I410ToARGBRow = I410ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I410ToARGBRow = I410ToARGBRow_AVX2; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_LINEAR_12_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_SSSE3; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_12_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_AVX2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_12_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); + uint16_t* temp_u = (uint16_t*)(row); + uint16_t* temp_v = (uint16_t*)(row) + row_size; + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear_12(src_u, temp_u, width); + ScaleRowUp2_Linear_12(src_v, temp_v, width); + I410ToARGBRow(src_y, temp_u, temp_v, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I420AlphaToARGBMatrixBilinear( + const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate) { + int y; + void (*I444AlphaToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, const uint8_t* a_buf, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = I444AlphaToARGBRow_C; + void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, + int width) = ARGBAttenuateRow_C; + void (*Scale2RowUp_Bilinear)(const uint8_t* src_ptr, ptrdiff_t src_stride, + uint8_t* dst_ptr, ptrdiff_t dst_stride, + int dst_width) = ScaleRowUp2_Bilinear_Any_C; + void (*ScaleRowUp2_Linear)(const uint8_t* src_ptr, uint8_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { + return -1; + } + // Negative height means invert the image. 
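// Editor's note (not part of the patch): the alpha variant passes the
// full-resolution src_a plane straight through to I444AlphaToARGBRow while
// U and V are upsampled exactly as in the non-alpha helpers, and when the
// attenuate flag is set each rendered row is premultiplied in place with
// ARGBAttenuateRow.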
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I444ALPHATOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_MSA; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_LASX; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; + if (IS_ALIGNED(width, 4)) { + ARGBAttenuateRow = ARGBAttenuateRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_AVX2; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_NEON; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_MSA; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSE2; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSSE3; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSSE3; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_AVX2; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_AVX2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_NEON; + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_NEON; + } +#endif + + // alloc 4 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 4); + uint8_t* temp_u_1 = row; + uint8_t* temp_u_2 = row + row_size; + uint8_t* temp_v_1 = row + row_size * 2; + uint8_t* temp_v_2 = row + row_size * 3; + + ScaleRowUp2_Linear(src_u, temp_u_1, width); + ScaleRowUp2_Linear(src_v, temp_v_1, width); + I444AlphaToARGBRow(src_y, temp_u_1, temp_v_1, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_a += src_stride_a; + + for (y = 0; y < height - 2; y += 2) { + 
Scale2RowUp_Bilinear(src_u, src_stride_u, temp_u_1, row_size, width); + Scale2RowUp_Bilinear(src_v, src_stride_v, temp_v_1, row_size, width); + I444AlphaToARGBRow(src_y, temp_u_1, temp_v_1, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_a += src_stride_a; + I444AlphaToARGBRow(src_y, temp_u_2, temp_v_2, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + src_a += src_stride_a; + } + + if (!(height & 1)) { + ScaleRowUp2_Linear(src_u, temp_u_1, width); + ScaleRowUp2_Linear(src_v, temp_v_1, width); + I444AlphaToARGBRow(src_y, temp_u_1, temp_v_1, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I422AlphaToARGBMatrixLinear(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate) { + int y; + void (*I444AlphaToARGBRow)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, const uint8_t* a_buf, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = I444AlphaToARGBRow_C; + void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, + int width) = ARGBAttenuateRow_C; + void (*ScaleRowUp2_Linear)(const uint8_t* src_ptr, uint8_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I444ALPHATOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_MSA; + } + } +#endif +#if defined(HAS_I444ALPHATOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + I444AlphaToARGBRow = I444AlphaToARGBRow_LASX; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; + if (IS_ALIGNED(width, 4)) { + ARGBAttenuateRow = ARGBAttenuateRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_AVX2; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_NEON; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_MSA; + } + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSSE3; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_AVX2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 2); + uint8_t* temp_u = row; + uint8_t* temp_v = row + row_size; + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear(src_u, temp_u, width); + ScaleRowUp2_Linear(src_v, temp_v, width); + I444AlphaToARGBRow(src_y, temp_u, temp_v, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_a += src_stride_a; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I010AlphaToARGBMatrixBilinear( + const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + const uint16_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int 
dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate) { + int y; + void (*I410AlphaToARGBRow)(const uint16_t* y_buf, const uint16_t* u_buf, + const uint16_t* v_buf, const uint16_t* a_buf, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = I410AlphaToARGBRow_C; + void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, + int width) = ARGBAttenuateRow_C; + void (*Scale2RowUp_Bilinear_12)( + const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, + ptrdiff_t dst_stride, int dst_width) = ScaleRowUp2_Bilinear_16_Any_C; + void (*ScaleRowUp2_Linear_12)(const uint16_t* src_ptr, uint16_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I410ALPHATOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I410ALPHATOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; + if (IS_ALIGNED(width, 4)) { + ARGBAttenuateRow = ARGBAttenuateRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_AVX2; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_NEON; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_MSA; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_SSSE3; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_SSSE3; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_AVX2; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_AVX2; + } +#endif + +#if defined(HAS_SCALEROWUP2_BILINEAR_12_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_NEON; + ScaleRowUp2_Linear_12 = ScaleRowUp2_Linear_12_Any_NEON; + } +#endif + + // alloc 4 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 4 * sizeof(uint16_t)); + uint16_t* temp_u_1 = (uint16_t*)(row); + uint16_t* temp_u_2 = (uint16_t*)(row) + row_size; + uint16_t* temp_v_1 = (uint16_t*)(row) + row_size * 2; + uint16_t* temp_v_2 = (uint16_t*)(row) + row_size * 3; + + ScaleRowUp2_Linear_12(src_u, temp_u_1, width); + ScaleRowUp2_Linear_12(src_v, temp_v_1, width); + I410AlphaToARGBRow(src_y, temp_u_1, temp_v_1, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + 
ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_a += src_stride_a; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear_12(src_u, src_stride_u, temp_u_1, row_size, width); + Scale2RowUp_Bilinear_12(src_v, src_stride_v, temp_v_1, row_size, width); + I410AlphaToARGBRow(src_y, temp_u_1, temp_v_1, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_a += src_stride_a; + I410AlphaToARGBRow(src_y, temp_u_2, temp_v_2, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_a += src_stride_a; + src_u += src_stride_u; + src_v += src_stride_v; + } + + if (!(height & 1)) { + ScaleRowUp2_Linear_12(src_u, temp_u_1, width); + ScaleRowUp2_Linear_12(src_v, temp_v_1, width); + I410AlphaToARGBRow(src_y, temp_u_1, temp_v_1, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I210AlphaToARGBMatrixLinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + const uint16_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate) { + int y; + void (*I410AlphaToARGBRow)(const uint16_t* y_buf, const uint16_t* u_buf, + const uint16_t* v_buf, const uint16_t* a_buf, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = I410AlphaToARGBRow_C; + void (*ARGBAttenuateRow)(const uint8_t* src_argb, uint8_t* dst_argb, + int width) = ARGBAttenuateRow_C; + void (*ScaleRowUp2_Linear)(const uint16_t* src_ptr, uint16_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !src_a || !dst_argb || width <= 0 || + height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I410ALPHATOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I410ALPHATOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I410AlphaToARGBRow = I410AlphaToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; + if (IS_ALIGNED(width, 4)) { + ARGBAttenuateRow = ARGBAttenuateRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_AVX2; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_NEON; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_MSA; + } + } +#endif + +#if defined(HAS_SCALEROWUP2_LINEAR_12_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_12_Any_SSSE3; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_12_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_12_Any_AVX2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_12_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_12_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); + uint16_t* temp_u = (uint16_t*)(row); + uint16_t* temp_v = (uint16_t*)(row) + row_size; + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear(src_u, temp_u, width); + ScaleRowUp2_Linear(src_v, temp_v, width); + I410AlphaToARGBRow(src_y, temp_u, temp_v, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_a += src_stride_a; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + free_aligned_buffer_64(row); + return 0; +} + +static int P010ToARGBMatrixBilinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*P410ToARGBRow)( + const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = P410ToARGBRow_C; + void (*Scale2RowUp_Bilinear_16)( + const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, + ptrdiff_t dst_stride, int dst_width) = ScaleUVRowUp2_Bilinear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_uv || !dst_argb || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_P410TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + P410ToARGBRow = P410ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + P410ToARGBRow = P410ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_P410TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + P410ToARGBRow = P410ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + P410ToARGBRow = P410ToARGBRow_AVX2; + } + } +#endif + +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_SSE41 + if (TestCpuFlag(kCpuHasSSE41)) { + Scale2RowUp_Bilinear_16 = ScaleUVRowUp2_Bilinear_16_Any_SSE41; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_AVX2 + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear_16 = ScaleUVRowUp2_Bilinear_16_Any_AVX2; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_NEON + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear_16 = ScaleUVRowUp2_Bilinear_16_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (2 * width + 31) & ~31; + align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); + uint16_t* temp_uv_1 = (uint16_t*)(row); + uint16_t* temp_uv_2 = (uint16_t*)(row) + row_size; + + Scale2RowUp_Bilinear_16(src_uv, 0, temp_uv_1, row_size, width); + P410ToARGBRow(src_y, temp_uv_1, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear_16(src_uv, src_stride_uv, temp_uv_1, row_size, width); + P410ToARGBRow(src_y, temp_uv_1, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + P410ToARGBRow(src_y, temp_uv_2, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_uv += src_stride_uv; + } + + if (!(height & 1)) { + Scale2RowUp_Bilinear_16(src_uv, 0, temp_uv_1, row_size, width); + P410ToARGBRow(src_y, temp_uv_1, dst_argb, yuvconstants, width); + } + + free_aligned_buffer_64(row); + return 0; +} + +static int P210ToARGBMatrixLinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*P410ToARGBRow)( + const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = P410ToARGBRow_C; + void (*ScaleRowUp2_Linear)(const uint16_t* src_uv, uint16_t* dst_uv, + int dst_width) = ScaleUVRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_uv || !dst_argb || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_P410TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + P410ToARGBRow = P410ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + P410ToARGBRow = P410ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_P410TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + P410ToARGBRow = P410ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + P410ToARGBRow = P410ToARGBRow_AVX2; + } + } +#endif + +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_SSE41 + if (TestCpuFlag(kCpuHasSSE41)) { + ScaleRowUp2_Linear = ScaleUVRowUp2_Linear_16_Any_SSE41; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_AVX2 + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear = ScaleUVRowUp2_Linear_16_Any_AVX2; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_NEON + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear = ScaleUVRowUp2_Linear_16_Any_NEON; + } +#endif + + const int row_size = (2 * width + 31) & ~31; + align_buffer_64(row, row_size * sizeof(uint16_t)); + uint16_t* temp_uv = (uint16_t*)(row); + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear(src_uv, temp_uv, width); + P410ToARGBRow(src_y, temp_uv, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_uv += src_stride_uv; + } + + free_aligned_buffer_64(row); + return 0; +} + +static int P010ToAR30MatrixBilinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*P410ToAR30Row)( + const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = P410ToAR30Row_C; + void (*Scale2RowUp_Bilinear_16)( + const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, + ptrdiff_t dst_stride, int dst_width) = ScaleUVRowUp2_Bilinear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_uv || !dst_ar30 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_ar30 = dst_ar30 + (height - 1) * dst_stride_ar30; + dst_stride_ar30 = -dst_stride_ar30; + } +#if defined(HAS_P410TOAR30ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + P410ToAR30Row = P410ToAR30Row_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + P410ToAR30Row = P410ToAR30Row_SSSE3; + } + } +#endif +#if defined(HAS_P410TOAR30ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + P410ToAR30Row = P410ToAR30Row_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + P410ToAR30Row = P410ToAR30Row_AVX2; + } + } +#endif + +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_SSE41 + if (TestCpuFlag(kCpuHasSSE41)) { + Scale2RowUp_Bilinear_16 = ScaleUVRowUp2_Bilinear_16_Any_SSE41; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_AVX2 + if (TestCpuFlag(kCpuHasAVX2)) { + Scale2RowUp_Bilinear_16 = ScaleUVRowUp2_Bilinear_16_Any_AVX2; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_NEON + if (TestCpuFlag(kCpuHasNEON)) { + Scale2RowUp_Bilinear_16 = ScaleUVRowUp2_Bilinear_16_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (2 * width + 31) & ~31; + align_buffer_64(row, row_size * 2 * sizeof(uint16_t)); + uint16_t* temp_uv_1 = (uint16_t*)(row); + uint16_t* temp_uv_2 = (uint16_t*)(row) + row_size; + + Scale2RowUp_Bilinear_16(src_uv, 0, temp_uv_1, row_size, width); + P410ToAR30Row(src_y, temp_uv_1, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + + for (y = 0; y < height - 2; y += 2) { + Scale2RowUp_Bilinear_16(src_uv, src_stride_uv, temp_uv_1, row_size, width); + P410ToAR30Row(src_y, temp_uv_1, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + P410ToAR30Row(src_y, temp_uv_2, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + src_uv += src_stride_uv; + } + + if (!(height & 1)) { + Scale2RowUp_Bilinear_16(src_uv, 0, temp_uv_1, row_size, width); + P410ToAR30Row(src_y, temp_uv_1, dst_ar30, yuvconstants, width); + } + + free_aligned_buffer_64(row); + return 0; +} + +static int P210ToAR30MatrixLinear(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*P410ToAR30Row)( + const uint16_t* y_buf, const uint16_t* uv_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = P410ToAR30Row_C; + void (*ScaleRowUp2_Linear)(const uint16_t* src_uv, uint16_t* dst_uv, + int dst_width) = ScaleUVRowUp2_Linear_16_Any_C; + assert(yuvconstants); + if (!src_y || !src_uv || !dst_ar30 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_ar30 = dst_ar30 + (height - 1) * dst_stride_ar30; + dst_stride_ar30 = -dst_stride_ar30; + } +#if defined(HAS_P410TOAR30ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + P410ToAR30Row = P410ToAR30Row_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + P410ToAR30Row = P410ToAR30Row_SSSE3; + } + } +#endif +#if defined(HAS_P410TOAR30ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + P410ToAR30Row = P410ToAR30Row_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + P410ToAR30Row = P410ToAR30Row_AVX2; + } + } +#endif + +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_SSE41 + if (TestCpuFlag(kCpuHasSSE41)) { + ScaleRowUp2_Linear = ScaleUVRowUp2_Linear_16_Any_SSE41; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_AVX2 + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear = ScaleUVRowUp2_Linear_16_Any_AVX2; + } +#endif + +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_NEON + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear = ScaleUVRowUp2_Linear_16_Any_NEON; + } +#endif + + const int row_size = (2 * width + 31) & ~31; + align_buffer_64(row, row_size * sizeof(uint16_t)); + uint16_t* temp_uv = (uint16_t*)(row); + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear(src_uv, temp_uv, width); + P410ToAR30Row(src_y, temp_uv, dst_ar30, yuvconstants, width); + dst_ar30 += dst_stride_ar30; + src_y += src_stride_y; + src_uv += src_stride_uv; + } + + free_aligned_buffer_64(row); + return 0; +} + +static int I422ToRGB24MatrixLinear(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height) { + int y; + void (*I444ToRGB24Row)(const uint8_t* y_buf, const uint8_t* u_buf, + const uint8_t* v_buf, uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = + I444ToRGB24Row_C; + void (*ScaleRowUp2_Linear)(const uint8_t* src_ptr, uint8_t* dst_ptr, + int dst_width) = ScaleRowUp2_Linear_Any_C; + assert(yuvconstants); + if (!src_y || !src_u || !src_v || !dst_rgb24 || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_rgb24 = dst_rgb24 + (height - 1) * dst_stride_rgb24; + dst_stride_rgb24 = -dst_stride_rgb24; + } +#if defined(HAS_I444TORGB24ROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I444ToRGB24Row = I444ToRGB24Row_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + I444ToRGB24Row = I444ToRGB24Row_SSSE3; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I444ToRGB24Row = I444ToRGB24Row_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + I444ToRGB24Row = I444ToRGB24Row_AVX2; + } + } +#endif +#if defined(HAS_I444TORGB24ROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I444ToRGB24Row = I444ToRGB24Row_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I444ToRGB24Row = I444ToRGB24Row_NEON; + } + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSSE3; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_AVX2; + } +#endif +#if defined(HAS_SCALEROWUP2_LINEAR_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_NEON; + } +#endif + + // alloc 2 lines temp + const int row_size = (width + 31) & ~31; + align_buffer_64(row, row_size * 2); + uint8_t* temp_u = row; + uint8_t* temp_v = row + row_size; + + for (y = 0; y < height; ++y) { + ScaleRowUp2_Linear(src_u, temp_u, width); + ScaleRowUp2_Linear(src_v, temp_v, width); + I444ToRGB24Row(src_y, temp_u, temp_v, dst_rgb24, yuvconstants, width); + dst_rgb24 += dst_stride_rgb24; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + + free_aligned_buffer_64(row); + return 0; +} + +LIBYUV_API +int I422ToRGB24MatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I422ToRGB24Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_rgb24, dst_stride_rgb24, + yuvconstants, width, height); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return I422ToRGB24MatrixLinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_rgb24, dst_stride_rgb24, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I420ToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I420ToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_argb, dst_stride_argb, + yuvconstants, width, height); + case kFilterBilinear: + case kFilterBox: + return I420ToARGBMatrixBilinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_argb, dst_stride_argb, yuvconstants, width, height); + case kFilterLinear: + // Actually we can do this, but probably there's no usage. 
+ return -1; + } + + return -1; +} + +LIBYUV_API +int I422ToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I422ToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_argb, dst_stride_argb, + yuvconstants, width, height); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return I422ToARGBMatrixLinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_argb, dst_stride_argb, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I420ToRGB24MatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + uint8_t* dst_rgb24, + int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I420ToRGB24Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_rgb24, dst_stride_rgb24, + yuvconstants, width, height); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return I420ToRGB24MatrixBilinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_rgb24, dst_stride_rgb24, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I010ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I010ToAR30Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_ar30, dst_stride_ar30, + yuvconstants, width, height); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return I010ToAR30MatrixBilinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_ar30, dst_stride_ar30, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I210ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I210ToAR30Matrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_ar30, dst_stride_ar30, + yuvconstants, width, height); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return I210ToAR30MatrixLinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_ar30, dst_stride_ar30, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I010ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I010ToARGBMatrix(src_y, 
src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_argb, dst_stride_argb, + yuvconstants, width, height); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return I010ToARGBMatrixBilinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_argb, dst_stride_argb, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I210ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I210ToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_argb, dst_stride_argb, + yuvconstants, width, height); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return I210ToARGBMatrixLinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, + dst_argb, dst_stride_argb, yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int I420AlphaToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I420AlphaToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, + src_v, src_stride_v, src_a, src_stride_a, + dst_argb, dst_stride_argb, yuvconstants, + width, height, attenuate); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return I420AlphaToARGBMatrixBilinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, src_a, + src_stride_a, dst_argb, dst_stride_argb, yuvconstants, width, height, + attenuate); + } + + return -1; +} + +LIBYUV_API +int I422AlphaToARGBMatrixFilter(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I422AlphaToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, + src_v, src_stride_v, src_a, src_stride_a, + dst_argb, dst_stride_argb, yuvconstants, + width, height, attenuate); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return I422AlphaToARGBMatrixLinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, src_a, + src_stride_a, dst_argb, dst_stride_argb, yuvconstants, width, height, + attenuate); + } + + return -1; +} + +LIBYUV_API +int I010AlphaToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + const uint16_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I010AlphaToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, + src_v, src_stride_v, src_a, src_stride_a, + 
dst_argb, dst_stride_argb, yuvconstants, + width, height, attenuate); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return I010AlphaToARGBMatrixBilinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, src_a, + src_stride_a, dst_argb, dst_stride_argb, yuvconstants, width, height, + attenuate); + } + + return -1; +} + +LIBYUV_API +int I210AlphaToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + const uint16_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + int attenuate, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return I210AlphaToARGBMatrix(src_y, src_stride_y, src_u, src_stride_u, + src_v, src_stride_v, src_a, src_stride_a, + dst_argb, dst_stride_argb, yuvconstants, + width, height, attenuate); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return I210AlphaToARGBMatrixLinear( + src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v, src_a, + src_stride_a, dst_argb, dst_stride_argb, yuvconstants, width, height, + attenuate); + } + + return -1; +} + +// TODO(fb): Verify this function works correctly. P010 is like NV12 but 10 bit +// UV is biplanar. +LIBYUV_API +int P010ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return P010ToARGBMatrix(src_y, src_stride_y, src_uv, src_stride_uv, + dst_argb, dst_stride_argb, yuvconstants, width, + height); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return P010ToARGBMatrixBilinear(src_y, src_stride_y, src_uv, + src_stride_uv, dst_argb, dst_stride_argb, + yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int P210ToARGBMatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_argb, + int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return P210ToARGBMatrix(src_y, src_stride_y, src_uv, src_stride_uv, + dst_argb, dst_stride_argb, yuvconstants, width, + height); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return P210ToARGBMatrixLinear(src_y, src_stride_y, src_uv, src_stride_uv, + dst_argb, dst_stride_argb, yuvconstants, + width, height); + } + + return -1; +} + +LIBYUV_API +int P010ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return P010ToAR30Matrix(src_y, src_stride_y, src_uv, src_stride_uv, + dst_ar30, dst_stride_ar30, yuvconstants, width, + height); + case kFilterLinear: // TODO(fb): Implement Linear using Bilinear stride 0 + case kFilterBilinear: + case kFilterBox: + return P010ToAR30MatrixBilinear(src_y, src_stride_y, src_uv, + src_stride_uv, dst_ar30, dst_stride_ar30, + yuvconstants, width, height); + } + + return -1; +} + +LIBYUV_API +int 
P210ToAR30MatrixFilter(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_uv, + int src_stride_uv, + uint8_t* dst_ar30, + int dst_stride_ar30, + const struct YuvConstants* yuvconstants, + int width, + int height, + enum FilterMode filter) { + switch (filter) { + case kFilterNone: + return P210ToAR30Matrix(src_y, src_stride_y, src_uv, src_stride_uv, + dst_ar30, dst_stride_ar30, yuvconstants, width, + height); + case kFilterBilinear: + case kFilterBox: + case kFilterLinear: + return P210ToAR30MatrixLinear(src_y, src_stride_y, src_uv, src_stride_uv, + dst_ar30, dst_stride_ar30, yuvconstants, + width, height); + } + + return -1; +} + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/source/convert_from.cc b/third-party/libyuv/third_party/libyuv/source/convert_from.cc index 687f0a72c2..8bd07e4ce2 100644 --- a/third-party/libyuv/third_party/libyuv/source/convert_from.cc +++ b/third-party/libyuv/third_party/libyuv/source/convert_from.cc @@ -85,7 +85,8 @@ int I420ToI010(const uint8_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -129,7 +130,8 @@ int I420ToI012(const uint8_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -436,14 +438,6 @@ int I420ToYUY2(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOYUY2ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToYUY2Row = I422ToYUY2Row_Any_MMI; - if (IS_ALIGNED(width, 8)) { - I422ToYUY2Row = I422ToYUY2Row_MMI; - } - } -#endif #if defined(HAS_I422TOYUY2ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToYUY2Row = I422ToYUY2Row_Any_MSA; @@ -452,6 +446,14 @@ int I420ToYUY2(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOYUY2ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToYUY2Row = I422ToYUY2Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToYUY2Row = I422ToYUY2Row_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width); @@ -523,14 +525,6 @@ int I422ToUYVY(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOUYVYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToUYVYRow = I422ToUYVYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - I422ToUYVYRow = I422ToUYVYRow_MMI; - } - } -#endif #if defined(HAS_I422TOUYVYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToUYVYRow = I422ToUYVYRow_Any_MSA; @@ -539,6 +533,14 @@ int I422ToUYVY(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOUYVYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToUYVYRow = I422ToUYVYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToUYVYRow = I422ToUYVYRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width); @@ -598,14 +600,6 @@ int I420ToUYVY(const uint8_t* src_y, } } #endif -#if defined(HAS_I422TOUYVYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToUYVYRow = I422ToUYVYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - I422ToUYVYRow = I422ToUYVYRow_MMI; - } - } -#endif #if defined(HAS_I422TOUYVYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToUYVYRow = 
I422ToUYVYRow_Any_MSA; @@ -614,6 +608,14 @@ int I420ToUYVY(const uint8_t* src_y, } } #endif +#if defined(HAS_I422TOUYVYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToUYVYRow = I422ToUYVYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToUYVYRow = I422ToUYVYRow_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width); @@ -645,8 +647,7 @@ int I420ToNV12(const uint8_t* src_y, int height) { int halfwidth = (width + 1) / 2; int halfheight = (height + 1) / 2; - if (!src_y || !src_u || !src_v || !dst_y || !dst_uv || width <= 0 || - height == 0) { + if (!src_y || !src_u || !src_v || !dst_uv || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. @@ -772,7 +773,8 @@ int ConvertFromI420(const uint8_t* y, height); break; case FOURCC_NV12: { - uint8_t* dst_uv = dst_sample + width * height; + int dst_y_stride = dst_sample_stride ? dst_sample_stride : width; + uint8_t* dst_uv = dst_sample + dst_y_stride * height; r = I420ToNV12(y, y_stride, u, u_stride, v, v_stride, dst_sample, dst_sample_stride ? dst_sample_stride : width, dst_uv, dst_sample_stride ? dst_sample_stride : width, width, @@ -780,7 +782,8 @@ int ConvertFromI420(const uint8_t* y, break; } case FOURCC_NV21: { - uint8_t* dst_vu = dst_sample + width * height; + int dst_y_stride = dst_sample_stride ? dst_sample_stride : width; + uint8_t* dst_vu = dst_sample + dst_y_stride * height; r = I420ToNV21(y, y_stride, u, u_stride, v, v_stride, dst_sample, dst_sample_stride ? dst_sample_stride : width, dst_vu, dst_sample_stride ? dst_sample_stride : width, width, diff --git a/third-party/libyuv/third_party/libyuv/source/convert_from_argb.cc b/third-party/libyuv/third_party/libyuv/source/convert_from_argb.cc index e14615847d..f7eab0c65a 100644 --- a/third-party/libyuv/third_party/libyuv/source/convert_from_argb.cc +++ b/third-party/libyuv/third_party/libyuv/source/convert_from_argb.cc @@ -68,14 +68,6 @@ int ARGBToI444(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOUV444ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToUV444Row = ARGBToUV444Row_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToUV444Row = ARGBToUV444Row_MMI; - } - } -#endif #if defined(HAS_ARGBTOUV444ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToUV444Row = ARGBToUV444Row_Any_MSA; @@ -84,6 +76,14 @@ int ARGBToI444(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOUV444ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToUV444Row = ARGBToUV444Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToUV444Row = ARGBToUV444Row_LASX; + } + } +#endif #if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { ARGBToYRow = ARGBToYRow_Any_SSSE3; @@ -103,19 +103,11 @@ int ARGBToI444(const uint8_t* src_argb, #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } #endif -#if defined(HAS_ARGBTOYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYRow = ARGBToYRow_Any_MSA; @@ -124,6 +116,14 @@ int ARGBToI444(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToUV444Row(src_argb, dst_u, 
dst_v, width); @@ -170,30 +170,42 @@ int ARGBToI422(const uint8_t* src_argb, height = 1; src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0; } -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } @@ -206,20 +218,6 @@ int ARGBToI422(const uint8_t* src_argb, } } #endif - -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - ARGBToUVRow = ARGBToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; - } - } -#endif - #if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYRow = ARGBToYRow_Any_MSA; @@ -233,6 +231,17 @@ int ARGBToI422(const uint8_t* src_argb, } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif + for (y = 0; y < height; ++y) { ARGBToUVRow(src_argb, 0, dst_u, dst_v, width); ARGBToYRow(src_argb, dst_y, width); @@ -271,30 +280,10 @@ int ARGBToNV12(const uint8_t* src_argb, src_argb = src_argb + (height - 1) * src_stride_argb; src_stride_argb = -src_stride_argb; } -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; - ARGBToYRow = ARGBToYRow_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; - ARGBToYRow = ARGBToYRow_SSSE3; - } - } -#endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; - ARGBToYRow = ARGBToYRow_Any_AVX2; - if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; - ARGBToYRow = ARGBToYRow_AVX2; - } - } -#endif #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } @@ -307,15 +296,35 @@ int ARGBToNV12(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - ARGBToUVRow = ARGBToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { 
- ARGBToYRow = ARGBToYRow_MMI; - } +#if defined(HAS_ARGBTOYROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; + ARGBToYRow = ARGBToYRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToYRow = ARGBToYRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; } } #endif @@ -331,6 +340,16 @@ int ARGBToNV12(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif #if defined(HAS_MERGEUVROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { MergeUVRow_ = MergeUVRow_Any_SSE2; @@ -355,14 +374,6 @@ int ARGBToNV12(const uint8_t* src_argb, } } #endif -#if defined(HAS_MERGEUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeUVRow_ = MergeUVRow_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - MergeUVRow_ = MergeUVRow_MMI; - } - } -#endif #if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MergeUVRow_ = MergeUVRow_Any_MSA; @@ -370,6 +381,14 @@ int ARGBToNV12(const uint8_t* src_argb, MergeUVRow_ = MergeUVRow_MSA; } } +#endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow_ = MergeUVRow_Any_LSX; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_LSX; + } + } #endif { // Allocate a rows of uv. 
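The hunks above (and the matching ABGR hunks further down) split the old combined HAS_ARGBTOYROW_*/HAS_ARGBTOUVROW_* guards so each row kernel is selected independently at run time; a platform that provides only one of the two kernels can now still accelerate it. For reference, a compact sketch of the selection idiom these blocks follow is shown below. It reuses kernel and flag names that appear in the diff and assumes the usual libyuv headers (cpu_id.h, row.h) are available, so it illustrates the pattern rather than reproducing code from this patch:

  // Start from the portable C kernel, upgrade to the "_Any_" SIMD wrapper when
  // the CPU supports it (any width), and use the full-SIMD kernel only when the
  // row width meets its alignment requirement.
  void (*ARGBToYRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) =
      ARGBToYRow_C;
  #if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;  // handles arbitrary widths
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;    // fast path for widths that are multiples of 16
    }
  }
  #endif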
@@ -423,30 +442,42 @@ int ARGBToNV21(const uint8_t* src_argb, src_argb = src_argb + (height - 1) * src_stride_argb; src_stride_argb = -src_stride_argb; } -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } @@ -459,18 +490,6 @@ int ARGBToNV21(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - ARGBToUVRow = ARGBToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYRow = ARGBToYRow_Any_MSA; @@ -483,6 +502,16 @@ int ARGBToNV21(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif #if defined(HAS_MERGEUVROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { MergeUVRow_ = MergeUVRow_Any_SSE2; @@ -507,14 +536,6 @@ int ARGBToNV21(const uint8_t* src_argb, } } #endif -#if defined(HAS_MERGEUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeUVRow_ = MergeUVRow_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - MergeUVRow_ = MergeUVRow_MMI; - } - } -#endif #if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MergeUVRow_ = MergeUVRow_Any_MSA; @@ -522,6 +543,14 @@ int ARGBToNV21(const uint8_t* src_argb, MergeUVRow_ = MergeUVRow_MSA; } } +#endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow_ = MergeUVRow_Any_LSX; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_LSX; + } + } #endif { // Allocate a rows of uv. 
@@ -574,30 +603,42 @@ int ABGRToNV12(const uint8_t* src_abgr, src_abgr = src_abgr + (height - 1) * src_stride_abgr; src_stride_abgr = -src_stride_abgr; } -#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3) +#if defined(HAS_ABGRTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ABGRToUVRow = ABGRToUVRow_Any_SSSE3; ABGRToYRow = ABGRToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ABGRToUVRow = ABGRToUVRow_SSSE3; ABGRToYRow = ABGRToYRow_SSSE3; } } #endif -#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2) +#if defined(HAS_ABGRTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToUVRow = ABGRToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToUVRow = ABGRToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ABGRToUVRow = ABGRToUVRow_Any_AVX2; ABGRToYRow = ABGRToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ABGRToUVRow = ABGRToUVRow_AVX2; ABGRToYRow = ABGRToYRow_AVX2; } } #endif +#if defined(HAS_ABGRTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToUVRow = ABGRToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToUVRow = ABGRToUVRow_AVX2; + } + } +#endif #if defined(HAS_ABGRTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ABGRToYRow = ABGRToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ABGRToYRow = ABGRToYRow_NEON; } } @@ -610,18 +651,6 @@ int ABGRToNV12(const uint8_t* src_abgr, } } #endif -#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ABGRToYRow = ABGRToYRow_Any_MMI; - ABGRToUVRow = ABGRToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ABGRToYRow = ABGRToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ABGRToUVRow = ABGRToUVRow_MMI; - } - } -#endif #if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ABGRToYRow = ABGRToYRow_Any_MSA; @@ -658,14 +687,6 @@ int ABGRToNV12(const uint8_t* src_abgr, } } #endif -#if defined(HAS_MERGEUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeUVRow_ = MergeUVRow_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - MergeUVRow_ = MergeUVRow_MMI; - } - } -#endif #if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MergeUVRow_ = MergeUVRow_Any_MSA; @@ -673,6 +694,14 @@ int ABGRToNV12(const uint8_t* src_abgr, MergeUVRow_ = MergeUVRow_MSA; } } +#endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow_ = MergeUVRow_Any_LSX; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_LSX; + } + } #endif { // Allocate a rows of uv. 
@@ -726,30 +755,42 @@ int ABGRToNV21(const uint8_t* src_abgr, src_abgr = src_abgr + (height - 1) * src_stride_abgr; src_stride_abgr = -src_stride_abgr; } -#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3) +#if defined(HAS_ABGRTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ABGRToUVRow = ABGRToUVRow_Any_SSSE3; ABGRToYRow = ABGRToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ABGRToUVRow = ABGRToUVRow_SSSE3; ABGRToYRow = ABGRToYRow_SSSE3; } } #endif -#if defined(HAS_ABGRTOYROW_AVX2) && defined(HAS_ABGRTOUVROW_AVX2) +#if defined(HAS_ABGRTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToUVRow = ABGRToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToUVRow = ABGRToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ABGRToUVRow = ABGRToUVRow_Any_AVX2; ABGRToYRow = ABGRToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ABGRToUVRow = ABGRToUVRow_AVX2; ABGRToYRow = ABGRToYRow_AVX2; } } #endif +#if defined(HAS_ABGRTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToUVRow = ABGRToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToUVRow = ABGRToUVRow_AVX2; + } + } +#endif #if defined(HAS_ABGRTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ABGRToYRow = ABGRToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ABGRToYRow = ABGRToYRow_NEON; } } @@ -762,18 +803,6 @@ int ABGRToNV21(const uint8_t* src_abgr, } } #endif -#if defined(HAS_ABGRTOYROW_MMI) && defined(HAS_ABGRTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ABGRToYRow = ABGRToYRow_Any_MMI; - ABGRToUVRow = ABGRToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ABGRToYRow = ABGRToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ABGRToUVRow = ABGRToUVRow_MMI; - } - } -#endif #if defined(HAS_ABGRTOYROW_MSA) && defined(HAS_ABGRTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ABGRToYRow = ABGRToYRow_Any_MSA; @@ -810,14 +839,6 @@ int ABGRToNV21(const uint8_t* src_abgr, } } #endif -#if defined(HAS_MERGEUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeUVRow_ = MergeUVRow_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - MergeUVRow_ = MergeUVRow_MMI; - } - } -#endif #if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MergeUVRow_ = MergeUVRow_Any_MSA; @@ -825,6 +846,14 @@ int ABGRToNV21(const uint8_t* src_abgr, MergeUVRow_ = MergeUVRow_MSA; } } +#endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow_ = MergeUVRow_Any_LSX; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_LSX; + } + } #endif { // Allocate a rows of uv. 
@@ -883,30 +912,42 @@ int ARGBToYUY2(const uint8_t* src_argb, height = 1; src_stride_argb = dst_stride_yuy2 = 0; } -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } @@ -919,18 +960,6 @@ int ARGBToYUY2(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - ARGBToUVRow = ARGBToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYRow = ARGBToYRow_Any_MSA; @@ -943,6 +972,16 @@ int ARGBToYUY2(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif #if defined(HAS_I422TOYUY2ROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { I422ToYUY2Row = I422ToYUY2Row_Any_SSE2; @@ -967,14 +1006,6 @@ int ARGBToYUY2(const uint8_t* src_argb, } } #endif -#if defined(HAS_I422TOYUY2ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToYUY2Row = I422ToYUY2Row_Any_MMI; - if (IS_ALIGNED(width, 8)) { - I422ToYUY2Row = I422ToYUY2Row_MMI; - } - } -#endif #if defined(HAS_I422TOYUY2ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToYUY2Row = I422ToYUY2Row_Any_MSA; @@ -983,6 +1014,14 @@ int ARGBToYUY2(const uint8_t* src_argb, } } #endif +#if defined(HAS_I422TOYUY2ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToYUY2Row = I422ToYUY2Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToYUY2Row = I422ToYUY2Row_LASX; + } + } +#endif { // Allocate a rows of yuv. 
@@ -1036,30 +1075,42 @@ int ARGBToUYVY(const uint8_t* src_argb, height = 1; src_stride_argb = dst_stride_uyvy = 0; } -#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) +#if defined(HAS_ARGBTOUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } #endif +#if defined(HAS_ARGBTOUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; + } + } +#endif #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } @@ -1072,18 +1123,6 @@ int ARGBToUYVY(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYROW_MMI) && defined(HAS_ARGBTOUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - ARGBToUVRow = ARGBToUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ARGBToUVRow = ARGBToUVRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYROW_MSA) && defined(HAS_ARGBTOUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYRow = ARGBToYRow_Any_MSA; @@ -1096,6 +1135,16 @@ int ARGBToUYVY(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) && defined(HAS_ARGBTOUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + ARGBToUVRow = ARGBToUVRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + ARGBToUVRow = ARGBToUVRow_LASX; + } + } +#endif #if defined(HAS_I422TOUYVYROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { I422ToUYVYRow = I422ToUYVYRow_Any_SSE2; @@ -1120,14 +1169,6 @@ int ARGBToUYVY(const uint8_t* src_argb, } } #endif -#if defined(HAS_I422TOUYVYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToUYVYRow = I422ToUYVYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - I422ToUYVYRow = I422ToUYVYRow_MMI; - } - } -#endif #if defined(HAS_I422TOUYVYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToUYVYRow = I422ToUYVYRow_Any_MSA; @@ -1136,6 +1177,14 @@ int ARGBToUYVY(const uint8_t* src_argb, } } #endif +#if defined(HAS_I422TOUYVYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToUYVYRow = I422ToUYVYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + I422ToUYVYRow = I422ToUYVYRow_LASX; + } + } +#endif { // Allocate a rows of yuv. 
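ARGBToYUY2() and ARGBToUYVY() above follow the same shape: per scanline they produce Y and U/V rows, then pack them with I422ToYUY2Row / I422ToUYVYRow (now also available as LASX variants). The two packed outputs differ only in byte order, two bytes per pixel with one U/V pair shared by two pixels; a tiny packing sketch with illustrative sample values:

#include <stdint.h>

/* Pack one pair of pixels (Y0, Y1) sharing chroma (U, V). */
static void PackPair(uint8_t y0, uint8_t y1, uint8_t u, uint8_t v,
                     uint8_t yuy2[4], uint8_t uyvy[4]) {
  yuy2[0] = y0; yuy2[1] = u;  yuy2[2] = y1; yuy2[3] = v;  /* Y0 U Y1 V */
  uyvy[0] = u;  uyvy[1] = y0; uyvy[2] = v;  uyvy[3] = y1; /* U Y0 V Y1 */
}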
@@ -1200,19 +1249,11 @@ int ARGBToI400(const uint8_t* src_argb, #if defined(HAS_ARGBTOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYRow = ARGBToYRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYRow = ARGBToYRow_NEON; } } #endif -#if defined(HAS_ARGBTOYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYRow = ARGBToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYRow = ARGBToYRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYRow = ARGBToYRow_Any_MSA; @@ -1221,6 +1262,14 @@ int ARGBToI400(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYRow = ARGBToYRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYRow = ARGBToYRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToYRow(src_argb, dst_y, width); @@ -1298,19 +1347,11 @@ int ARGBToRGB24(const uint8_t* src_argb, #if defined(HAS_ARGBTORGB24ROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToRGB24Row = ARGBToRGB24Row_NEON; } } #endif -#if defined(HAS_ARGBTORGB24ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToRGB24Row = ARGBToRGB24Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBToRGB24Row = ARGBToRGB24Row_MMI; - } - } -#endif #if defined(HAS_ARGBTORGB24ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToRGB24Row = ARGBToRGB24Row_Any_MSA; @@ -1319,6 +1360,14 @@ int ARGBToRGB24(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTORGB24ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToRGB24Row = ARGBToRGB24Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToRGB24Row = ARGBToRGB24Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToRGB24Row(src_argb, dst_rgb24, width); @@ -1377,14 +1426,6 @@ int ARGBToRAW(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTORAWROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToRAWRow = ARGBToRAWRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBToRAWRow = ARGBToRAWRow_MMI; - } - } -#endif #if defined(HAS_ARGBTORAWROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToRAWRow = ARGBToRAWRow_Any_MSA; @@ -1393,6 +1434,14 @@ int ARGBToRAW(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTORAWROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToRAWRow = ARGBToRAWRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToRAWRow = ARGBToRAWRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToRAWRow(src_argb, dst_raw, width); @@ -1455,14 +1504,6 @@ int ARGBToRGB565Dither(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTORGB565DITHERROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_MMI; - } - } -#endif #if defined(HAS_ARGBTORGB565DITHERROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_MSA; @@ -1471,6 +1512,14 @@ int ARGBToRGB565Dither(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTORGB565DITHERROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToRGB565DitherRow(src_argb, dst_rgb565, @@ -1532,14 +1581,6 @@ int ARGBToRGB565(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTORGB565ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToRGB565Row = ARGBToRGB565Row_Any_MMI; - if 
(IS_ALIGNED(width, 4)) { - ARGBToRGB565Row = ARGBToRGB565Row_MMI; - } - } -#endif #if defined(HAS_ARGBTORGB565ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA; @@ -1548,6 +1589,14 @@ int ARGBToRGB565(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTORGB565ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToRGB565Row = ARGBToRGB565Row_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBToRGB565Row = ARGBToRGB565Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToRGB565Row(src_argb, dst_rgb565, width); @@ -1606,14 +1655,6 @@ int ARGBToARGB1555(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOARGB1555ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBToARGB1555Row = ARGBToARGB1555Row_MMI; - } - } -#endif #if defined(HAS_ARGBTOARGB1555ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA; @@ -1622,6 +1663,14 @@ int ARGBToARGB1555(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOARGB1555ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToARGB1555Row = ARGBToARGB1555Row_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBToARGB1555Row = ARGBToARGB1555Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToARGB1555Row(src_argb, dst_argb1555, width); @@ -1680,14 +1729,6 @@ int ARGBToARGB4444(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOARGB4444ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBToARGB4444Row = ARGBToARGB4444Row_MMI; - } - } -#endif #if defined(HAS_ARGBTOARGB4444ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA; @@ -1696,6 +1737,14 @@ int ARGBToARGB4444(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOARGB4444ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToARGB4444Row = ARGBToARGB4444Row_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBToARGB4444Row = ARGBToARGB4444Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBToARGB4444Row(src_argb, dst_argb4444, width); @@ -1809,19 +1858,19 @@ int ARGBToJ420(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_yj, int dst_stride_yj, - uint8_t* dst_u, - int dst_stride_u, - uint8_t* dst_v, - int dst_stride_v, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, int width, int height) { int y; void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb, - uint8_t* dst_u, uint8_t* dst_v, int width) = + uint8_t* dst_uj, uint8_t* dst_vj, int width) = ARGBToUVJRow_C; void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) = ARGBToYJRow_C; - if (!src_argb || !dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) { + if (!src_argb || !dst_yj || !dst_uj || !dst_vj || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. 
@@ -1830,28 +1879,10 @@ int ARGBToJ420(const uint8_t* src_argb, src_argb = src_argb + (height - 1) * src_stride_argb; src_stride_argb = -src_stride_argb; } -#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; - ARGBToYJRow = ARGBToYJRow_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_SSSE3; - ARGBToYJRow = ARGBToYJRow_SSSE3; - } - } -#endif -#if defined(HAS_ARGBTOYJROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - ARGBToYJRow = ARGBToYJRow_Any_AVX2; - if (IS_ALIGNED(width, 32)) { - ARGBToYJRow = ARGBToYJRow_AVX2; - } - } -#endif #if defined(HAS_ARGBTOYJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYJRow = ARGBToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYJRow = ARGBToYJRow_NEON; } } @@ -1864,15 +1895,35 @@ int ARGBToJ420(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYJROW_MMI) && defined(HAS_ARGBTOUVJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYJRow = ARGBToYJRow_Any_MMI; - ARGBToUVJRow = ARGBToUVJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_MMI; - } +#if defined(HAS_ARGBTOYJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToYJRow = ARGBToYJRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_MMI; + ARGBToYJRow = ARGBToYJRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOUVJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVJRow = ARGBToUVJRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToYJRow = ARGBToYJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToYJRow = ARGBToYJRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBTOUVJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVJRow = ARGBToUVJRow_AVX2; } } #endif @@ -1888,18 +1939,28 @@ int ARGBToJ420(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYJROW_LASX) && defined(HAS_ARGBTOUVJROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYJRow = ARGBToYJRow_Any_LASX; + ARGBToUVJRow = ARGBToUVJRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYJRow = ARGBToYJRow_LASX; + ARGBToUVJRow = ARGBToUVJRow_LASX; + } + } +#endif for (y = 0; y < height - 1; y += 2) { - ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width); + ARGBToUVJRow(src_argb, src_stride_argb, dst_uj, dst_vj, width); ARGBToYJRow(src_argb, dst_yj, width); ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width); src_argb += src_stride_argb * 2; dst_yj += dst_stride_yj * 2; - dst_u += dst_stride_u; - dst_v += dst_stride_v; + dst_uj += dst_stride_uj; + dst_vj += dst_stride_vj; } if (height & 1) { - ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width); + ARGBToUVJRow(src_argb, 0, dst_uj, dst_vj, width); ARGBToYJRow(src_argb, dst_yj, width); } return 0; @@ -1911,19 +1972,19 @@ int ARGBToJ422(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_yj, int dst_stride_yj, - uint8_t* dst_u, - int dst_stride_u, - uint8_t* dst_v, - int dst_stride_v, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, int width, int height) { int y; void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb, - uint8_t* dst_u, uint8_t* dst_v, int width) = + uint8_t* dst_uj, uint8_t* dst_vj, int width) = ARGBToUVJRow_C; void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) = ARGBToYJRow_C; - if (!src_argb || 
!dst_yj || !dst_u || !dst_v || width <= 0 || height == 0) { + if (!src_argb || !dst_yj || !dst_uj || !dst_vj || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. @@ -1934,21 +1995,27 @@ int ARGBToJ422(const uint8_t* src_argb, } // Coalesce rows. if (src_stride_argb == width * 4 && dst_stride_yj == width && - dst_stride_u * 2 == width && dst_stride_v * 2 == width) { + dst_stride_uj * 2 == width && dst_stride_vj * 2 == width) { width *= height; height = 1; - src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0; + src_stride_argb = dst_stride_yj = dst_stride_uj = dst_stride_vj = 0; } -#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3) +#if defined(HAS_ARGBTOYJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; ARGBToYJRow = ARGBToYJRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_SSSE3; ARGBToYJRow = ARGBToYJRow_SSSE3; } } #endif +#if defined(HAS_ARGBTOUVJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToUVJRow = ARGBToUVJRow_SSSE3; + } + } +#endif #if defined(HAS_ARGBTOYJROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { ARGBToYJRow = ARGBToYJRow_Any_AVX2; @@ -1957,10 +2024,18 @@ int ARGBToJ422(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOUVJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToUVJRow = ARGBToUVJRow_AVX2; + } + } +#endif #if defined(HAS_ARGBTOYJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYJRow = ARGBToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYJRow = ARGBToYJRow_NEON; } } @@ -1973,18 +2048,6 @@ int ARGBToJ422(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYJROW_MMI) && defined(HAS_ARGBTOUVJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYJRow = ARGBToYJRow_Any_MMI; - ARGBToUVJRow = ARGBToUVJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_MMI; - } - if (IS_ALIGNED(width, 16)) { - ARGBToUVJRow = ARGBToUVJRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYJROW_MSA) && defined(HAS_ARGBTOUVJROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYJRow = ARGBToYJRow_Any_MSA; @@ -1997,14 +2060,470 @@ int ARGBToJ422(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYJROW_LSX) && defined(HAS_ARGBTOUVJROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGBToYJRow = ARGBToYJRow_Any_LSX; + ARGBToUVJRow = ARGBToUVJRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ARGBToYJRow = ARGBToYJRow_LSX; + ARGBToUVJRow = ARGBToUVJRow_LSX; + } + } +#endif +#if defined(HAS_ARGBTOYJROW_LASX) && defined(HAS_ARGBTOUVJROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYJRow = ARGBToYJRow_Any_LASX; + ARGBToUVJRow = ARGBToUVJRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYJRow = ARGBToYJRow_LASX; + ARGBToUVJRow = ARGBToUVJRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { - ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width); + ARGBToUVJRow(src_argb, 0, dst_uj, dst_vj, width); ARGBToYJRow(src_argb, dst_yj, width); src_argb += src_stride_argb; dst_yj += dst_stride_yj; - dst_u += dst_stride_u; - dst_v += dst_stride_v; + dst_uj += dst_stride_uj; + dst_vj += dst_stride_vj; + } + return 0; +} + +// Convert ARGB to J400. 
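The ARGBToJ420()/ARGBToJ422() hunks above rename dst_u/dst_v to dst_uj/dst_vj to mark the full-range (JPEG) chroma, but the call shape is unchanged. A minimal usage sketch for ARGBToJ420(), assuming the declaration lives in the usual libyuv/convert_from_argb.h; buffer names and the error handling are illustrative:

#include <stdint.h>
#include <stdlib.h>
#include "libyuv/convert_from_argb.h"  /* assumed header for ARGBToJ420() */

int ConvertFrameToJ420(const uint8_t* argb, int w, int h) {
  int hw = (w + 1) / 2, hh = (h + 1) / 2;       /* chroma is subsampled 2x2 */
  uint8_t* yj = (uint8_t*)malloc((size_t)w * h);
  uint8_t* uj = (uint8_t*)malloc((size_t)hw * hh);
  uint8_t* vj = (uint8_t*)malloc((size_t)hw * hh);
  int rc = -1;
  if (yj && uj && vj) {
    rc = ARGBToJ420(argb, w * 4,     /* 4 bytes per ARGB pixel */
                    yj, w,           /* full-resolution luma */
                    uj, hw, vj, hw,  /* quarter-resolution chroma planes */
                    w, h);           /* 0 on success, -1 on bad arguments */
  }
  free(yj); free(uj); free(vj);
  return rc;
}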
+LIBYUV_API +int ARGBToJ400(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_yj, + int dst_stride_yj, + int width, + int height) { + int y; + void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) = + ARGBToYJRow_C; + if (!src_argb || !dst_yj || width <= 0 || height == 0) { + return -1; + } + if (height < 0) { + height = -height; + src_argb = src_argb + (height - 1) * src_stride_argb; + src_stride_argb = -src_stride_argb; + } + // Coalesce rows. + if (src_stride_argb == width * 4 && dst_stride_yj == width) { + width *= height; + height = 1; + src_stride_argb = dst_stride_yj = 0; + } +#if defined(HAS_ARGBTOYJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToYJRow = ARGBToYJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ARGBToYJRow = ARGBToYJRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBTOYJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToYJRow = ARGBToYJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ARGBToYJRow = ARGBToYJRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBTOYJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBToYJRow = ARGBToYJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ARGBToYJRow = ARGBToYJRow_NEON; + } + } +#endif +#if defined(HAS_ARGBTOYJROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ARGBToYJRow = ARGBToYJRow_Any_MSA; + if (IS_ALIGNED(width, 16)) { + ARGBToYJRow = ARGBToYJRow_MSA; + } + } +#endif + + for (y = 0; y < height; ++y) { + ARGBToYJRow(src_argb, dst_yj, width); + src_argb += src_stride_argb; + dst_yj += dst_stride_yj; + } + return 0; +} + +// Convert RGBA to J400. +LIBYUV_API +int RGBAToJ400(const uint8_t* src_rgba, + int src_stride_rgba, + uint8_t* dst_yj, + int dst_stride_yj, + int width, + int height) { + int y; + void (*RGBAToYJRow)(const uint8_t* src_rgba, uint8_t* dst_yj, int width) = + RGBAToYJRow_C; + if (!src_rgba || !dst_yj || width <= 0 || height == 0) { + return -1; + } + if (height < 0) { + height = -height; + src_rgba = src_rgba + (height - 1) * src_stride_rgba; + src_stride_rgba = -src_stride_rgba; + } + // Coalesce rows. + if (src_stride_rgba == width * 4 && dst_stride_yj == width) { + width *= height; + height = 1; + src_stride_rgba = dst_stride_yj = 0; + } +#if defined(HAS_RGBATOYJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + RGBAToYJRow = RGBAToYJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + RGBAToYJRow = RGBAToYJRow_SSSE3; + } + } +#endif +#if defined(HAS_RGBATOYJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + RGBAToYJRow = RGBAToYJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + RGBAToYJRow = RGBAToYJRow_AVX2; + } + } +#endif +#if defined(HAS_RGBATOYJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + RGBAToYJRow = RGBAToYJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + RGBAToYJRow = RGBAToYJRow_NEON; + } + } +#endif +#if defined(HAS_RGBATOYJROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + RGBAToYJRow = RGBAToYJRow_Any_MSA; + if (IS_ALIGNED(width, 16)) { + RGBAToYJRow = RGBAToYJRow_MSA; + } + } +#endif + + for (y = 0; y < height; ++y) { + RGBAToYJRow(src_rgba, dst_yj, width); + src_rgba += src_stride_rgba; + dst_yj += dst_stride_yj; + } + return 0; +} + +// Convert ABGR to J420. (JPeg full range I420). 
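ARGBToJ400() and RGBAToJ400() added above emit full-range ("JPEG") luma, unlike ARGBToI400(), which produces the studio-range variant. As a rough reference only (libyuv itself uses fixed-point integer coefficients, so results can differ by a count or so), the two BT.601 transfer functions are approximately:

#include <stdint.h>

/* Approximate BT.601 luma, floating point for clarity only. */
static uint8_t FullRangeY(uint8_t r, uint8_t g, uint8_t b) {
  return (uint8_t)(0.299 * r + 0.587 * g + 0.114 * b + 0.5);       /* 0..255 */
}
static uint8_t LimitedRangeY(uint8_t r, uint8_t g, uint8_t b) {
  return (uint8_t)(0.257 * r + 0.504 * g + 0.098 * b + 16 + 0.5);  /* 16..235 */
}
/* White (255,255,255): FullRangeY -> 255, LimitedRangeY -> ~235.
 * Black (0,0,0):       FullRangeY -> 0,   LimitedRangeY -> 16.   */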
+LIBYUV_API +int ABGRToJ420(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_yj, + int dst_stride_yj, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, + int width, + int height) { + int y; + void (*ABGRToUVJRow)(const uint8_t* src_abgr0, int src_stride_abgr, + uint8_t* dst_uj, uint8_t* dst_vj, int width) = + ABGRToUVJRow_C; + void (*ABGRToYJRow)(const uint8_t* src_abgr, uint8_t* dst_yj, int width) = + ABGRToYJRow_C; + if (!src_abgr || !dst_yj || !dst_uj || !dst_vj || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + src_abgr = src_abgr + (height - 1) * src_stride_abgr; + src_stride_abgr = -src_stride_abgr; + } +#if defined(HAS_ABGRTOYJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToYJRow = ABGRToYJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOUVJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToUVJRow = ABGRToUVJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToUVJRow = ABGRToUVJRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToYJRow = ABGRToYJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToYJRow = ABGRToYJRow_AVX2; + } + } +#endif +#if defined(HAS_ABGRTOUVJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToUVJRow = ABGRToUVJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToUVJRow = ABGRToUVJRow_AVX2; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ABGRToYJRow = ABGRToYJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_NEON; + } + } +#endif +#if defined(HAS_ABGRTOUVJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ABGRToUVJRow = ABGRToUVJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ABGRToUVJRow = ABGRToUVJRow_NEON; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_MSA) && defined(HAS_ABGRTOUVJROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ABGRToYJRow = ABGRToYJRow_Any_MSA; + ABGRToUVJRow = ABGRToUVJRow_Any_MSA; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_MSA; + ABGRToUVJRow = ABGRToUVJRow_MSA; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_LSX) && defined(HAS_ABGRTOUVJROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ABGRToYJRow = ABGRToYJRow_Any_LSX; + ABGRToUVJRow = ABGRToUVJRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_LSX; + ABGRToUVJRow = ABGRToUVJRow_LSX; + } + } +#endif + + for (y = 0; y < height - 1; y += 2) { + ABGRToUVJRow(src_abgr, src_stride_abgr, dst_uj, dst_vj, width); + ABGRToYJRow(src_abgr, dst_yj, width); + ABGRToYJRow(src_abgr + src_stride_abgr, dst_yj + dst_stride_yj, width); + src_abgr += src_stride_abgr * 2; + dst_yj += dst_stride_yj * 2; + dst_uj += dst_stride_uj; + dst_vj += dst_stride_vj; + } + if (height & 1) { + ABGRToUVJRow(src_abgr, 0, dst_uj, dst_vj, width); + ABGRToYJRow(src_abgr, dst_yj, width); + } + return 0; +} + +// Convert ABGR to J422. (JPeg full range I422). 
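ABGRToJ420() above mirrors ARGBToJ420() for the other channel order. libyuv names packed formats by their little-endian 32-bit word, so "ARGB" lands in memory as B,G,R,A and "ABGR" as R,G,B,A; a one-pixel sketch of the difference (byte dumps assume a little-endian host):

#include <stdint.h>
#include <string.h>

/* Pack one opaque red pixel (R=255, G=0, B=0, A=255) both ways.
 * ARGBToJ420() expects the first layout, ABGRToJ420() the second. */
static void PackRedPixel(uint8_t argb[4], uint8_t abgr[4]) {
  const uint32_t argb_word = 0xFFFF0000u;  /* 0xAARRGGBB */
  const uint32_t abgr_word = 0xFF0000FFu;  /* 0xAABBGGRR */
  memcpy(argb, &argb_word, 4);  /* memory bytes: B,G,R,A = 00 00 FF FF */
  memcpy(abgr, &abgr_word, 4);  /* memory bytes: R,G,B,A = FF 00 00 FF */
}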
+LIBYUV_API +int ABGRToJ422(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_yj, + int dst_stride_yj, + uint8_t* dst_uj, + int dst_stride_uj, + uint8_t* dst_vj, + int dst_stride_vj, + int width, + int height) { + int y; + void (*ABGRToUVJRow)(const uint8_t* src_abgr0, int src_stride_abgr, + uint8_t* dst_uj, uint8_t* dst_vj, int width) = + ABGRToUVJRow_C; + void (*ABGRToYJRow)(const uint8_t* src_abgr, uint8_t* dst_yj, int width) = + ABGRToYJRow_C; + if (!src_abgr || !dst_yj || !dst_uj || !dst_vj || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + src_abgr = src_abgr + (height - 1) * src_stride_abgr; + src_stride_abgr = -src_stride_abgr; + } + // Coalesce rows. + if (src_stride_abgr == width * 4 && dst_stride_yj == width && + dst_stride_uj * 2 == width && dst_stride_vj * 2 == width) { + width *= height; + height = 1; + src_stride_abgr = dst_stride_yj = dst_stride_uj = dst_stride_vj = 0; + } +#if defined(HAS_ABGRTOYJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToYJRow = ABGRToYJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOUVJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToUVJRow = ABGRToUVJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToUVJRow = ABGRToUVJRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToYJRow = ABGRToYJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToYJRow = ABGRToYJRow_AVX2; + } + } +#endif +#if defined(HAS_ABGRTOUVJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToUVJRow = ABGRToUVJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToUVJRow = ABGRToUVJRow_AVX2; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ABGRToYJRow = ABGRToYJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_NEON; + } + } +#endif +#if defined(HAS_ABGRTOUVJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ABGRToUVJRow = ABGRToUVJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ABGRToUVJRow = ABGRToUVJRow_NEON; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_MSA) && defined(HAS_ABGRTOUVJROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ABGRToYJRow = ABGRToYJRow_Any_MSA; + ABGRToUVJRow = ABGRToUVJRow_Any_MSA; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_MSA; + } + if (IS_ALIGNED(width, 32)) { + ABGRToUVJRow = ABGRToUVJRow_MSA; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_LSX) && defined(HAS_ABGRTOUVJROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ABGRToYJRow = ABGRToYJRow_Any_LSX; + ABGRToUVJRow = ABGRToUVJRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_LSX; + ABGRToUVJRow = ABGRToUVJRow_LSX; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_LASX) && defined(HAS_ABGRTOUVJROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ABGRToYJRow = ABGRToYJRow_Any_LASX; + ABGRToUVJRow = ABGRToUVJRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ABGRToYJRow = ABGRToYJRow_LASX; + ABGRToUVJRow = ABGRToUVJRow_LASX; + } + } +#endif + + for (y = 0; y < height; ++y) { + ABGRToUVJRow(src_abgr, 0, dst_uj, dst_vj, width); + ABGRToYJRow(src_abgr, dst_yj, width); + src_abgr += src_stride_abgr; + dst_yj += dst_stride_yj; + dst_uj += dst_stride_uj; + dst_vj += dst_stride_vj; + } + return 0; +} + +// Convert ABGR to J400. 
+LIBYUV_API +int ABGRToJ400(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_yj, + int dst_stride_yj, + int width, + int height) { + int y; + void (*ABGRToYJRow)(const uint8_t* src_abgr, uint8_t* dst_yj, int width) = + ABGRToYJRow_C; + if (!src_abgr || !dst_yj || width <= 0 || height == 0) { + return -1; + } + if (height < 0) { + height = -height; + src_abgr = src_abgr + (height - 1) * src_stride_abgr; + src_stride_abgr = -src_stride_abgr; + } + // Coalesce rows. + if (src_stride_abgr == width * 4 && dst_stride_yj == width) { + width *= height; + height = 1; + src_stride_abgr = dst_stride_yj = 0; + } +#if defined(HAS_ABGRTOYJROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ABGRToYJRow = ABGRToYJRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_SSSE3; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ABGRToYJRow = ABGRToYJRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + ABGRToYJRow = ABGRToYJRow_AVX2; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ABGRToYJRow = ABGRToYJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_NEON; + } + } +#endif +#if defined(HAS_ABGRTOYJROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + ABGRToYJRow = ABGRToYJRow_Any_MSA; + if (IS_ALIGNED(width, 16)) { + ABGRToYJRow = ABGRToYJRow_MSA; + } + } +#endif + + for (y = 0; y < height; ++y) { + ABGRToYJRow(src_abgr, dst_yj, width); + src_abgr += src_stride_abgr; + dst_yj += dst_stride_yj; } return 0; } @@ -2127,31 +2646,85 @@ int ARGBToAB64(const uint8_t* src_argb, return 0; } -// Convert ARGB to J400. +// Enabled if 1 pass is available +#if defined(HAS_RAWTOYJROW_NEON) || defined(HAS_RAWTOYJROW_MSA) +#define HAS_RAWTOYJROW +#endif + +// RAW to JNV21 full range NV21 LIBYUV_API -int ARGBToJ400(const uint8_t* src_argb, - int src_stride_argb, - uint8_t* dst_yj, - int dst_stride_yj, +int RAWToJNV21(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_vu, + int dst_stride_vu, int width, int height) { int y; - void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_yj, int width) = + int halfwidth = (width + 1) >> 1; +#if defined(HAS_RAWTOYJROW) + void (*RAWToUVJRow)(const uint8_t* src_raw, int src_stride_raw, + uint8_t* dst_uj, uint8_t* dst_vj, int width) = + RAWToUVJRow_C; + void (*RAWToYJRow)(const uint8_t* src_raw, uint8_t* dst_y, int width) = + RAWToYJRow_C; +#else + void (*RAWToARGBRow)(const uint8_t* src_rgb, uint8_t* dst_argb, int width) = + RAWToARGBRow_C; + void (*ARGBToUVJRow)(const uint8_t* src_argb0, int src_stride_argb, + uint8_t* dst_uj, uint8_t* dst_vj, int width) = + ARGBToUVJRow_C; + void (*ARGBToYJRow)(const uint8_t* src_argb, uint8_t* dst_y, int width) = ARGBToYJRow_C; - if (!src_argb || !dst_yj || width <= 0 || height == 0) { +#endif + void (*MergeUVRow_)(const uint8_t* src_uj, const uint8_t* src_vj, + uint8_t* dst_vu, int width) = MergeUVRow_C; + if (!src_raw || !dst_y || !dst_vu || width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; - src_argb = src_argb + (height - 1) * src_stride_argb; - src_stride_argb = -src_stride_argb; + src_raw = src_raw + (height - 1) * src_stride_raw; + src_stride_raw = -src_stride_raw; } - // Coalesce rows. - if (src_stride_argb == width * 4 && dst_stride_yj == width) { - width *= height; - height = 1; - src_stride_argb = dst_stride_yj = 0; + +#if defined(HAS_RAWTOYJROW) + +// Neon version does direct RAW to YUV. 
+#if defined(HAS_RAWTOYJROW_NEON) && defined(HAS_RAWTOUVJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + RAWToUVJRow = RAWToUVJRow_Any_NEON; + RAWToYJRow = RAWToYJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + RAWToYJRow = RAWToYJRow_NEON; + RAWToUVJRow = RAWToUVJRow_NEON; + } } +#endif +#if defined(HAS_RAWTOYJROW_MSA) && defined(HAS_RAWTOUVJROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + RAWToUVJRow = RAWToUVJRow_Any_MSA; + RAWToYJRow = RAWToYJRow_Any_MSA; + if (IS_ALIGNED(width, 16)) { + RAWToYJRow = RAWToYJRow_MSA; + RAWToUVJRow = RAWToUVJRow_MSA; + } + } +#endif + +// Other platforms do intermediate conversion from RAW to ARGB. +#else // HAS_RAWTOYJROW + +#if defined(HAS_RAWTOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + RAWToARGBRow = RAWToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + RAWToARGBRow = RAWToARGBRow_SSSE3; + } + } +#endif #if defined(HAS_ARGBTOYJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { ARGBToYJRow = ARGBToYJRow_Any_SSSE3; @@ -2168,112 +2741,111 @@ int ARGBToJ400(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBTOYJROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToYJRow = ARGBToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_NEON; - } - } -#endif -#if defined(HAS_ARGBTOYJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYJRow = ARGBToYJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_MMI; - } - } -#endif -#if defined(HAS_ARGBTOYJROW_MSA) - if (TestCpuFlag(kCpuHasMSA)) { - ARGBToYJRow = ARGBToYJRow_Any_MSA; - if (IS_ALIGNED(width, 16)) { - ARGBToYJRow = ARGBToYJRow_MSA; - } - } -#endif - - for (y = 0; y < height; ++y) { - ARGBToYJRow(src_argb, dst_yj, width); - src_argb += src_stride_argb; - dst_yj += dst_stride_yj; - } - return 0; -} - -// Convert RGBA to J400. -LIBYUV_API -int RGBAToJ400(const uint8_t* src_rgba, - int src_stride_rgba, - uint8_t* dst_yj, - int dst_stride_yj, - int width, - int height) { - int y; - void (*RGBAToYJRow)(const uint8_t* src_rgba, uint8_t* dst_yj, int width) = - RGBAToYJRow_C; - if (!src_rgba || !dst_yj || width <= 0 || height == 0) { - return -1; - } - if (height < 0) { - height = -height; - src_rgba = src_rgba + (height - 1) * src_stride_rgba; - src_stride_rgba = -src_stride_rgba; - } - // Coalesce rows. 
- if (src_stride_rgba == width * 4 && dst_stride_yj == width) { - width *= height; - height = 1; - src_stride_rgba = dst_stride_yj = 0; - } -#if defined(HAS_RGBATOYJROW_SSSE3) +#if defined(HAS_ARGBTOUVJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - RGBAToYJRow = RGBAToYJRow_Any_SSSE3; + ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { - RGBAToYJRow = RGBAToYJRow_SSSE3; + ARGBToUVJRow = ARGBToUVJRow_SSSE3; } } #endif -#if defined(HAS_RGBATOYJROW_AVX2) +#if defined(HAS_ARGBTOUVJROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - RGBAToYJRow = RGBAToYJRow_Any_AVX2; + ARGBToUVJRow = ARGBToUVJRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - RGBAToYJRow = RGBAToYJRow_AVX2; + ARGBToUVJRow = ARGBToUVJRow_AVX2; } } #endif -#if defined(HAS_RGBATOYJROW_NEON) +#endif // HAS_RAWTOYJROW +#if defined(HAS_MERGEUVROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + MergeUVRow_ = MergeUVRow_Any_SSE2; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_SSE2; + } + } +#endif +#if defined(HAS_MERGEUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + MergeUVRow_ = MergeUVRow_Any_AVX2; + if (IS_ALIGNED(halfwidth, 32)) { + MergeUVRow_ = MergeUVRow_AVX2; + } + } +#endif +#if defined(HAS_MERGEUVROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { - RGBAToYJRow = RGBAToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - RGBAToYJRow = RGBAToYJRow_NEON; + MergeUVRow_ = MergeUVRow_Any_NEON; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_NEON; } } #endif -#if defined(HAS_RGBATOYJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RGBAToYJRow = RGBAToYJRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - RGBAToYJRow = RGBAToYJRow_MMI; - } - } -#endif -#if defined(HAS_RGBATOYJROW_MSA) +#if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { - RGBAToYJRow = RGBAToYJRow_Any_MSA; - if (IS_ALIGNED(width, 16)) { - RGBAToYJRow = RGBAToYJRow_MSA; + MergeUVRow_ = MergeUVRow_Any_MSA; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_MSA; } } #endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow_ = MergeUVRow_Any_LSX; + if (IS_ALIGNED(halfwidth, 16)) { + MergeUVRow_ = MergeUVRow_LSX; + } + } +#endif + { + // Allocate a row of uv. + align_buffer_64(row_uj, ((halfwidth + 31) & ~31) * 2); + uint8_t* row_vj = row_uj + ((halfwidth + 31) & ~31); +#if !defined(HAS_RAWTOYJROW) + // Allocate 2 rows of ARGB. 
+ const int row_size = (width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); +#endif - for (y = 0; y < height; ++y) { - RGBAToYJRow(src_rgba, dst_yj, width); - src_rgba += src_stride_rgba; - dst_yj += dst_stride_yj; + for (y = 0; y < height - 1; y += 2) { +#if defined(HAS_RAWTOYJROW) + RAWToUVJRow(src_raw, src_stride_raw, row_uj, row_vj, width); + MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth); + RAWToYJRow(src_raw, dst_y, width); + RAWToYJRow(src_raw + src_stride_raw, dst_y + dst_stride_y, width); +#else + RAWToARGBRow(src_raw, row, width); + RAWToARGBRow(src_raw + src_stride_raw, row + row_size, width); + ARGBToUVJRow(row, row_size, row_uj, row_vj, width); + MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth); + ARGBToYJRow(row, dst_y, width); + ARGBToYJRow(row + row_size, dst_y + dst_stride_y, width); +#endif + src_raw += src_stride_raw * 2; + dst_y += dst_stride_y * 2; + dst_vu += dst_stride_vu; + } + if (height & 1) { +#if defined(HAS_RAWTOYJROW) + RAWToUVJRow(src_raw, 0, row_uj, row_vj, width); + MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth); + RAWToYJRow(src_raw, dst_y, width); +#else + RAWToARGBRow(src_raw, row, width); + ARGBToUVJRow(row, 0, row_uj, row_vj, width); + MergeUVRow_(row_vj, row_uj, dst_vu, halfwidth); + ARGBToYJRow(row, dst_y, width); +#endif + } +#if !defined(HAS_RAWTOYJROW) + free_aligned_buffer_64(row); +#endif + free_aligned_buffer_64(row_uj); } return 0; } +#undef HAS_RAWTOYJROW #ifdef __cplusplus } // extern "C" diff --git a/third-party/libyuv/third_party/libyuv/source/cpu_id.cc b/third-party/libyuv/third_party/libyuv/source/cpu_id.cc index 588168d65a..13e3da7bb1 100644 --- a/third-party/libyuv/third_party/libyuv/source/cpu_id.cc +++ b/third-party/libyuv/third_party/libyuv/source/cpu_id.cc @@ -20,7 +20,7 @@ #endif // For ArmCpuCaps() but unittested on all platforms -#include +#include // For fopen() #include #ifdef __cplusplus @@ -108,14 +108,14 @@ void CpuId(int eax, int ecx, int* cpu_info) { // } // For VS2013 and earlier 32 bit, the _xgetbv(0) optimizer produces bad code. // https://code.google.com/p/libyuv/issues/detail?id=529 -#if defined(_M_IX86) && (_MSC_VER < 1900) +#if defined(_M_IX86) && defined(_MSC_VER) && (_MSC_VER < 1900) #pragma optimize("g", off) #endif #if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__x86_64__)) && \ !defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__) // X86 CPUs have xgetbv to detect OS saves high parts of ymm registers. -int GetXCR0() { +static int GetXCR0() { int xcr0 = 0; #if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) xcr0 = (int)_xgetbv(0); // VS2010 SP1 required. NOLINT @@ -129,7 +129,7 @@ int GetXCR0() { #define GetXCR0() 0 #endif // defined(_M_IX86) || defined(_M_X64) .. // Return optimization to previous setting. -#if defined(_M_IX86) && (_MSC_VER < 1900) +#if defined(_M_IX86) && defined(_MSC_VER) && (_MSC_VER < 1900) #pragma optimize("g", on) #endif @@ -174,18 +174,12 @@ LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) { } while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) { if (memcmp(cpuinfo_line, "cpu model", 9) == 0) { - // Workaround early kernel without mmi in ASEs line. - if (strstr(cpuinfo_line, "Loongson-3")) { - flag |= kCpuHasMMI; - } else if (strstr(cpuinfo_line, "Loongson-2K")) { - flag |= kCpuHasMMI | kCpuHasMSA; + // Workaround early kernel without MSA in ASEs line. 
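Back in convert_from_argb.cc, the new RAWToJNV21() has two compile-time paths: a one-pass RAW-to-YJ/UVJ route when the NEON or MSA row kernels exist (HAS_RAWTOYJROW), and a fallback that converts two rows at a time through an ARGB scratch buffer. Either way the caller only supplies a luma plane and an interleaved VU plane; a usage sketch, assuming RAWToJNV21() is declared in libyuv/convert.h (adjust if it lives elsewhere) and with illustrative buffer handling:

#include <stdint.h>
#include <stdlib.h>
#include "libyuv/convert.h"  /* assumed header for RAWToJNV21() */

int ConvertRawToJNV21(const uint8_t* raw, int w, int h) {
  int hw = (w + 1) / 2, hh = (h + 1) / 2;
  uint8_t* y  = (uint8_t*)malloc((size_t)w * h);
  uint8_t* vu = (uint8_t*)malloc((size_t)hw * 2 * hh);  /* interleaved V,U pairs */
  int rc = -1;
  if (y && vu) {
    rc = RAWToJNV21(raw, w * 3,   /* 3 bytes per RAW pixel */
                    y, w,
                    vu, hw * 2,   /* VU stride in bytes */
                    w, h);
  }
  free(y); free(vu);
  return rc;
}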
+ if (strstr(cpuinfo_line, "Loongson-2K")) { + flag |= kCpuHasMSA; } } if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) { - if (strstr(cpuinfo_line, "loongson-mmi") && - strstr(cpuinfo_line, "loongson-ext")) { - flag |= kCpuHasMMI; - } if (strstr(cpuinfo_line, "msa")) { flag |= kCpuHasMSA; } @@ -197,6 +191,27 @@ LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) { return flag; } +// TODO(fbarchard): Consider read_loongarch_ir(). +#define LOONGARCH_CFG2 0x2 +#define LOONGARCH_CFG2_LSX (1 << 6) +#define LOONGARCH_CFG2_LASX (1 << 7) + +#if defined(__loongarch__) +LIBYUV_API SAFEBUFFERS int LoongarchCpuCaps(void) { + int flag = 0x0; + uint32_t cfg2 = 0; + + __asm__ volatile("cpucfg %0, %1 \n\t" : "+&r"(cfg2) : "r"(LOONGARCH_CFG2)); + + if (cfg2 & LOONGARCH_CFG2_LSX) + flag |= kCpuHasLSX; + + if (cfg2 & LOONGARCH_CFG2_LASX) + flag |= kCpuHasLASX; + return flag; +} +#endif + static SAFEBUFFERS int GetCpuFlags(void) { int cpu_info = 0; #if !defined(__pnacl__) && !defined(__CLR_VER) && \ @@ -229,6 +244,7 @@ static SAFEBUFFERS int GetCpuFlags(void) { cpu_info |= (cpu_info7[1] & 0x80000000) ? kCpuHasAVX512VL : 0; cpu_info |= (cpu_info7[2] & 0x00000002) ? kCpuHasAVX512VBMI : 0; cpu_info |= (cpu_info7[2] & 0x00000040) ? kCpuHasAVX512VBMI2 : 0; + cpu_info |= (cpu_info7[2] & 0x00000800) ? kCpuHasAVX512VNNI : 0; cpu_info |= (cpu_info7[2] & 0x00001000) ? kCpuHasAVX512VBITALG : 0; cpu_info |= (cpu_info7[2] & 0x00004000) ? kCpuHasAVX512VPOPCNTDQ : 0; cpu_info |= (cpu_info7[2] & 0x00000100) ? kCpuHasGFNI : 0; @@ -239,6 +255,10 @@ static SAFEBUFFERS int GetCpuFlags(void) { cpu_info = MipsCpuCaps("/proc/cpuinfo"); cpu_info |= kCpuHasMIPS; #endif +#if defined(__loongarch__) && defined(__linux__) + cpu_info = LoongarchCpuCaps(); + cpu_info |= kCpuHasLOONGARCH; +#endif #if defined(__arm__) || defined(__aarch64__) // gcc -mfpu=neon defines __ARM_NEON__ // __ARM_NEON__ generates code that requires Neon. NaCL also requires Neon. diff --git a/third-party/libyuv/third_party/libyuv/source/mjpeg_decoder.cc b/third-party/libyuv/third_party/libyuv/source/mjpeg_decoder.cc index adba832f53..4ccf00a361 100644 --- a/third-party/libyuv/third_party/libyuv/source/mjpeg_decoder.cc +++ b/third-party/libyuv/third_party/libyuv/source/mjpeg_decoder.cc @@ -417,10 +417,6 @@ void init_source(j_decompress_ptr cinfo) { boolean fill_input_buffer(j_decompress_ptr cinfo) { BufferVector* buf_vec = reinterpret_cast(cinfo->client_data); if (buf_vec->pos >= buf_vec->len) { - // Don't assert-fail when fuzzing. -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - assert(0 && "No more data"); -#endif // ERROR: No more data return FALSE; } diff --git a/third-party/libyuv/third_party/libyuv/source/planar_functions.cc b/third-party/libyuv/third_party/libyuv/source/planar_functions.cc index 7cea06c8d7..96914e08d6 100644 --- a/third-party/libyuv/third_party/libyuv/source/planar_functions.cc +++ b/third-party/libyuv/third_party/libyuv/source/planar_functions.cc @@ -35,6 +35,9 @@ void CopyPlane(const uint8_t* src_y, int height) { int y; void (*CopyRow)(const uint8_t* src, uint8_t* dst, int width) = CopyRow_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -81,8 +84,6 @@ void CopyPlane(const uint8_t* src_y, } } -// TODO(fbarchard): Consider support for negative height. -// TODO(fbarchard): Consider stride measured in bytes. 
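The cpu_id.cc hunk above reads LoongArch CPUCFG word 2 with the cpucfg instruction and maps its LSX/LASX bits onto the new kCpuHasLSX / kCpuHasLASX flags that the dispatch blocks earlier in this patch test at run time. Application code can query them like any other capability bit, and, for A/B testing, mask them off, assuming libyuv's existing MaskCpuFlags() test hook; a small sketch:

#include <stdio.h>
#include "libyuv/cpu_id.h"

int main(void) {
  /* Query the new LoongArch SIMD flags like any other capability bit. */
  printf("LSX:  %d\n", TestCpuFlag(kCpuHasLSX) ? 1 : 0);
  printf("LASX: %d\n", TestCpuFlag(kCpuHasLASX) ? 1 : 0);

  /* Mask LASX off to compare against the LSX/C paths; passing -1 later
   * re-enables everything the CPU actually reports. */
  MaskCpuFlags(~kCpuHasLASX);
  return 0;
}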
LIBYUV_API void CopyPlane_16(const uint16_t* src_y, int src_stride_y, @@ -90,36 +91,8 @@ void CopyPlane_16(const uint16_t* src_y, int dst_stride_y, int width, int height) { - int y; - void (*CopyRow)(const uint16_t* src, uint16_t* dst, int width) = CopyRow_16_C; - // Coalesce rows. - if (src_stride_y == width && dst_stride_y == width) { - width *= height; - height = 1; - src_stride_y = dst_stride_y = 0; - } -#if defined(HAS_COPYROW_16_SSE2) - if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 32)) { - CopyRow = CopyRow_16_SSE2; - } -#endif -#if defined(HAS_COPYROW_16_ERMS) - if (TestCpuFlag(kCpuHasERMS)) { - CopyRow = CopyRow_16_ERMS; - } -#endif -#if defined(HAS_COPYROW_16_NEON) - if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 32)) { - CopyRow = CopyRow_16_NEON; - } -#endif - - // Copy plane - for (y = 0; y < height; ++y) { - CopyRow(src_y, dst_y, width); - src_y += src_stride_y; - dst_y += dst_stride_y; - } + CopyPlane((const uint8_t*)src_y, src_stride_y * 2, (uint8_t*)dst_y, + dst_stride_y * 2, width * 2, height); } // Convert a plane of 16 bit data to 8 bit @@ -135,6 +108,9 @@ void Convert16To8Plane(const uint16_t* src_y, void (*Convert16To8Row)(const uint16_t* src_y, uint8_t* dst_y, int scale, int width) = Convert16To8Row_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -147,6 +123,14 @@ void Convert16To8Plane(const uint16_t* src_y, height = 1; src_stride_y = dst_stride_y = 0; } +#if defined(HAS_CONVERT16TO8ROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + Convert16To8Row = Convert16To8Row_Any_NEON; + if (IS_ALIGNED(width, 16)) { + Convert16To8Row = Convert16To8Row_NEON; + } + } +#endif #if defined(HAS_CONVERT16TO8ROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { Convert16To8Row = Convert16To8Row_Any_SSSE3; @@ -185,6 +169,9 @@ void Convert8To16Plane(const uint8_t* src_y, void (*Convert8To16Row)(const uint8_t* src_y, uint16_t* dst_y, int scale, int width) = Convert8To16Row_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -239,9 +226,12 @@ int I422Copy(const uint8_t* src_y, int width, int height) { int halfwidth = (width + 1) >> 1; - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; @@ -277,7 +267,8 @@ int I444Copy(const uint8_t* src_y, int dst_stride_v, int width, int height) { - if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { return -1; } // Negative height means invert the image. @@ -299,6 +290,49 @@ int I444Copy(const uint8_t* src_y, return 0; } +// Copy I210. +LIBYUV_API +int I210Copy(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int width, + int height) { + int halfwidth = (width + 1) >> 1; + + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { + return -1; + } + + // Negative height means invert the image. 
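CopyPlane_16() above now just reinterprets the 16-bit plane as bytes and forwards to CopyPlane(), doubling width and stride. That only works because the _16 strides are counted in uint16_t elements (the old TODO about byte strides goes away with the old body); a small equivalence sketch with illustrative numbers:

#include <stdint.h>
#include <string.h>

/* A 320x240 16-bit plane stored with a stride of 320 elements occupies
 * 320 * 2 = 640 bytes per row, so the byte copy below moves exactly the
 * memory that CopyPlane_16(src, 320, dst, 320, 320, 240) now forwards to
 * CopyPlane() as (stride 640, width 640). */
static void CopyPlane16Equivalent(const uint16_t* src, uint16_t* dst) {
  const int width = 320, height = 240, stride = 320;  /* in elements */
  for (int y = 0; y < height; ++y) {
    memcpy((uint8_t*)dst + (size_t)y * stride * 2,
           (const uint8_t*)src + (size_t)y * stride * 2,
           (size_t)width * 2);                         /* bytes per row */
  }
}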
+ if (height < 0) { + height = -height; + src_y = src_y + (height - 1) * src_stride_y; + src_u = src_u + (height - 1) * src_stride_u; + src_v = src_v + (height - 1) * src_stride_v; + src_stride_y = -src_stride_y; + src_stride_u = -src_stride_u; + src_stride_v = -src_stride_v; + } + + if (dst_y) { + CopyPlane_16(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + } + // Copy UV planes. + CopyPlane_16(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, height); + CopyPlane_16(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, height); + return 0; +} + // Copy I400. LIBYUV_API int I400ToI400(const uint8_t* src_y, @@ -351,6 +385,7 @@ int I420ToI400(const uint8_t* src_y, } // Copy NV12. Supports inverting. +LIBYUV_API int NV12Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_uv, @@ -361,12 +396,13 @@ int NV12Copy(const uint8_t* src_y, int dst_stride_uv, int width, int height) { + int halfwidth = (width + 1) >> 1; + int halfheight = (height + 1) >> 1; + if (!src_y || !dst_y || !src_uv || !dst_uv || width <= 0 || height == 0) { return -1; } - int halfwidth = (width + 1) >> 1; - int halfheight = (height + 1) >> 1; // Negative height means invert the image. if (height < 0) { height = -height; @@ -383,6 +419,7 @@ int NV12Copy(const uint8_t* src_y, } // Copy NV21. Supports inverting. +LIBYUV_API int NV21Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_vu, @@ -411,6 +448,9 @@ void SplitUVPlane(const uint8_t* src_uv, int y; void (*SplitUVRow)(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* dst_v, int width) = SplitUVRow_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -450,14 +490,6 @@ void SplitUVPlane(const uint8_t* src_uv, } } #endif -#if defined(HAS_SPLITUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SplitUVRow = SplitUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - SplitUVRow = SplitUVRow_MMI; - } - } -#endif #if defined(HAS_SPLITUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SplitUVRow = SplitUVRow_Any_MSA; @@ -466,6 +498,14 @@ void SplitUVPlane(const uint8_t* src_uv, } } #endif +#if defined(HAS_SPLITUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + SplitUVRow = SplitUVRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + SplitUVRow = SplitUVRow_LSX; + } + } +#endif for (y = 0; y < height; ++y) { // Copy a row of UV. @@ -488,6 +528,9 @@ void MergeUVPlane(const uint8_t* src_u, int y; void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v, uint8_t* dst_uv, int width) = MergeUVRow_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -525,14 +568,6 @@ void MergeUVPlane(const uint8_t* src_u, } } #endif -#if defined(HAS_MERGEUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeUVRow = MergeUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - MergeUVRow = MergeUVRow_MMI; - } - } -#endif #if defined(HAS_MERGEUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MergeUVRow = MergeUVRow_Any_MSA; @@ -541,6 +576,14 @@ void MergeUVPlane(const uint8_t* src_u, } } #endif +#if defined(HAS_MERGEUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + MergeUVRow = MergeUVRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + MergeUVRow = MergeUVRow_LSX; + } + } +#endif for (y = 0; y < height; ++y) { // Merge a row of U and V into a row of UV. 
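The new I210Copy() above copies a 4:2:2 frame with 10-bit samples carried in uint16_t: an optional full-resolution Y plane (skipped when dst_y is NULL) plus half-width, full-height U and V planes, all with strides in elements. A usage sketch, assuming the declaration sits with the other plane copies in libyuv/planar_functions.h; buffer handling is illustrative:

#include <stdint.h>
#include <stdlib.h>
#include "libyuv/planar_functions.h"  /* assumed home of I210Copy() */

int CopyI210Frame(const uint16_t* sy, const uint16_t* su, const uint16_t* sv,
                  int w, int h) {
  int hw = (w + 1) / 2;  /* 4:2:2 chroma width */
  uint16_t* dy = (uint16_t*)malloc((size_t)w * h * sizeof(uint16_t));
  uint16_t* du = (uint16_t*)malloc((size_t)hw * h * sizeof(uint16_t));
  uint16_t* dv = (uint16_t*)malloc((size_t)hw * h * sizeof(uint16_t));
  int rc = -1;
  if (dy && du && dv) {
    rc = I210Copy(sy, w, su, hw, sv, hw,  /* strides in uint16_t elements */
                  dy, w, du, hw, dv, hw, w, h);
  }
  free(dy); free(du); free(dv);
  return rc;
}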
@@ -567,6 +610,9 @@ void SplitUVPlane_16(const uint16_t* src_uv, void (*SplitUVRow_16)(const uint16_t* src_uv, uint16_t* dst_u, uint16_t* dst_v, int depth, int width) = SplitUVRow_16_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -624,6 +670,9 @@ void MergeUVPlane_16(const uint16_t* src_u, MergeUVRow_16_C; assert(depth >= 8); assert(depth <= 16); + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -676,6 +725,9 @@ void ConvertToMSBPlane_16(const uint16_t* src_y, int scale = 1 << (16 - depth); void (*MultiplyRow_16)(const uint16_t* src_y, uint16_t* dst_y, int scale, int width) = MultiplyRow_16_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -726,6 +778,9 @@ void ConvertToLSBPlane_16(const uint16_t* src_y, int scale = 1 << depth; void (*DivideRow)(const uint16_t* src_y, uint16_t* dst_y, int scale, int width) = DivideRow_16_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -774,6 +829,9 @@ void SwapUVPlane(const uint8_t* src_uv, int y; void (*SwapUVRow)(const uint8_t* src_uv, uint8_t* dst_vu, int width) = SwapUVRow_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -833,9 +891,11 @@ int NV21ToNV12(const uint8_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; + if (!src_vu || !dst_uv || width <= 0 || height == 0) { return -1; } + if (dst_y) { CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); } @@ -853,6 +913,262 @@ int NV21ToNV12(const uint8_t* src_y, return 0; } +// Test if tile_height is a power of 2 (16 or 32) +#define IS_POWEROFTWO(x) (!((x) & ((x)-1))) + +// Detile a plane of data +// tile width is 16 and assumed. +// tile_height is 16 or 32 for MM21. +// src_stride_y is bytes per row of source ignoring tiling. e.g. 640 +// TODO: More detile row functions. +LIBYUV_API +int DetilePlane(const uint8_t* src_y, + int src_stride_y, + uint8_t* dst_y, + int dst_stride_y, + int width, + int height, + int tile_height) { + const ptrdiff_t src_tile_stride = 16 * tile_height; + int y; + void (*DetileRow)(const uint8_t* src, ptrdiff_t src_tile_stride, uint8_t* dst, + int width) = DetileRow_C; + if (!src_y || !dst_y || width <= 0 || height == 0 || + !IS_POWEROFTWO(tile_height)) { + return -1; + } + + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_y = dst_y + (height - 1) * dst_stride_y; + dst_stride_y = -dst_stride_y; + } + +#if defined(HAS_DETILEROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + DetileRow = DetileRow_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + DetileRow = DetileRow_SSE2; + } + } +#endif +#if defined(HAS_DETILEROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + DetileRow = DetileRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + DetileRow = DetileRow_NEON; + } + } +#endif + + // Detile plane + for (y = 0; y < height; ++y) { + DetileRow(src_y, src_tile_stride, dst_y, width); + dst_y += dst_stride_y; + src_y += 16; + // Advance to next row of tiles. + if ((y & (tile_height - 1)) == (tile_height - 1)) { + src_y = src_y - src_tile_stride + src_stride_y * tile_height; + } + } + return 0; +} + +// Convert a plane of 16 bit tiles of 16 x H to linear. +// tile width is 16 and assumed. 
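DetilePlane() above consumes the source 16 bytes per output row: within a band of tiles it steps forward 16 bytes each row, and on the last row of a band (the (y & (tile_height - 1)) == (tile_height - 1) test, which is just y % tile_height == tile_height - 1 spelled for a power-of-two tile_height) it rewinds by src_tile_stride and jumps ahead one full band. Unrolling that walk gives a closed form for where each output byte comes from; a small sketch of the address arithmetic, derived from the loop rather than from any libyuv API:

#include <stddef.h>

/* Source byte offset of output pixel (x, y) in a 16-byte-wide tiled plane,
 * as implied by DetilePlane()'s pointer walk.  src_stride is the untiled
 * row pitch in bytes; tile_height is 16 or 32 for MM21-style layouts. */
static size_t TiledOffset(int x, int y, int src_stride, int tile_height) {
  size_t band = (size_t)(y / tile_height);        /* which row of tiles     */
  size_t row_in_tile = (size_t)(y % tile_height);
  size_t tile_col = (size_t)(x / 16);             /* which tile in the band */
  return band * (size_t)src_stride * tile_height  /* skip earlier bands     */
         + tile_col * 16 * tile_height            /* skip earlier tiles     */
         + row_in_tile * 16                       /* row inside the tile    */
         + (size_t)(x % 16);                      /* byte inside the row    */
}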
+// tile_height is 16 or 32 for MT2T. +LIBYUV_API +int DetilePlane_16(const uint16_t* src_y, + int src_stride_y, + uint16_t* dst_y, + int dst_stride_y, + int width, + int height, + int tile_height) { + const ptrdiff_t src_tile_stride = 16 * tile_height; + int y; + void (*DetileRow_16)(const uint16_t* src, ptrdiff_t src_tile_stride, + uint16_t* dst, int width) = DetileRow_16_C; + if (!src_y || !dst_y || width <= 0 || height == 0 || + !IS_POWEROFTWO(tile_height)) { + return -1; + } + + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_y = dst_y + (height - 1) * dst_stride_y; + dst_stride_y = -dst_stride_y; + } + +#if defined(HAS_DETILEROW_16_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + DetileRow_16 = DetileRow_16_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + DetileRow_16 = DetileRow_16_SSE2; + } + } +#endif +#if defined(HAS_DETILEROW_16_AVX) + if (TestCpuFlag(kCpuHasAVX)) { + DetileRow_16 = DetileRow_16_Any_AVX; + if (IS_ALIGNED(width, 16)) { + DetileRow_16 = DetileRow_16_AVX; + } + } +#endif +#if defined(HAS_DETILEROW_16_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + DetileRow_16 = DetileRow_16_Any_NEON; + if (IS_ALIGNED(width, 16)) { + DetileRow_16 = DetileRow_16_NEON; + } + } +#endif + + // Detile plane + for (y = 0; y < height; ++y) { + DetileRow_16(src_y, src_tile_stride, dst_y, width); + dst_y += dst_stride_y; + src_y += 16; + // Advance to next row of tiles. + if ((y & (tile_height - 1)) == (tile_height - 1)) { + src_y = src_y - src_tile_stride + src_stride_y * tile_height; + } + } + return 0; +} + +LIBYUV_API +void DetileSplitUVPlane(const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + int tile_height) { + const ptrdiff_t src_tile_stride = 16 * tile_height; + int y; + void (*DetileSplitUVRow)(const uint8_t* src, ptrdiff_t src_tile_stride, + uint8_t* dst_u, uint8_t* dst_v, int width) = + DetileSplitUVRow_C; + assert(src_stride_uv >= 0); + assert(tile_height > 0); + assert(src_stride_uv > 0); + + if (width <= 0 || height == 0) { + return; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_u = dst_u + (height - 1) * dst_stride_u; + dst_stride_u = -dst_stride_u; + dst_v = dst_v + (height - 1) * dst_stride_v; + dst_stride_v = -dst_stride_v; + } + +#if defined(HAS_DETILESPLITUVROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + DetileSplitUVRow = DetileSplitUVRow_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + DetileSplitUVRow = DetileSplitUVRow_SSSE3; + } + } +#endif +#if defined(HAS_DETILESPLITUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + DetileSplitUVRow = DetileSplitUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + DetileSplitUVRow = DetileSplitUVRow_NEON; + } + } +#endif + + // Detile plane + for (y = 0; y < height; ++y) { + DetileSplitUVRow(src_uv, src_tile_stride, dst_u, dst_v, width); + dst_u += dst_stride_u; + dst_v += dst_stride_v; + src_uv += 16; + // Advance to next row of tiles. 
+ if ((y & (tile_height - 1)) == (tile_height - 1)) { + src_uv = src_uv - src_tile_stride + src_stride_uv * tile_height; + } + } +} + +LIBYUV_API +void DetileToYUY2(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_yuy2, + int dst_stride_yuy2, + int width, + int height, + int tile_height) { + const ptrdiff_t src_y_tile_stride = 16 * tile_height; + const ptrdiff_t src_uv_tile_stride = src_y_tile_stride / 2; + int y; + void (*DetileToYUY2)(const uint8_t* src_y, ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, int width) = DetileToYUY2_C; + assert(src_stride_y >= 0); + assert(src_stride_y > 0); + assert(src_stride_uv >= 0); + assert(src_stride_uv > 0); + assert(tile_height > 0); + + if (width <= 0 || height == 0 || tile_height <= 0) { + return; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2; + dst_stride_yuy2 = -dst_stride_yuy2; + } + +#if defined(HAS_DETILETOYUY2_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + DetileToYUY2 = DetileToYUY2_Any_NEON; + if (IS_ALIGNED(width, 16)) { + DetileToYUY2 = DetileToYUY2_NEON; + } + } +#endif + +#if defined(HAS_DETILETOYUY2_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + DetileToYUY2 = DetileToYUY2_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + DetileToYUY2 = DetileToYUY2_SSE2; + } + } +#endif + + // Detile plane + for (y = 0; y < height; ++y) { + DetileToYUY2(src_y, src_y_tile_stride, src_uv, src_uv_tile_stride, dst_yuy2, + width); + dst_yuy2 += dst_stride_yuy2; + src_y += 16; + + if (y & 0x1) + src_uv += 16; + + // Advance to next row of tiles. + if ((y & (tile_height - 1)) == (tile_height - 1)) { + src_y = src_y - src_y_tile_stride + src_stride_y * tile_height; + src_uv = src_uv - src_uv_tile_stride + src_stride_uv * (tile_height / 2); + } + } +} + // Support function for NV12 etc RGB channels. // Width and height are plane sizes (typically half pixel width). LIBYUV_API @@ -869,6 +1185,9 @@ void SplitRGBPlane(const uint8_t* src_rgb, int y; void (*SplitRGBRow)(const uint8_t* src_rgb, uint8_t* dst_r, uint8_t* dst_g, uint8_t* dst_b, int width) = SplitRGBRow_C; + if (width <= 0 || height == 0) { + return; + } // Negative height means invert the image. if (height < 0) { height = -height; @@ -894,14 +1213,6 @@ void SplitRGBPlane(const uint8_t* src_rgb, } } #endif -#if defined(HAS_SPLITRGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SplitRGBRow = SplitRGBRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - SplitRGBRow = SplitRGBRow_MMI; - } - } -#endif #if defined(HAS_SPLITRGBROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { SplitRGBRow = SplitRGBRow_Any_NEON; @@ -936,6 +1247,9 @@ void MergeRGBPlane(const uint8_t* src_r, void (*MergeRGBRow)(const uint8_t* src_r, const uint8_t* src_g, const uint8_t* src_b, uint8_t* dst_rgb, int width) = MergeRGBRow_C; + if (width <= 0 || height == 0) { + return; + } // Coalesce rows. // Negative height means invert the image. if (height < 0) { @@ -966,14 +1280,6 @@ void MergeRGBPlane(const uint8_t* src_r, } } #endif -#if defined(HAS_MERGERGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MergeRGBRow = MergeRGBRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - MergeRGBRow = MergeRGBRow_MMI; - } - } -#endif for (y = 0; y < height; ++y) { // Merge a row of U and V into a row of RGB. 
@@ -986,18 +1292,18 @@ void MergeRGBPlane(const uint8_t* src_r, } LIBYUV_NOINLINE -void SplitARGBPlaneAlpha(const uint8_t* src_argb, - int src_stride_argb, - uint8_t* dst_r, - int dst_stride_r, - uint8_t* dst_g, - int dst_stride_g, - uint8_t* dst_b, - int dst_stride_b, - uint8_t* dst_a, - int dst_stride_a, - int width, - int height) { +static void SplitARGBPlaneAlpha(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_r, + int dst_stride_r, + uint8_t* dst_g, + int dst_stride_g, + uint8_t* dst_b, + int dst_stride_b, + uint8_t* dst_a, + int dst_stride_a, + int width, + int height) { int y; void (*SplitARGBRow)(const uint8_t* src_rgb, uint8_t* dst_r, uint8_t* dst_g, uint8_t* dst_b, uint8_t* dst_a, int width) = @@ -1057,16 +1363,16 @@ void SplitARGBPlaneAlpha(const uint8_t* src_argb, } LIBYUV_NOINLINE -void SplitARGBPlaneOpaque(const uint8_t* src_argb, - int src_stride_argb, - uint8_t* dst_r, - int dst_stride_r, - uint8_t* dst_g, - int dst_stride_g, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height) { +static void SplitARGBPlaneOpaque(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_r, + int dst_stride_r, + uint8_t* dst_g, + int dst_stride_g, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height) { int y; void (*SplitXRGBRow)(const uint8_t* src_rgb, uint8_t* dst_r, uint8_t* dst_g, uint8_t* dst_b, int width) = SplitXRGBRow_C; @@ -1158,18 +1464,18 @@ void SplitARGBPlane(const uint8_t* src_argb, } LIBYUV_NOINLINE -void MergeARGBPlaneAlpha(const uint8_t* src_r, - int src_stride_r, - const uint8_t* src_g, - int src_stride_g, - const uint8_t* src_b, - int src_stride_b, - const uint8_t* src_a, - int src_stride_a, - uint8_t* dst_argb, - int dst_stride_argb, - int width, - int height) { +static void MergeARGBPlaneAlpha(const uint8_t* src_r, + int src_stride_r, + const uint8_t* src_g, + int src_stride_g, + const uint8_t* src_b, + int src_stride_b, + const uint8_t* src_a, + int src_stride_a, + uint8_t* dst_argb, + int dst_stride_argb, + int width, + int height) { int y; void (*MergeARGBRow)(const uint8_t* src_r, const uint8_t* src_g, const uint8_t* src_b, const uint8_t* src_a, @@ -1220,16 +1526,16 @@ void MergeARGBPlaneAlpha(const uint8_t* src_r, } LIBYUV_NOINLINE -void MergeARGBPlaneOpaque(const uint8_t* src_r, - int src_stride_r, - const uint8_t* src_g, - int src_stride_g, - const uint8_t* src_b, - int src_stride_b, - uint8_t* dst_argb, - int dst_stride_argb, - int width, - int height) { +static void MergeARGBPlaneOpaque(const uint8_t* src_r, + int src_stride_r, + const uint8_t* src_g, + int src_stride_g, + const uint8_t* src_b, + int src_stride_b, + uint8_t* dst_argb, + int dst_stride_argb, + int width, + int height) { int y; void (*MergeXRGBRow)(const uint8_t* src_r, const uint8_t* src_g, const uint8_t* src_b, uint8_t* dst_argb, int width) = @@ -1708,16 +2014,6 @@ int YUY2ToI422(const uint8_t* src_yuy2, } } #endif -#if defined(HAS_YUY2TOYROW_MMI) && defined(HAS_YUY2TOUV422ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - YUY2ToYRow = YUY2ToYRow_Any_MMI; - YUY2ToUV422Row = YUY2ToUV422Row_Any_MMI; - if (IS_ALIGNED(width, 8)) { - YUY2ToYRow = YUY2ToYRow_MMI; - YUY2ToUV422Row = YUY2ToUV422Row_MMI; - } - } -#endif #if defined(HAS_YUY2TOYROW_MSA) && defined(HAS_YUY2TOUV422ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { YUY2ToYRow = YUY2ToYRow_Any_MSA; @@ -1728,6 +2024,16 @@ int YUY2ToI422(const uint8_t* src_yuy2, } } #endif +#if defined(HAS_YUY2TOYROW_LASX) && defined(HAS_YUY2TOUV422ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + YUY2ToYRow = YUY2ToYRow_Any_LASX; + 
YUY2ToUV422Row = YUY2ToUV422Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + YUY2ToYRow = YUY2ToYRow_LASX; + YUY2ToUV422Row = YUY2ToUV422Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { YUY2ToUV422Row(src_yuy2, dst_u, dst_v, width); @@ -1804,16 +2110,6 @@ int UYVYToI422(const uint8_t* src_uyvy, } } #endif -#if defined(HAS_UYVYTOYROW_MMI) && defined(HAS_UYVYTOUV422ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - UYVYToYRow = UYVYToYRow_Any_MMI; - UYVYToUV422Row = UYVYToUV422Row_Any_MMI; - if (IS_ALIGNED(width, 16)) { - UYVYToYRow = UYVYToYRow_MMI; - UYVYToUV422Row = UYVYToUV422Row_MMI; - } - } -#endif #if defined(HAS_UYVYTOYROW_MSA) && defined(HAS_UYVYTOUV422ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { UYVYToYRow = UYVYToYRow_Any_MSA; @@ -1824,6 +2120,16 @@ int UYVYToI422(const uint8_t* src_uyvy, } } #endif +#if defined(HAS_UYVYTOYROW_LASX) && defined(HAS_UYVYTOUV422ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + UYVYToYRow = UYVYToYRow_Any_LASX; + UYVYToUV422Row = UYVYToUV422Row_Any_LASX; + if (IS_ALIGNED(width, 32)) { + UYVYToYRow = UYVYToYRow_LASX; + UYVYToUV422Row = UYVYToUV422Row_LASX; + } + } +#endif for (y = 0; y < height; ++y) { UYVYToUV422Row(src_uyvy, dst_u, dst_v, width); @@ -1886,14 +2192,6 @@ int YUY2ToY(const uint8_t* src_yuy2, } } #endif -#if defined(HAS_YUY2TOYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - YUY2ToYRow = YUY2ToYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - YUY2ToYRow = YUY2ToYRow_MMI; - } - } -#endif #if defined(HAS_YUY2TOYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { YUY2ToYRow = YUY2ToYRow_Any_MSA; @@ -1911,6 +2209,73 @@ int YUY2ToY(const uint8_t* src_yuy2, return 0; } +// Convert UYVY to Y. +LIBYUV_API +int UYVYToY(const uint8_t* src_uyvy, + int src_stride_uyvy, + uint8_t* dst_y, + int dst_stride_y, + int width, + int height) { + int y; + void (*UYVYToYRow)(const uint8_t* src_uyvy, uint8_t* dst_y, int width) = + UYVYToYRow_C; + if (!src_uyvy || !dst_y || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy; + src_stride_uyvy = -src_stride_uyvy; + } + // Coalesce rows. + if (src_stride_uyvy == width * 2 && dst_stride_y == width) { + width *= height; + height = 1; + src_stride_uyvy = dst_stride_y = 0; + } +#if defined(HAS_UYVYTOYROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + UYVYToYRow = UYVYToYRow_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + UYVYToYRow = UYVYToYRow_SSE2; + } + } +#endif +#if defined(HAS_UYVYTOYROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + UYVYToYRow = UYVYToYRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + UYVYToYRow = UYVYToYRow_AVX2; + } + } +#endif +#if defined(HAS_UYVYTOYROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + UYVYToYRow = UYVYToYRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + UYVYToYRow = UYVYToYRow_NEON; + } + } +#endif +#if defined(HAS_UYVYTOYROW_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + UYVYToYRow = UYVYToYRow_Any_MSA; + if (IS_ALIGNED(width, 32)) { + UYVYToYRow = UYVYToYRow_MSA; + } + } +#endif + + for (y = 0; y < height; ++y) { + UYVYToYRow(src_uyvy, dst_y, width); + src_uyvy += src_stride_uyvy; + dst_y += dst_stride_y; + } + return 0; +} + // Mirror a plane of data. 
// See Also I400Mirror LIBYUV_API @@ -1952,14 +2317,6 @@ void MirrorPlane(const uint8_t* src_y, } } #endif -#if defined(HAS_MIRRORROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MirrorRow = MirrorRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - MirrorRow = MirrorRow_MMI; - } - } -#endif #if defined(HAS_MIRRORROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MirrorRow = MirrorRow_Any_MSA; @@ -1968,6 +2325,14 @@ void MirrorPlane(const uint8_t* src_y, } } #endif +#if defined(HAS_MIRRORROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + MirrorRow = MirrorRow_Any_LASX; + if (IS_ALIGNED(width, 64)) { + MirrorRow = MirrorRow_LASX; + } + } +#endif // Mirror plane for (y = 0; y < height; ++y) { @@ -2026,6 +2391,14 @@ void MirrorUVPlane(const uint8_t* src_uv, } } #endif +#if defined(HAS_MIRRORUVROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + MirrorUVRow = MirrorUVRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + MirrorUVRow = MirrorUVRow_LASX; + } + } +#endif // MirrorUV plane for (y = 0; y < height; ++y) { @@ -2075,10 +2448,12 @@ int I420Mirror(const uint8_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; + if (!src_y || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; @@ -2113,9 +2488,11 @@ int NV12Mirror(const uint8_t* src_y, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; + if (!src_y || !src_uv || !dst_uv || width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; @@ -2178,14 +2555,6 @@ int ARGBMirror(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBMIRRORROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBMirrorRow = ARGBMirrorRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBMirrorRow = ARGBMirrorRow_MMI; - } - } -#endif #if defined(HAS_ARGBMIRRORROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBMirrorRow = ARGBMirrorRow_Any_MSA; @@ -2194,6 +2563,14 @@ int ARGBMirror(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBMIRRORROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBMirrorRow = ARGBMirrorRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBMirrorRow = ARGBMirrorRow_LASX; + } + } +#endif // Mirror plane for (y = 0; y < height; ++y) { @@ -2268,15 +2645,15 @@ ARGBBlendRow GetARGBBlend() { ARGBBlendRow = ARGBBlendRow_NEON; } #endif -#if defined(HAS_ARGBBLENDROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBBlendRow = ARGBBlendRow_MMI; - } -#endif #if defined(HAS_ARGBBLENDROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBBlendRow = ARGBBlendRow_MSA; } +#endif +#if defined(HAS_ARGBBLENDROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGBBlendRow = ARGBBlendRow_LSX; + } #endif return ARGBBlendRow; } @@ -2370,14 +2747,6 @@ int BlendPlane(const uint8_t* src_y0, } } #endif -#if defined(HAS_BLENDPLANEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - BlendPlaneRow = BlendPlaneRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - BlendPlaneRow = BlendPlaneRow_MMI; - } - } -#endif for (y = 0; y < height; ++y) { BlendPlaneRow(src_y0, src_y1, alpha, dst_y, width); @@ -2422,6 +2791,7 @@ int I420Blend(const uint8_t* src_y0, BlendPlaneRow_C; void (*ScaleRowDown2)(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width) = ScaleRowDown2Box_C; + if (!src_y0 || !src_u0 || !src_v0 || !src_y1 || !src_u1 || !src_v1 || !alpha || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) { return -1; @@ -2453,14 +2823,6 @@ int I420Blend(const uint8_t* src_y0, BlendPlaneRow = 
BlendPlaneRow_AVX2; } } -#endif -#if defined(HAS_BLENDPLANEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - BlendPlaneRow = BlendPlaneRow_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - BlendPlaneRow = BlendPlaneRow_MMI; - } - } #endif if (!IS_ALIGNED(width, 2)) { ScaleRowDown2 = ScaleRowDown2Box_Odd_C; @@ -2498,17 +2860,6 @@ int I420Blend(const uint8_t* src_y0, } } #endif -#if defined(HAS_SCALEROWDOWN2_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleRowDown2 = ScaleRowDown2Box_Odd_MMI; - if (IS_ALIGNED(width, 2)) { - ScaleRowDown2 = ScaleRowDown2Box_Any_MMI; - if (IS_ALIGNED(halfwidth, 8)) { - ScaleRowDown2 = ScaleRowDown2Box_MMI; - } - } - } -#endif // Row buffer for intermediate alpha pixels. align_buffer_64(halfalpha, halfwidth); @@ -2586,14 +2937,6 @@ int ARGBMultiply(const uint8_t* src_argb0, } } #endif -#if defined(HAS_ARGBMULTIPLYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBMultiplyRow = ARGBMultiplyRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBMultiplyRow = ARGBMultiplyRow_MMI; - } - } -#endif #if defined(HAS_ARGBMULTIPLYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBMultiplyRow = ARGBMultiplyRow_Any_MSA; @@ -2602,6 +2945,14 @@ int ARGBMultiply(const uint8_t* src_argb0, } } #endif +#if defined(HAS_ARGBMULTIPLYROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBMultiplyRow = ARGBMultiplyRow_Any_LASX; + if (IS_ALIGNED(width, 8)) { + ARGBMultiplyRow = ARGBMultiplyRow_LASX; + } + } +#endif // Multiply plane for (y = 0; y < height; ++y) { @@ -2671,14 +3022,6 @@ int ARGBAdd(const uint8_t* src_argb0, } } #endif -#if defined(HAS_ARGBADDROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAddRow = ARGBAddRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAddRow = ARGBAddRow_MMI; - } - } -#endif #if defined(HAS_ARGBADDROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAddRow = ARGBAddRow_Any_MSA; @@ -2687,6 +3030,14 @@ int ARGBAdd(const uint8_t* src_argb0, } } #endif +#if defined(HAS_ARGBADDROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBAddRow = ARGBAddRow_Any_LASX; + if (IS_ALIGNED(width, 8)) { + ARGBAddRow = ARGBAddRow_LASX; + } + } +#endif // Add plane for (y = 0; y < height; ++y) { @@ -2751,14 +3102,6 @@ int ARGBSubtract(const uint8_t* src_argb0, } } #endif -#if defined(HAS_ARGBSUBTRACTROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBSubtractRow = ARGBSubtractRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBSubtractRow = ARGBSubtractRow_MMI; - } - } -#endif #if defined(HAS_ARGBSUBTRACTROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBSubtractRow = ARGBSubtractRow_Any_MSA; @@ -2767,6 +3110,14 @@ int ARGBSubtract(const uint8_t* src_argb0, } } #endif +#if defined(HAS_ARGBSUBTRACTROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBSubtractRow = ARGBSubtractRow_Any_LASX; + if (IS_ALIGNED(width, 8)) { + ARGBSubtractRow = ARGBSubtractRow_LASX; + } + } +#endif // Subtract plane for (y = 0; y < height; ++y) { @@ -2820,14 +3171,6 @@ int RAWToRGB24(const uint8_t* src_raw, } } #endif -#if defined(HAS_RAWTORGB24ROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - RAWToRGB24Row = RAWToRGB24Row_Any_MMI; - if (IS_ALIGNED(width, 4)) { - RAWToRGB24Row = RAWToRGB24Row_MMI; - } - } -#endif #if defined(HAS_RAWTORGB24ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { RAWToRGB24Row = RAWToRGB24Row_Any_MSA; @@ -2836,6 +3179,14 @@ int RAWToRGB24(const uint8_t* src_raw, } } #endif +#if defined(HAS_RAWTORGB24ROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + RAWToRGB24Row = RAWToRGB24Row_Any_LSX; + if (IS_ALIGNED(width, 16)) { + RAWToRGB24Row = RAWToRGB24Row_LSX; + } + } +#endif for (y = 0; y < height; ++y) { RAWToRGB24Row(src_raw, dst_rgb24, width); @@ -2853,6 
+3204,10 @@ void SetPlane(uint8_t* dst_y, uint32_t value) { int y; void (*SetRow)(uint8_t * dst, uint8_t value, int width) = SetRow_C; + + if (width <= 0 || height == 0) { + return; + } if (height < 0) { height = -height; dst_y = dst_y + (height - 1) * dst_stride_y; @@ -2890,6 +3245,14 @@ void SetPlane(uint8_t* dst_y, SetRow = SetRow_MSA; } #endif +#if defined(HAS_SETROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + SetRow = SetRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + SetRow = SetRow_LSX; + } + } +#endif // Set plane for (y = 0; y < height; ++y) { @@ -2918,6 +3281,7 @@ int I420Rect(uint8_t* dst_y, uint8_t* start_y = dst_y + y * dst_stride_y + x; uint8_t* start_u = dst_u + (y / 2) * dst_stride_u + (x / 2); uint8_t* start_v = dst_v + (y / 2) * dst_stride_v + (x / 2); + if (!dst_y || !dst_u || !dst_v || width <= 0 || height == 0 || x < 0 || y < 0 || value_y < 0 || value_y > 255 || value_u < 0 || value_u > 255 || value_v < 0 || value_v > 255) { @@ -2971,14 +3335,6 @@ int ARGBRect(uint8_t* dst_argb, ARGBSetRow = ARGBSetRow_X86; } #endif -#if defined(HAS_ARGBSETROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBSetRow = ARGBSetRow_Any_MMI; - if (IS_ALIGNED(width, 4)) { - ARGBSetRow = ARGBSetRow_MMI; - } - } -#endif #if defined(HAS_ARGBSETROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBSetRow = ARGBSetRow_Any_MSA; @@ -2987,6 +3343,14 @@ int ARGBRect(uint8_t* dst_argb, } } #endif +#if defined(HAS_ARGBSETROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGBSetRow = ARGBSetRow_Any_LSX; + if (IS_ALIGNED(width, 4)) { + ARGBSetRow = ARGBSetRow_LSX; + } + } +#endif // Set plane for (y = 0; y < height; ++y) { @@ -3057,14 +3421,6 @@ int ARGBAttenuate(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBATTENUATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBAttenuateRow = ARGBAttenuateRow_MMI; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_MSA; @@ -3073,6 +3429,14 @@ int ARGBAttenuate(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBATTENUATEROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBAttenuateRow = ARGBAttenuateRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBAttenuateRow(src_argb, dst_argb, width); @@ -3168,16 +3532,16 @@ int ARGBGrayTo(const uint8_t* src_argb, ARGBGrayRow = ARGBGrayRow_NEON; } #endif -#if defined(HAS_ARGBGRAYROW_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 2)) { - ARGBGrayRow = ARGBGrayRow_MMI; - } -#endif #if defined(HAS_ARGBGRAYROW_MSA) if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 8)) { ARGBGrayRow = ARGBGrayRow_MSA; } #endif +#if defined(HAS_ARGBGRAYROW_LASX) + if (TestCpuFlag(kCpuHasLASX) && IS_ALIGNED(width, 16)) { + ARGBGrayRow = ARGBGrayRow_LASX; + } +#endif for (y = 0; y < height; ++y) { ARGBGrayRow(src_argb, dst_argb, width); @@ -3218,16 +3582,16 @@ int ARGBGray(uint8_t* dst_argb, ARGBGrayRow = ARGBGrayRow_NEON; } #endif -#if defined(HAS_ARGBGRAYROW_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 2)) { - ARGBGrayRow = ARGBGrayRow_MMI; - } -#endif #if defined(HAS_ARGBGRAYROW_MSA) if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 8)) { ARGBGrayRow = ARGBGrayRow_MSA; } #endif +#if defined(HAS_ARGBGRAYROW_LASX) + if (TestCpuFlag(kCpuHasLASX) && IS_ALIGNED(width, 16)) { + ARGBGrayRow = ARGBGrayRow_LASX; + } +#endif for (y = 0; y < height; ++y) { ARGBGrayRow(dst, dst, width); @@ -3266,16 +3630,16 @@ int 
ARGBSepia(uint8_t* dst_argb, ARGBSepiaRow = ARGBSepiaRow_NEON; } #endif -#if defined(HAS_ARGBSEPIAROW_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 2)) { - ARGBSepiaRow = ARGBSepiaRow_MMI; - } -#endif #if defined(HAS_ARGBSEPIAROW_MSA) if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 8)) { ARGBSepiaRow = ARGBSepiaRow_MSA; } #endif +#if defined(HAS_ARGBSEPIAROW_LASX) + if (TestCpuFlag(kCpuHasLASX) && IS_ALIGNED(width, 16)) { + ARGBSepiaRow = ARGBSepiaRow_LASX; + } +#endif for (y = 0; y < height; ++y) { ARGBSepiaRow(dst, width); @@ -3322,15 +3686,15 @@ int ARGBColorMatrix(const uint8_t* src_argb, ARGBColorMatrixRow = ARGBColorMatrixRow_NEON; } #endif -#if defined(HAS_ARGBCOLORMATRIXROW_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 2)) { - ARGBColorMatrixRow = ARGBColorMatrixRow_MMI; - } -#endif #if defined(HAS_ARGBCOLORMATRIXROW_MSA) if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 8)) { ARGBColorMatrixRow = ARGBColorMatrixRow_MSA; } +#endif +#if defined(HAS_ARGBCOLORMATRIXROW_LSX) + if (TestCpuFlag(kCpuHasLSX) && IS_ALIGNED(width, 8)) { + ARGBColorMatrixRow = ARGBColorMatrixRow_LSX; + } #endif for (y = 0; y < height; ++y) { ARGBColorMatrixRow(src_argb, dst_argb, matrix_argb, width); @@ -3496,6 +3860,11 @@ int ARGBQuantize(uint8_t* dst_argb, if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 8)) { ARGBQuantizeRow = ARGBQuantizeRow_MSA; } +#endif +#if defined(HAS_ARGBQUANTIZEROW_LSX) + if (TestCpuFlag(kCpuHasLSX) && IS_ALIGNED(width, 8)) { + ARGBQuantizeRow = ARGBQuantizeRow_LSX; + } #endif for (y = 0; y < height; ++y) { ARGBQuantizeRow(dst, scale, interval_size, interval_offset, width); @@ -3526,11 +3895,6 @@ int ARGBComputeCumulativeSum(const uint8_t* src_argb, ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2; } #endif -#if defined(HAS_CUMULATIVESUMTOAVERAGEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ComputeCumulativeSumRow = ComputeCumulativeSumRow_MMI; - } -#endif memset(dst_cumsum, 0, width * sizeof(dst_cumsum[0]) * 4); // 4 int per pixel. for (y = 0; y < height; ++y) { @@ -3581,7 +3945,7 @@ int ARGBBlur(const uint8_t* src_argb, if (radius > (width / 2 - 1)) { radius = width / 2 - 1; } - if (radius <= 0) { + if (radius <= 0 || height <= 1) { return -1; } #if defined(HAS_CUMULATIVESUMTOAVERAGEROW_SSE2) @@ -3589,11 +3953,6 @@ int ARGBBlur(const uint8_t* src_argb, ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2; CumulativeSumToAverageRow = CumulativeSumToAverageRow_SSE2; } -#endif -#if defined(HAS_CUMULATIVESUMTOAVERAGEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ComputeCumulativeSumRow = ComputeCumulativeSumRow_MMI; - } #endif // Compute enough CumulativeSum for first row to be blurred. After this // one row of CumulativeSum is updated at a time. 
@@ -3696,16 +4055,16 @@ int ARGBShade(const uint8_t* src_argb, ARGBShadeRow = ARGBShadeRow_NEON; } #endif -#if defined(HAS_ARGBSHADEROW_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 2)) { - ARGBShadeRow = ARGBShadeRow_MMI; - } -#endif #if defined(HAS_ARGBSHADEROW_MSA) if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 4)) { ARGBShadeRow = ARGBShadeRow_MSA; } #endif +#if defined(HAS_ARGBSHADEROW_LASX) + if (TestCpuFlag(kCpuHasLASX) && IS_ALIGNED(width, 8)) { + ARGBShadeRow = ARGBShadeRow_LASX; + } +#endif for (y = 0; y < height; ++y) { ARGBShadeRow(src_argb, dst_argb, width, value); @@ -3769,14 +4128,6 @@ int InterpolatePlane(const uint8_t* src0, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -3785,6 +4136,14 @@ int InterpolatePlane(const uint8_t* src0, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif for (y = 0; y < height; ++y) { InterpolateRow(dst, src0, src1 - src0, width, interpolation); @@ -3795,6 +4154,86 @@ int InterpolatePlane(const uint8_t* src0, return 0; } +// Interpolate 2 planes by specified amount (0 to 255). +LIBYUV_API +int InterpolatePlane_16(const uint16_t* src0, + int src_stride0, + const uint16_t* src1, + int src_stride1, + uint16_t* dst, + int dst_stride, + int width, + int height, + int interpolation) { + int y; + void (*InterpolateRow_16)(uint16_t * dst_ptr, const uint16_t* src_ptr, + ptrdiff_t src_stride, int dst_width, + int source_y_fraction) = InterpolateRow_16_C; + if (!src0 || !src1 || !dst || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst = dst + (height - 1) * dst_stride; + dst_stride = -dst_stride; + } + // Coalesce rows. 
+ if (src_stride0 == width && src_stride1 == width && dst_stride == width) { + width *= height; + height = 1; + src_stride0 = src_stride1 = dst_stride = 0; + } +#if defined(HAS_INTERPOLATEROW_16_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + InterpolateRow_16 = InterpolateRow_16_Any_SSSE3; + if (IS_ALIGNED(width, 16)) { + InterpolateRow_16 = InterpolateRow_16_SSSE3; + } + } +#endif +#if defined(HAS_INTERPOLATEROW_16_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + InterpolateRow_16 = InterpolateRow_16_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + InterpolateRow_16 = InterpolateRow_16_AVX2; + } + } +#endif +#if defined(HAS_INTERPOLATEROW_16_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + InterpolateRow_16 = InterpolateRow_16_Any_NEON; + if (IS_ALIGNED(width, 8)) { + InterpolateRow_16 = InterpolateRow_16_NEON; + } + } +#endif +#if defined(HAS_INTERPOLATEROW_16_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + InterpolateRow_16 = InterpolateRow_16_Any_MSA; + if (IS_ALIGNED(width, 32)) { + InterpolateRow_16 = InterpolateRow_16_MSA; + } + } +#endif +#if defined(HAS_INTERPOLATEROW_16_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow_16 = InterpolateRow_16_Any_LSX; + if (IS_ALIGNED(width, 32)) { + InterpolateRow_16 = InterpolateRow_16_LSX; + } + } +#endif + + for (y = 0; y < height; ++y) { + InterpolateRow_16(dst, src0, src1 - src0, width, interpolation); + src0 += src_stride0; + src1 += src_stride1; + dst += dst_stride; + } + return 0; +} + // Interpolate 2 ARGB images by specified amount (0 to 255). LIBYUV_API int ARGBInterpolate(const uint8_t* src_argb0, @@ -3836,10 +4275,12 @@ int I420Interpolate(const uint8_t* src0_y, int interpolation) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; + if (!src0_y || !src0_u || !src0_v || !src1_y || !src1_u || !src1_v || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } + InterpolatePlane(src0_y, src0_stride_y, src1_y, src1_stride_y, dst_y, dst_stride_y, width, height, interpolation); InterpolatePlane(src0_u, src0_stride_u, src1_u, src1_stride_u, dst_u, @@ -3900,14 +4341,6 @@ int ARGBShuffle(const uint8_t* src_bgra, } } #endif -#if defined(HAS_ARGBSHUFFLEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBShuffleRow = ARGBShuffleRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBShuffleRow = ARGBShuffleRow_MMI; - } - } -#endif #if defined(HAS_ARGBSHUFFLEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBShuffleRow = ARGBShuffleRow_Any_MSA; @@ -3916,6 +4349,14 @@ int ARGBShuffle(const uint8_t* src_bgra, } } #endif +#if defined(HAS_ARGBSHUFFLEROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBShuffleRow = ARGBShuffleRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBShuffleRow = ARGBShuffleRow_LASX; + } + } +#endif for (y = 0; y < height; ++y) { ARGBShuffleRow(src_bgra, dst_argb, shuffler, width); @@ -3977,14 +4418,6 @@ int AR64Shuffle(const uint16_t* src_ar64, } } #endif -#if defined(HAS_ARGBSHUFFLEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - AR64ShuffleRow = ARGBShuffleRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - AR64ShuffleRow = ARGBShuffleRow_MMI; - } - } -#endif for (y = 0; y < height; ++y) { AR64ShuffleRow((uint8_t*)(src_ar64), (uint8_t*)(dst_ar64), shuffler, @@ -4118,19 +4551,11 @@ static int ARGBSobelize(const uint8_t* src_argb, #if defined(HAS_ARGBTOYJROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBToYJRow = ARGBToYJRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 16)) { ARGBToYJRow = ARGBToYJRow_NEON; } } #endif -#if defined(HAS_ARGBTOYJROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBToYJRow = ARGBToYJRow_Any_MMI; - if 
(IS_ALIGNED(width, 8)) { - ARGBToYJRow = ARGBToYJRow_MMI; - } - } -#endif #if defined(HAS_ARGBTOYJROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBToYJRow = ARGBToYJRow_Any_MSA; @@ -4139,6 +4564,22 @@ static int ARGBSobelize(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBTOYJROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGBToYJRow = ARGBToYJRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + ARGBToYJRow = ARGBToYJRow_LSX; + } + } +#endif +#if defined(HAS_ARGBTOYJROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBToYJRow = ARGBToYJRow_Any_LASX; + if (IS_ALIGNED(width, 32)) { + ARGBToYJRow = ARGBToYJRow_LASX; + } + } +#endif #if defined(HAS_SOBELYROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { @@ -4150,11 +4591,6 @@ static int ARGBSobelize(const uint8_t* src_argb, SobelYRow = SobelYRow_NEON; } #endif -#if defined(HAS_SOBELYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SobelYRow = SobelYRow_MMI; - } -#endif #if defined(HAS_SOBELYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SobelYRow = SobelYRow_MSA; @@ -4170,11 +4606,6 @@ static int ARGBSobelize(const uint8_t* src_argb, SobelXRow = SobelXRow_NEON; } #endif -#if defined(HAS_SOBELXROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SobelXRow = SobelXRow_MMI; - } -#endif #if defined(HAS_SOBELXROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SobelXRow = SobelXRow_MSA; @@ -4182,16 +4613,16 @@ static int ARGBSobelize(const uint8_t* src_argb, #endif { // 3 rows with edges before/after. - const int kRowSize = (width + kEdge + 31) & ~31; - align_buffer_64(rows, kRowSize * 2 + (kEdge + kRowSize * 3 + kEdge)); + const int row_size = (width + kEdge + 31) & ~31; + align_buffer_64(rows, row_size * 2 + (kEdge + row_size * 3 + kEdge)); uint8_t* row_sobelx = rows; - uint8_t* row_sobely = rows + kRowSize; - uint8_t* row_y = rows + kRowSize * 2; + uint8_t* row_sobely = rows + row_size; + uint8_t* row_y = rows + row_size * 2; // Convert first row. uint8_t* row_y0 = row_y + kEdge; - uint8_t* row_y1 = row_y0 + kRowSize; - uint8_t* row_y2 = row_y1 + kRowSize; + uint8_t* row_y1 = row_y0 + row_size; + uint8_t* row_y2 = row_y1 + row_size; ARGBToYJRow(src_argb, row_y0, width); row_y0[-1] = row_y0[0]; memset(row_y0 + width, row_y0[width - 1], 16); // Extrude 16 for valgrind. 
@@ -4254,14 +4685,6 @@ int ARGBSobel(const uint8_t* src_argb, } } #endif -#if defined(HAS_SOBELROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SobelRow = SobelRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - SobelRow = SobelRow_MMI; - } - } -#endif #if defined(HAS_SOBELROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SobelRow = SobelRow_Any_MSA; @@ -4269,6 +4692,14 @@ int ARGBSobel(const uint8_t* src_argb, SobelRow = SobelRow_MSA; } } +#endif +#if defined(HAS_SOBELROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + SobelRow = SobelRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + SobelRow = SobelRow_LSX; + } + } #endif return ARGBSobelize(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width, height, SobelRow); @@ -4300,14 +4731,6 @@ int ARGBSobelToPlane(const uint8_t* src_argb, } } #endif -#if defined(HAS_SOBELTOPLANEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SobelToPlaneRow = SobelToPlaneRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - SobelToPlaneRow = SobelToPlaneRow_MMI; - } - } -#endif #if defined(HAS_SOBELTOPLANEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SobelToPlaneRow = SobelToPlaneRow_Any_MSA; @@ -4315,6 +4738,14 @@ int ARGBSobelToPlane(const uint8_t* src_argb, SobelToPlaneRow = SobelToPlaneRow_MSA; } } +#endif +#if defined(HAS_SOBELTOPLANEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + SobelToPlaneRow = SobelToPlaneRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + SobelToPlaneRow = SobelToPlaneRow_LSX; + } + } #endif return ARGBSobelize(src_argb, src_stride_argb, dst_y, dst_stride_y, width, height, SobelToPlaneRow); @@ -4347,14 +4778,6 @@ int ARGBSobelXY(const uint8_t* src_argb, } } #endif -#if defined(HAS_SOBELXYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SobelXYRow = SobelXYRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - SobelXYRow = SobelXYRow_MMI; - } - } -#endif #if defined(HAS_SOBELXYROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SobelXYRow = SobelXYRow_Any_MSA; @@ -4362,6 +4785,14 @@ int ARGBSobelXY(const uint8_t* src_argb, SobelXYRow = SobelXYRow_MSA; } } +#endif +#if defined(HAS_SOBELXYROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + SobelXYRow = SobelXYRow_Any_LSX; + if (IS_ALIGNED(width, 16)) { + SobelXYRow = SobelXYRow_LSX; + } + } #endif return ARGBSobelize(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width, height, SobelXYRow); @@ -4486,6 +4917,14 @@ int HalfFloatPlane(const uint16_t* src_y, } } #endif +#if defined(HAS_HALFFLOATROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + HalfFloatRow = HalfFloatRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + HalfFloatRow = HalfFloatRow_LSX; + } + } +#endif for (y = 0; y < height; ++y) { HalfFloatRow(src_y, dst_y, scale, width); @@ -4600,14 +5039,6 @@ int ARGBCopyAlpha(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBCOPYALPHAROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBCopyAlphaRow = ARGBCopyAlphaRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBCopyAlphaRow = ARGBCopyAlphaRow_MMI; - } - } -#endif for (y = 0; y < height; ++y) { ARGBCopyAlphaRow(src_argb, dst_argb, width); @@ -4660,18 +5091,18 @@ int ARGBExtractAlpha(const uint8_t* src_argb, : ARGBExtractAlphaRow_Any_NEON; } #endif -#if defined(HAS_ARGBEXTRACTALPHAROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBExtractAlphaRow = IS_ALIGNED(width, 8) ? ARGBExtractAlphaRow_MMI - : ARGBExtractAlphaRow_Any_MMI; - } -#endif #if defined(HAS_ARGBEXTRACTALPHAROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBExtractAlphaRow = IS_ALIGNED(width, 16) ? 
ARGBExtractAlphaRow_MSA : ARGBExtractAlphaRow_Any_MSA; } #endif +#if defined(HAS_ARGBEXTRACTALPHAROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ARGBExtractAlphaRow = IS_ALIGNED(width, 16) ? ARGBExtractAlphaRow_LSX + : ARGBExtractAlphaRow_Any_LSX; + } +#endif for (int y = 0; y < height; ++y) { ARGBExtractAlphaRow(src_argb, dst_a, width); @@ -4723,14 +5154,6 @@ int ARGBCopyYToAlpha(const uint8_t* src_y, } } #endif -#if defined(HAS_ARGBCOPYYTOALPHAROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_MMI; - } - } -#endif for (y = 0; y < height; ++y) { ARGBCopyYToAlphaRow(src_y, dst_argb, width); @@ -4740,9 +5163,6 @@ int ARGBCopyYToAlpha(const uint8_t* src_y, return 0; } -// TODO(fbarchard): Consider if width is even Y channel can be split -// directly. A SplitUVRow_Odd function could copy the remaining chroma. - LIBYUV_API int YUY2ToNV12(const uint8_t* src_yuy2, int src_stride_yuy2, @@ -4753,124 +5173,97 @@ int YUY2ToNV12(const uint8_t* src_yuy2, int width, int height) { int y; - int halfwidth = (width + 1) >> 1; - void (*SplitUVRow)(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* dst_v, - int width) = SplitUVRow_C; - void (*InterpolateRow)(uint8_t * dst_ptr, const uint8_t* src_ptr, - ptrdiff_t src_stride, int dst_width, - int source_y_fraction) = InterpolateRow_C; + void (*YUY2ToYRow)(const uint8_t* src_yuy2, uint8_t* dst_y, int width) = + YUY2ToYRow_C; + void (*YUY2ToNVUVRow)(const uint8_t* src_yuy2, int stride_yuy2, + uint8_t* dst_uv, int width) = YUY2ToNVUVRow_C; if (!src_yuy2 || !dst_y || !dst_uv || width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2; src_stride_yuy2 = -src_stride_yuy2; } -#if defined(HAS_SPLITUVROW_SSE2) +#if defined(HAS_YUY2TOYROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { - SplitUVRow = SplitUVRow_Any_SSE2; + YUY2ToYRow = YUY2ToYRow_Any_SSE2; if (IS_ALIGNED(width, 16)) { - SplitUVRow = SplitUVRow_SSE2; + YUY2ToYRow = YUY2ToYRow_SSE2; } } #endif -#if defined(HAS_SPLITUVROW_AVX2) +#if defined(HAS_YUY2TOYROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - SplitUVRow = SplitUVRow_Any_AVX2; + YUY2ToYRow = YUY2ToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { - SplitUVRow = SplitUVRow_AVX2; + YUY2ToYRow = YUY2ToYRow_AVX2; } } #endif -#if defined(HAS_SPLITUVROW_NEON) +#if defined(HAS_YUY2TOYROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { - SplitUVRow = SplitUVRow_Any_NEON; + YUY2ToYRow = YUY2ToYRow_Any_NEON; if (IS_ALIGNED(width, 16)) { - SplitUVRow = SplitUVRow_NEON; + YUY2ToYRow = YUY2ToYRow_NEON; } } #endif -#if defined(HAS_SPLITUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SplitUVRow = SplitUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - SplitUVRow = SplitUVRow_MMI; - } - } -#endif -#if defined(HAS_SPLITUVROW_MSA) +#if defined(HAS_YUY2TOYROW_MSA) && defined(HAS_YUY2TOUV422ROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { - SplitUVRow = SplitUVRow_Any_MSA; + YUY2ToYRow = YUY2ToYRow_Any_MSA; if (IS_ALIGNED(width, 32)) { - SplitUVRow = SplitUVRow_MSA; + YUY2ToYRow = YUY2ToYRow_MSA; } } #endif -#if defined(HAS_INTERPOLATEROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - InterpolateRow = InterpolateRow_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - InterpolateRow = InterpolateRow_SSSE3; - } - } -#endif -#if defined(HAS_INTERPOLATEROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - InterpolateRow = InterpolateRow_Any_AVX2; +#if defined(HAS_YUY2TOYROW_LASX) && 
defined(HAS_YUY2TOUV422ROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + YUY2ToYRow = YUY2ToYRow_Any_LASX; if (IS_ALIGNED(width, 32)) { - InterpolateRow = InterpolateRow_AVX2; - } - } -#endif -#if defined(HAS_INTERPOLATEROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - InterpolateRow = InterpolateRow_Any_NEON; - if (IS_ALIGNED(width, 16)) { - InterpolateRow = InterpolateRow_NEON; - } - } -#endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif -#if defined(HAS_INTERPOLATEROW_MSA) - if (TestCpuFlag(kCpuHasMSA)) { - InterpolateRow = InterpolateRow_Any_MSA; - if (IS_ALIGNED(width, 32)) { - InterpolateRow = InterpolateRow_MSA; + YUY2ToYRow = YUY2ToYRow_LASX; } } #endif - { - int awidth = halfwidth * 2; - // row of y and 2 rows of uv - align_buffer_64(rows, awidth * 3); +#if defined(HAS_YUY2TONVUVROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + YUY2ToNVUVRow = YUY2ToNVUVRow_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + YUY2ToNVUVRow = YUY2ToNVUVRow_SSE2; + } + } +#endif +#if defined(HAS_YUY2TONVUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + YUY2ToNVUVRow = YUY2ToNVUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + YUY2ToNVUVRow = YUY2ToNVUVRow_AVX2; + } + } +#endif +#if defined(HAS_YUY2TONVUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + YUY2ToNVUVRow = YUY2ToNVUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + YUY2ToNVUVRow = YUY2ToNVUVRow_NEON; + } + } +#endif - for (y = 0; y < height - 1; y += 2) { - // Split Y from UV. - SplitUVRow(src_yuy2, rows, rows + awidth, awidth); - memcpy(dst_y, rows, width); - SplitUVRow(src_yuy2 + src_stride_yuy2, rows, rows + awidth * 2, awidth); - memcpy(dst_y + dst_stride_y, rows, width); - InterpolateRow(dst_uv, rows + awidth, awidth, awidth, 128); - src_yuy2 += src_stride_yuy2 * 2; - dst_y += dst_stride_y * 2; - dst_uv += dst_stride_uv; - } - if (height & 1) { - // Split Y from UV. - SplitUVRow(src_yuy2, rows, dst_uv, awidth); - memcpy(dst_y, rows, width); - } - free_aligned_buffer_64(rows); + for (y = 0; y < height - 1; y += 2) { + YUY2ToYRow(src_yuy2, dst_y, width); + YUY2ToYRow(src_yuy2 + src_stride_yuy2, dst_y + dst_stride_y, width); + YUY2ToNVUVRow(src_yuy2, src_stride_yuy2, dst_uv, width); + src_yuy2 += src_stride_yuy2 * 2; + dst_y += dst_stride_y * 2; + dst_uv += dst_stride_uv; + } + if (height & 1) { + YUY2ToYRow(src_yuy2, dst_y, width); + YUY2ToNVUVRow(src_yuy2, 0, dst_uv, width); } return 0; } @@ -4891,9 +5284,11 @@ int UYVYToNV12(const uint8_t* src_uyvy, void (*InterpolateRow)(uint8_t * dst_ptr, const uint8_t* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) = InterpolateRow_C; + if (!src_uyvy || !dst_y || !dst_uv || width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. 
if (height < 0) { height = -height; @@ -4924,14 +5319,6 @@ int UYVYToNV12(const uint8_t* src_uyvy, } } #endif -#if defined(HAS_SPLITUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - SplitUVRow = SplitUVRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - SplitUVRow = SplitUVRow_MMI; - } - } -#endif #if defined(HAS_SPLITUVROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { SplitUVRow = SplitUVRow_Any_MSA; @@ -4940,6 +5327,14 @@ int UYVYToNV12(const uint8_t* src_uyvy, } } #endif +#if defined(HAS_SPLITUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + SplitUVRow = SplitUVRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + SplitUVRow = SplitUVRow_LSX; + } + } +#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -4964,14 +5359,6 @@ int UYVYToNV12(const uint8_t* src_uyvy, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -4980,6 +5367,14 @@ int UYVYToNV12(const uint8_t* src_uyvy, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(width, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif { int awidth = halfwidth * 2; diff --git a/third-party/libyuv/third_party/libyuv/source/rotate.cc b/third-party/libyuv/third_party/libyuv/source/rotate.cc index 32904e4731..f1e83cbd4e 100644 --- a/third-party/libyuv/third_party/libyuv/source/rotate.cc +++ b/third-party/libyuv/third_party/libyuv/source/rotate.cc @@ -29,7 +29,7 @@ void TransposePlane(const uint8_t* src, int width, int height) { int i = height; -#if defined(HAS_TRANSPOSEWX16_MSA) +#if defined(HAS_TRANSPOSEWX16_MSA) || defined(HAS_TRANSPOSEWX16_LSX) void (*TransposeWx16)(const uint8_t* src, int src_stride, uint8_t* dst, int dst_stride, int width) = TransposeWx16_C; #else @@ -37,17 +37,12 @@ void TransposePlane(const uint8_t* src, int dst_stride, int width) = TransposeWx8_C; #endif -#if defined(HAS_TRANSPOSEWX16_MSA) - if (TestCpuFlag(kCpuHasMSA)) { - TransposeWx16 = TransposeWx16_Any_MSA; - if (IS_ALIGNED(width, 16)) { - TransposeWx16 = TransposeWx16_MSA; - } - } -#else #if defined(HAS_TRANSPOSEWX8_NEON) if (TestCpuFlag(kCpuHasNEON)) { - TransposeWx8 = TransposeWx8_NEON; + TransposeWx8 = TransposeWx8_Any_NEON; + if (IS_ALIGNED(width, 8)) { + TransposeWx8 = TransposeWx8_NEON; + } } #endif #if defined(HAS_TRANSPOSEWX8_SSSE3) @@ -58,11 +53,6 @@ void TransposePlane(const uint8_t* src, } } #endif -#if defined(HAS_TRANSPOSEWX8_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - TransposeWx8 = TransposeWx8_MMI; - } -#endif #if defined(HAS_TRANSPOSEWX8_FAST_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { TransposeWx8 = TransposeWx8_Fast_Any_SSSE3; @@ -71,9 +61,24 @@ void TransposePlane(const uint8_t* src, } } #endif -#endif /* defined(HAS_TRANSPOSEWX16_MSA) */ - #if defined(HAS_TRANSPOSEWX16_MSA) + if (TestCpuFlag(kCpuHasMSA)) { + TransposeWx16 = TransposeWx16_Any_MSA; + if (IS_ALIGNED(width, 16)) { + TransposeWx16 = TransposeWx16_MSA; + } + } +#endif +#if defined(HAS_TRANSPOSEWX16_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + TransposeWx16 = TransposeWx16_Any_LSX; + if (IS_ALIGNED(width, 16)) { + TransposeWx16 = TransposeWx16_LSX; + } + } +#endif + +#if defined(HAS_TRANSPOSEWX16_MSA) || defined(HAS_TRANSPOSEWX16_LSX) // Work across the source in 16x16 tiles while (i >= 16) { 
TransposeWx16(src, src_stride, dst, dst_stride, width); @@ -165,14 +170,6 @@ void RotatePlane180(const uint8_t* src, } } #endif -#if defined(HAS_MIRRORROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - MirrorRow = MirrorRow_Any_MMI; - if (IS_ALIGNED(width, 8)) { - MirrorRow = MirrorRow_MMI; - } - } -#endif #if defined(HAS_MIRRORROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { MirrorRow = MirrorRow_Any_MSA; @@ -181,6 +178,14 @@ void RotatePlane180(const uint8_t* src, } } #endif +#if defined(HAS_MIRRORROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + MirrorRow = MirrorRow_Any_LASX; + if (IS_ALIGNED(width, 64)) { + MirrorRow = MirrorRow_LASX; + } + } +#endif #if defined(HAS_COPYROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2; @@ -201,11 +206,6 @@ void RotatePlane180(const uint8_t* src, CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON; } #endif -#if defined(HAS_COPYROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - CopyRow = IS_ALIGNED(width, 8) ? CopyRow_MMI : CopyRow_Any_MMI; - } -#endif // Odd height will harmlessly mirror the middle row twice. for (y = 0; y < half_height; ++y) { @@ -221,19 +221,23 @@ void RotatePlane180(const uint8_t* src, } LIBYUV_API -void TransposeUV(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height) { +void SplitTransposeUV(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height) { int i = height; #if defined(HAS_TRANSPOSEUVWX16_MSA) void (*TransposeUVWx16)(const uint8_t* src, int src_stride, uint8_t* dst_a, int dst_stride_a, uint8_t* dst_b, int dst_stride_b, int width) = TransposeUVWx16_C; +#elif defined(HAS_TRANSPOSEUVWX16_LSX) + void (*TransposeUVWx16)(const uint8_t* src, int src_stride, uint8_t* dst_a, + int dst_stride_a, uint8_t* dst_b, int dst_stride_b, + int width) = TransposeUVWx16_C; #else void (*TransposeUVWx8)(const uint8_t* src, int src_stride, uint8_t* dst_a, int dst_stride_a, uint8_t* dst_b, int dst_stride_b, @@ -247,6 +251,13 @@ void TransposeUV(const uint8_t* src, TransposeUVWx16 = TransposeUVWx16_MSA; } } +#elif defined(HAS_TRANSPOSEUVWX16_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + TransposeUVWx16 = TransposeUVWx16_Any_LSX; + if (IS_ALIGNED(width, 8)) { + TransposeUVWx16 = TransposeUVWx16_LSX; + } + } #else #if defined(HAS_TRANSPOSEUVWX8_NEON) if (TestCpuFlag(kCpuHasNEON)) { @@ -261,14 +272,6 @@ void TransposeUV(const uint8_t* src, } } #endif -#if defined(HAS_TRANSPOSEUVWX8_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - TransposeUVWx8 = TransposeUVWx8_Any_MMI; - if (IS_ALIGNED(width, 4)) { - TransposeUVWx8 = TransposeUVWx8_MMI; - } - } -#endif #endif /* defined(HAS_TRANSPOSEUVWX16_MSA) */ #if defined(HAS_TRANSPOSEUVWX16_MSA) @@ -281,6 +284,16 @@ void TransposeUV(const uint8_t* src, dst_b += 16; // Move over 8 columns. i -= 16; } +#elif defined(HAS_TRANSPOSEUVWX16_LSX) + // Work through the source in 8x8 tiles. + while (i >= 16) { + TransposeUVWx16(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, + width); + src += 16 * src_stride; // Go down 16 rows. + dst_a += 16; // Move over 8 columns. + dst_b += 16; // Move over 8 columns. + i -= 16; + } #else // Work through the source in 8x8 tiles. 
while (i >= 8) { @@ -300,49 +313,49 @@ void TransposeUV(const uint8_t* src, } LIBYUV_API -void RotateUV90(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height) { +void SplitRotateUV90(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height) { src += src_stride * (height - 1); src_stride = -src_stride; - TransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, width, - height); + SplitTransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, + width, height); } LIBYUV_API -void RotateUV270(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height) { +void SplitRotateUV270(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height) { dst_a += dst_stride_a * (width - 1); dst_b += dst_stride_b * (width - 1); dst_stride_a = -dst_stride_a; dst_stride_b = -dst_stride_b; - TransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, width, - height); + SplitTransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, + width, height); } // Rotate 180 is a horizontal and vertical flip. LIBYUV_API -void RotateUV180(const uint8_t* src, - int src_stride, - uint8_t* dst_a, - int dst_stride_a, - uint8_t* dst_b, - int dst_stride_b, - int width, - int height) { +void SplitRotateUV180(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width, + int height) { int i; void (*MirrorSplitUVRow)(const uint8_t* src, uint8_t* dst_u, uint8_t* dst_v, int width) = MirrorSplitUVRow_C; @@ -356,16 +369,16 @@ void RotateUV180(const uint8_t* src, MirrorSplitUVRow = MirrorSplitUVRow_SSSE3; } #endif -#if defined(HAS_MIRRORSPLITUVROW_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 8)) { - MirrorSplitUVRow = MirrorSplitUVRow_MMI; - } -#endif #if defined(HAS_MIRRORSPLITUVROW_MSA) if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 32)) { MirrorSplitUVRow = MirrorSplitUVRow_MSA; } #endif +#if defined(HAS_MIRRORSPLITUVROW_LSX) + if (TestCpuFlag(kCpuHasLSX) && IS_ALIGNED(width, 32)) { + MirrorSplitUVRow = MirrorSplitUVRow_LSX; + } +#endif dst_a += dst_stride_a * (height - 1); dst_b += dst_stride_b * (height - 1); @@ -378,6 +391,52 @@ void RotateUV180(const uint8_t* src, } } +// Rotate UV and split into planar. +// width and height expected to be half size for NV12 +LIBYUV_API +int SplitRotateUV(const uint8_t* src_uv, + int src_stride_uv, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + enum RotationMode mode) { + if (!src_uv || width <= 0 || height == 0 || !dst_u || !dst_v) { + return -1; + } + + // Negative height means invert the image. 
+  if (height < 0) {
+    height = -height;
+    src_uv = src_uv + (height - 1) * src_stride_uv;
+    src_stride_uv = -src_stride_uv;
+  }
+
+  switch (mode) {
+    case kRotate0:
+      SplitUVPlane(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
+                   dst_stride_v, width, height);
+      return 0;
+    case kRotate90:
+      SplitRotateUV90(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
+                      dst_stride_v, width, height);
+      return 0;
+    case kRotate270:
+      SplitRotateUV270(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
+                       dst_stride_v, width, height);
+      return 0;
+    case kRotate180:
+      SplitRotateUV180(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
+                       dst_stride_v, width, height);
+      return 0;
+    default:
+      break;
+  }
+  return -1;
+}
+
 LIBYUV_API
 int RotatePlane(const uint8_t* src,
                 int src_stride,
@@ -435,8 +494,8 @@ int I420Rotate(const uint8_t* src_y,
                enum RotationMode mode) {
   int halfwidth = (width + 1) >> 1;
   int halfheight = (height + 1) >> 1;
-  if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y ||
-      !dst_u || !dst_v) {
+  if ((!src_y && dst_y) || !src_u || !src_v || width <= 0 || height == 0 ||
+      !dst_y || !dst_u || !dst_v) {
     return -1;
   }
@@ -485,6 +544,80 @@ int I420Rotate(const uint8_t* src_y,
   return -1;
 }
+LIBYUV_API
+int I422Rotate(const uint8_t* src_y,
+               int src_stride_y,
+               const uint8_t* src_u,
+               int src_stride_u,
+               const uint8_t* src_v,
+               int src_stride_v,
+               uint8_t* dst_y,
+               int dst_stride_y,
+               uint8_t* dst_u,
+               int dst_stride_u,
+               uint8_t* dst_v,
+               int dst_stride_v,
+               int width,
+               int height,
+               enum RotationMode mode) {
+  int halfwidth = (width + 1) >> 1;
+  int halfheight = (height + 1) >> 1;
+  if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y ||
+      !dst_u || !dst_v) {
+    return -1;
+  }
+  // Negative height means invert the image.
+  if (height < 0) {
+    height = -height;
+    src_y = src_y + (height - 1) * src_stride_y;
+    src_u = src_u + (height - 1) * src_stride_u;
+    src_v = src_v + (height - 1) * src_stride_v;
+    src_stride_y = -src_stride_y;
+    src_stride_u = -src_stride_u;
+    src_stride_v = -src_stride_v;
+  }
+
+  switch (mode) {
+    case kRotate0:
+      // copy frame
+      CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+      CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, height);
+      CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, height);
+      return 0;
+    case kRotate90:
+      // We need to rotate and rescale; we use plane Y as temporary storage.
+      RotatePlane90(src_u, src_stride_u, dst_y, height, halfwidth, height);
+      ScalePlane(dst_y, height, height, halfwidth, dst_u, halfheight,
+                 halfheight, width, kFilterBilinear);
+      RotatePlane90(src_v, src_stride_v, dst_y, height, halfwidth, height);
+      ScalePlane(dst_y, height, height, halfwidth, dst_v, halfheight,
+                 halfheight, width, kFilterLinear);
+      RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+      return 0;
+    case kRotate270:
+      // We need to rotate and rescale; we use plane Y as temporary storage.
+ RotatePlane270(src_u, src_stride_u, dst_y, height, halfwidth, height); + ScalePlane(dst_y, height, height, halfwidth, dst_u, halfheight, + halfheight, width, kFilterBilinear); + RotatePlane270(src_v, src_stride_v, dst_y, height, halfwidth, height); + ScalePlane(dst_y, height, height, halfwidth, dst_v, halfheight, + halfheight, width, kFilterLinear); + RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + + return 0; + case kRotate180: + RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + RotatePlane180(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, + height); + RotatePlane180(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, + height); + return 0; + default: + break; + } + return -1; +} + LIBYUV_API int I444Rotate(const uint8_t* src_y, int src_stride_y, @@ -500,7 +633,7 @@ int I444Rotate(const uint8_t* src_y, int dst_stride_v, int width, int height, - enum libyuv::RotationMode mode) { + enum RotationMode mode) { if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y || !dst_u || !dst_v) { return -1; @@ -518,23 +651,23 @@ int I444Rotate(const uint8_t* src_y, } switch (mode) { - case libyuv::kRotate0: + case kRotate0: // copy frame CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height); CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height); return 0; - case libyuv::kRotate90: + case kRotate90: RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height); RotatePlane90(src_u, src_stride_u, dst_u, dst_stride_u, width, height); RotatePlane90(src_v, src_stride_v, dst_v, dst_stride_v, width, height); return 0; - case libyuv::kRotate270: + case kRotate270: RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height); RotatePlane270(src_u, src_stride_u, dst_u, dst_stride_u, width, height); RotatePlane270(src_v, src_stride_v, dst_v, dst_stride_v, width, height); return 0; - case libyuv::kRotate180: + case kRotate180: RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height); RotatePlane180(src_u, src_stride_u, dst_u, dst_stride_u, width, height); RotatePlane180(src_v, src_stride_v, dst_v, dst_stride_v, width, height); @@ -584,18 +717,18 @@ int NV12ToI420Rotate(const uint8_t* src_y, width, height); case kRotate90: RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height); - RotateUV90(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, - dst_stride_v, halfwidth, halfheight); + SplitRotateUV90(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, + dst_stride_v, halfwidth, halfheight); return 0; case kRotate270: RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height); - RotateUV270(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, - dst_stride_v, halfwidth, halfheight); + SplitRotateUV270(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, + dst_stride_v, halfwidth, halfheight); return 0; case kRotate180: RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height); - RotateUV180(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, - dst_stride_v, halfwidth, halfheight); + SplitRotateUV180(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, + dst_stride_v, halfwidth, halfheight); return 0; default: break; @@ -603,6 +736,98 @@ int NV12ToI420Rotate(const uint8_t* src_y, return -1; } +static void SplitPixels(const uint8_t* src_u, + int src_pixel_stride_uv, + uint8_t* dst_u, + int width) { + int i; + for (i = 0; i < width; ++i) { + *dst_u = *src_u; + ++dst_u; + src_u += 
src_pixel_stride_uv; + } +} + +// Convert Android420 to I420 with Rotate +LIBYUV_API +int Android420ToI420Rotate(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + int src_pixel_stride_uv, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int width, + int height, + enum RotationMode rotation) { + int y; + const ptrdiff_t vu_off = src_v - src_u; + int halfwidth = (width + 1) >> 1; + int halfheight = (height + 1) >> 1; + if ((!src_y && dst_y) || !src_u || !src_v || !dst_u || !dst_v || width <= 0 || + height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + halfheight = (height + 1) >> 1; + src_y = src_y + (height - 1) * src_stride_y; + src_u = src_u + (halfheight - 1) * src_stride_u; + src_v = src_v + (halfheight - 1) * src_stride_v; + src_stride_y = -src_stride_y; + src_stride_u = -src_stride_u; + src_stride_v = -src_stride_v; + } + + if (dst_y) { + RotatePlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height, + rotation); + } + + // Copy UV planes - I420 + if (src_pixel_stride_uv == 1) { + RotatePlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight, + rotation); + RotatePlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight, + rotation); + return 0; + } + // Split UV planes - NV21 + if (src_pixel_stride_uv == 2 && vu_off == -1 && + src_stride_u == src_stride_v) { + SplitRotateUV(src_v, src_stride_v, dst_v, dst_stride_v, dst_u, dst_stride_u, + halfwidth, halfheight, rotation); + return 0; + } + // Split UV planes - NV12 + if (src_pixel_stride_uv == 2 && vu_off == 1 && src_stride_u == src_stride_v) { + SplitRotateUV(src_u, src_stride_u, dst_u, dst_stride_u, dst_v, dst_stride_v, + halfwidth, halfheight, rotation); + return 0; + } + + if (rotation == 0) { + for (y = 0; y < halfheight; ++y) { + SplitPixels(src_u, src_pixel_stride_uv, dst_u, halfwidth); + SplitPixels(src_v, src_pixel_stride_uv, dst_v, halfwidth); + src_u += src_stride_u; + src_v += src_stride_v; + dst_u += dst_stride_u; + dst_v += dst_stride_v; + } + return 0; + } + // unsupported type and/or rotation. 
+ return -1; +} + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/source/rotate_any.cc b/third-party/libyuv/third_party/libyuv/source/rotate_any.cc index b3baf084d0..88ca78765a 100644 --- a/third-party/libyuv/third_party/libyuv/source/rotate_any.cc +++ b/third-party/libyuv/third_party/libyuv/source/rotate_any.cc @@ -35,15 +35,15 @@ TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, 7) #ifdef HAS_TRANSPOSEWX8_SSSE3 TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, 7) #endif -#ifdef HAS_TRANSPOSEWX8_MMI -TANY(TransposeWx8_Any_MMI, TransposeWx8_MMI, 7) -#endif #ifdef HAS_TRANSPOSEWX8_FAST_SSSE3 TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15) #endif #ifdef HAS_TRANSPOSEWX16_MSA TANY(TransposeWx16_Any_MSA, TransposeWx16_MSA, 15) #endif +#ifdef HAS_TRANSPOSEWX16_LSX +TANY(TransposeWx16_Any_LSX, TransposeWx16_LSX, 15) +#endif #undef TANY #define TUVANY(NAMEANY, TPOS_SIMD, MASK) \ @@ -65,12 +65,12 @@ TUVANY(TransposeUVWx8_Any_NEON, TransposeUVWx8_NEON, 7) #ifdef HAS_TRANSPOSEUVWX8_SSE2 TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7) #endif -#ifdef HAS_TRANSPOSEUVWX8_MMI -TUVANY(TransposeUVWx8_Any_MMI, TransposeUVWx8_MMI, 7) -#endif #ifdef HAS_TRANSPOSEUVWX16_MSA TUVANY(TransposeUVWx16_Any_MSA, TransposeUVWx16_MSA, 7) #endif +#ifdef HAS_TRANSPOSEUVWX16_LSX +TUVANY(TransposeUVWx16_Any_LSX, TransposeUVWx16_LSX, 7) +#endif #undef TUVANY #ifdef __cplusplus diff --git a/third-party/libyuv/third_party/libyuv/source/rotate_argb.cc b/third-party/libyuv/third_party/libyuv/source/rotate_argb.cc index ae65388601..28226210e1 100644 --- a/third-party/libyuv/third_party/libyuv/source/rotate_argb.cc +++ b/third-party/libyuv/third_party/libyuv/source/rotate_argb.cc @@ -8,11 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "libyuv/rotate.h" +#include "libyuv/rotate_argb.h" #include "libyuv/convert.h" #include "libyuv/cpu_id.h" #include "libyuv/planar_functions.h" +#include "libyuv/rotate.h" #include "libyuv/row.h" #include "libyuv/scale_row.h" /* for ScaleARGBRowDownEven_ */ @@ -52,14 +53,6 @@ static int ARGBTranspose(const uint8_t* src_argb, } } #endif -#if defined(HAS_SCALEARGBROWDOWNEVEN_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MMI; - if (IS_ALIGNED(height, 4)) { // Width of dest. - ScaleARGBRowDownEven = ScaleARGBRowDownEven_MMI; - } - } -#endif #if defined(HAS_SCALEARGBROWDOWNEVEN_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MSA; @@ -68,6 +61,14 @@ static int ARGBTranspose(const uint8_t* src_argb, } } #endif +#if defined(HAS_SCALEARGBROWDOWNEVEN_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_LSX; + if (IS_ALIGNED(height, 4)) { // Width of dest. + ScaleARGBRowDownEven = ScaleARGBRowDownEven_LSX; + } + } +#endif for (i = 0; i < width; ++i) { // column of source to row of dest. 
ScaleARGBRowDownEven(src_argb, 0, src_pixel_step, dst_argb, height); @@ -147,14 +148,6 @@ static int ARGBRotate180(const uint8_t* src_argb, } } #endif -#if defined(HAS_ARGBMIRRORROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ARGBMirrorRow = ARGBMirrorRow_Any_MMI; - if (IS_ALIGNED(width, 2)) { - ARGBMirrorRow = ARGBMirrorRow_MMI; - } - } -#endif #if defined(HAS_ARGBMIRRORROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ARGBMirrorRow = ARGBMirrorRow_Any_MSA; @@ -163,6 +156,14 @@ static int ARGBRotate180(const uint8_t* src_argb, } } #endif +#if defined(HAS_ARGBMIRRORROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + ARGBMirrorRow = ARGBMirrorRow_Any_LASX; + if (IS_ALIGNED(width, 16)) { + ARGBMirrorRow = ARGBMirrorRow_LASX; + } + } +#endif #if defined(HAS_COPYROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2; diff --git a/third-party/libyuv/third_party/libyuv/source/rotate_lsx.cc b/third-party/libyuv/third_party/libyuv/source/rotate_lsx.cc new file mode 100644 index 0000000000..94a2b91cd8 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/source/rotate_lsx.cc @@ -0,0 +1,243 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Copyright (c) 2022 Loongson Technology Corporation Limited + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "libyuv/rotate_row.h" + +#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) +#include "libyuv/loongson_intrinsics.h" + +#ifdef __cplusplus +namespace libyuv { +extern "C" { +#endif + +#define ILVLH_B(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + DUP2_ARG2(__lsx_vilvl_b, in1, in0, in3, in2, out0, out2); \ + DUP2_ARG2(__lsx_vilvh_b, in1, in0, in3, in2, out1, out3); \ + } + +#define ILVLH_H(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, out0, out2); \ + DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, out1, out3); \ + } + +#define ILVLH_W(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + DUP2_ARG2(__lsx_vilvl_w, in1, in0, in3, in2, out0, out2); \ + DUP2_ARG2(__lsx_vilvh_w, in1, in0, in3, in2, out1, out3); \ + } + +#define ILVLH_D(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + DUP2_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, out0, out2); \ + DUP2_ARG2(__lsx_vilvh_d, in1, in0, in3, in2, out1, out3); \ + } + +#define LSX_ST_4(_dst0, _dst1, _dst2, _dst3, _dst, _stride, _stride2, \ + _stride3, _stride4) \ + { \ + __lsx_vst(_dst0, _dst, 0); \ + __lsx_vstx(_dst1, _dst, _stride); \ + __lsx_vstx(_dst2, _dst, _stride2); \ + __lsx_vstx(_dst3, _dst, _stride3); \ + _dst += _stride4; \ + } + +#define LSX_ST_2(_dst0, _dst1, _dst, _stride, _stride2) \ + { \ + __lsx_vst(_dst0, _dst, 0); \ + __lsx_vstx(_dst1, _dst, _stride); \ + _dst += _stride2; \ + } + +void TransposeWx16_C(const uint8_t* src, + int src_stride, + uint8_t* dst, + int dst_stride, + int width) { + TransposeWx8_C(src, src_stride, dst, dst_stride, width); + TransposeWx8_C((src + 8 * src_stride), src_stride, (dst + 8), dst_stride, + width); +} + +void TransposeUVWx16_C(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width) { + TransposeUVWx8_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, + 
width); + TransposeUVWx8_C((src + 8 * src_stride), src_stride, (dst_a + 8), + dst_stride_a, (dst_b + 8), dst_stride_b, width); +} + +void TransposeWx16_LSX(const uint8_t* src, + int src_stride, + uint8_t* dst, + int dst_stride, + int width) { + int x; + int len = width / 16; + uint8_t* s; + int src_stride2 = src_stride << 1; + int src_stride3 = src_stride + src_stride2; + int src_stride4 = src_stride2 << 1; + int dst_stride2 = dst_stride << 1; + int dst_stride3 = dst_stride + dst_stride2; + int dst_stride4 = dst_stride2 << 1; + __m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; + __m128i res0, res1, res2, res3, res4, res5, res6, res7, res8, res9; + + for (x = 0; x < len; x++) { + s = (uint8_t*)src; + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3); + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7); + ILVLH_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3); + ILVLH_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7); + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3); + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7); + res8 = __lsx_vilvl_w(reg4, reg0); + res9 = __lsx_vilvh_w(reg4, reg0); + ILVLH_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3); + LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3, + dst_stride4); + res8 = __lsx_vilvl_w(reg5, reg1); + res9 = __lsx_vilvh_w(reg5, reg1); + ILVLH_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3); + LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3, + dst_stride4); + res8 = __lsx_vilvl_w(reg6, reg2); + res9 = __lsx_vilvh_w(reg6, reg2); + ILVLH_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3); + LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3, + dst_stride4); + res8 = __lsx_vilvl_w(reg7, reg3); + res9 = __lsx_vilvh_w(reg7, reg3); + ILVLH_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3); + LSX_ST_4(dst0, dst1, dst2, dst3, dst, dst_stride, dst_stride2, dst_stride3, + dst_stride4); + src += 16; + } +} + +void TransposeUVWx16_LSX(const uint8_t* src, + int src_stride, + uint8_t* dst_a, + int dst_stride_a, + uint8_t* dst_b, + int dst_stride_b, + int width) { + int x; + int len = width / 8; + uint8_t* s; + int src_stride2 = src_stride << 1; + int src_stride3 = src_stride + src_stride2; + int src_stride4 = src_stride2 << 1; + int dst_stride_a2 = dst_stride_a << 1; + int dst_stride_b2 = dst_stride_b << 1; + __m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; + __m128i res0, res1, res2, res3, res4, res5, res6, res7, 
res8, res9; + + for (x = 0; x < len; x++) { + s = (uint8_t*)src; + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3); + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7); + ILVLH_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3); + ILVLH_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7); + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg0, reg1, reg2, reg3); + src0 = __lsx_vld(s, 0); + src1 = __lsx_vldx(s, src_stride); + src2 = __lsx_vldx(s, src_stride2); + src3 = __lsx_vldx(s, src_stride3); + s += src_stride4; + ILVLH_B(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); + ILVLH_H(tmp0, tmp2, tmp1, tmp3, reg4, reg5, reg6, reg7); + res8 = __lsx_vilvl_w(reg4, reg0); + res9 = __lsx_vilvh_w(reg4, reg0); + ILVLH_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3); + LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2); + LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2); + res8 = __lsx_vilvl_w(reg5, reg1); + res9 = __lsx_vilvh_w(reg5, reg1); + ILVLH_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3); + LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2); + LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2); + res8 = __lsx_vilvl_w(reg6, reg2); + res9 = __lsx_vilvh_w(reg6, reg2); + ILVLH_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3); + LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2); + LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2); + res8 = __lsx_vilvl_w(reg7, reg3); + res9 = __lsx_vilvh_w(reg7, reg3); + ILVLH_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3); + LSX_ST_2(dst0, dst2, dst_a, dst_stride_a, dst_stride_a2); + LSX_ST_2(dst1, dst3, dst_b, dst_stride_b, dst_stride_b2); + src += 16; + } +} + +#ifdef __cplusplus +} // extern "C" +} // namespace libyuv +#endif + +#endif // !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) diff --git a/third-party/libyuv/third_party/libyuv/source/row_any.cc b/third-party/libyuv/third_party/libyuv/source/row_any.cc index c9a402eda2..3c7dc893ac 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_any.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_any.cc @@ -113,11 +113,8 @@ ANY41C(I444AlphaToARGBRow_Any_MSA, I444AlphaToARGBRow_MSA, 0, 0, 4, 7) #ifdef HAS_I422ALPHATOARGBROW_MSA ANY41C(I422AlphaToARGBRow_Any_MSA, I422AlphaToARGBRow_MSA, 1, 0, 4, 7) #endif -#ifdef HAS_I444ALPHATOARGBROW_MMI -ANY41C(I444AlphaToARGBRow_Any_MMI, I444AlphaToARGBRow_MMI, 0, 0, 4, 7) -#endif -#ifdef HAS_I422ALPHATOARGBROW_MMI -ANY41C(I422AlphaToARGBRow_Any_MMI, I422AlphaToARGBRow_MMI, 1, 0, 4, 7) +#ifdef HAS_I422ALPHATOARGBROW_LASX +ANY41C(I422AlphaToARGBRow_Any_LASX, I422AlphaToARGBRow_LASX, 1, 0, 4, 15) #endif #undef ANY41C @@ -265,9 +262,6 @@ ANY31(MergeRGBRow_Any_SSSE3, MergeRGBRow_SSSE3, 0, 0, 3, 15) #ifdef HAS_MERGERGBROW_NEON ANY31(MergeRGBRow_Any_NEON, MergeRGBRow_NEON, 0, 0, 3, 15) #endif -#ifdef HAS_MERGERGBROW_MMI -ANY31(MergeRGBRow_Any_MMI, MergeRGBRow_MMI, 0, 0, 3, 7) -#endif 
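The ANY31 wrappers above (and the other ANY* families in this file) share one remainder-handling scheme: run the SIMD kernel over the part of the row that is a multiple of MASK + 1 pixels, copy the leftover pixels into a zeroed scratch buffer, run the kernel once more on a full vector, and copy only the valid tail back out. A minimal stand-alone sketch of that scheme follows; the kernel name Merge3Row_SIMD, its scalar body, and the 16-pixel width are illustrative stand-ins, not code from this patch.

#include <stdint.h>
#include <string.h>

// Stand-in for a fixed-width SIMD kernel: packs three 1-byte planes into an
// interleaved 3-byte-per-pixel row, 16 pixels per call (scalar here).
static void Merge3Row_SIMD(const uint8_t* src_r, const uint8_t* src_g,
                           const uint8_t* src_b, uint8_t* dst_rgb, int width) {
  int i;
  for (i = 0; i < width; ++i) {
    dst_rgb[0] = src_r[i];
    dst_rgb[1] = src_g[i];
    dst_rgb[2] = src_b[i];
    dst_rgb += 3;
  }
}

// Any-width wrapper in the spirit of the ANY31 macro: SIMD-friendly portion
// first, then the tail through a scratch buffer so the kernel never reads or
// writes past the caller's row.
static void Merge3Row_Any(const uint8_t* src_r, const uint8_t* src_g,
                          const uint8_t* src_b, uint8_t* dst_rgb, int width) {
  enum { kMask = 15 };            // kernel handles 16 pixels per call
  uint8_t temp[16 * 3 + 16 * 3];  // three 16-byte source tails + dest tail
  int r = width & kMask;          // leftover pixels
  int n = width & ~kMask;         // multiple-of-16 portion
  memset(temp, 0, sizeof(temp));  // keep padded lanes defined
  if (n > 0) {
    Merge3Row_SIMD(src_r, src_g, src_b, dst_rgb, n);
  }
  if (r > 0) {
    memcpy(temp + 0, src_r + n, r);
    memcpy(temp + 16, src_g + n, r);
    memcpy(temp + 32, src_b + n, r);
    Merge3Row_SIMD(temp, temp + 16, temp + 32, temp + 48, kMask + 1);
    memcpy(dst_rgb + n * 3, temp + 48, r * 3);
  }
}

The real macros parametrize the plane count, bytes per pixel, and MASK, which is why the LSX/LASX entries added in this file typically differ from the MSA/NEON ones only in the MASK argument.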
#ifdef HAS_MERGEXRGBROW_SSE2 ANY31(MergeXRGBRow_Any_SSE2, MergeXRGBRow_SSE2, 0, 0, 4, 7) #endif @@ -291,8 +285,8 @@ ANY31(I422ToYUY2Row_Any_NEON, I422ToYUY2Row_NEON, 1, 1, 4, 15) #ifdef HAS_I422TOYUY2ROW_MSA ANY31(I422ToYUY2Row_Any_MSA, I422ToYUY2Row_MSA, 1, 1, 4, 31) #endif -#ifdef HAS_I422TOYUY2ROW_MMI -ANY31(I422ToYUY2Row_Any_MMI, I422ToYUY2Row_MMI, 1, 1, 4, 7) +#ifdef HAS_I422TOYUY2ROW_LASX +ANY31(I422ToYUY2Row_Any_LASX, I422ToYUY2Row_LASX, 1, 1, 4, 31) #endif #ifdef HAS_I422TOUYVYROW_NEON ANY31(I422ToUYVYRow_Any_NEON, I422ToUYVYRow_NEON, 1, 1, 4, 15) @@ -300,8 +294,8 @@ ANY31(I422ToUYVYRow_Any_NEON, I422ToUYVYRow_NEON, 1, 1, 4, 15) #ifdef HAS_I422TOUYVYROW_MSA ANY31(I422ToUYVYRow_Any_MSA, I422ToUYVYRow_MSA, 1, 1, 4, 31) #endif -#ifdef HAS_I422TOUYVYROW_MMI -ANY31(I422ToUYVYRow_Any_MMI, I422ToUYVYRow_MMI, 1, 1, 4, 7) +#ifdef HAS_I422TOUYVYROW_LASX +ANY31(I422ToUYVYRow_Any_LASX, I422ToUYVYRow_LASX, 1, 1, 4, 31) #endif #ifdef HAS_BLENDPLANEROW_AVX2 ANY31(BlendPlaneRow_Any_AVX2, BlendPlaneRow_AVX2, 0, 0, 1, 31) @@ -309,9 +303,6 @@ ANY31(BlendPlaneRow_Any_AVX2, BlendPlaneRow_AVX2, 0, 0, 1, 31) #ifdef HAS_BLENDPLANEROW_SSSE3 ANY31(BlendPlaneRow_Any_SSSE3, BlendPlaneRow_SSSE3, 0, 0, 1, 7) #endif -#ifdef HAS_BLENDPLANEROW_MMI -ANY31(BlendPlaneRow_Any_MMI, BlendPlaneRow_MMI, 0, 0, 1, 7) -#endif #undef ANY31 // Note that odd width replication includes 444 due to implementation @@ -368,18 +359,27 @@ ANY31C(I422ToAR30Row_Any_AVX2, I422ToAR30Row_AVX2, 1, 0, 4, 15) #ifdef HAS_I444TOARGBROW_SSSE3 ANY31C(I444ToARGBRow_Any_SSSE3, I444ToARGBRow_SSSE3, 0, 0, 4, 7) #endif +#ifdef HAS_I444TORGB24ROW_SSSE3 +ANY31C(I444ToRGB24Row_Any_SSSE3, I444ToRGB24Row_SSSE3, 0, 0, 3, 15) +#endif #ifdef HAS_I422TORGB24ROW_AVX2 ANY31C(I422ToRGB24Row_Any_AVX2, I422ToRGB24Row_AVX2, 1, 0, 3, 31) #endif #ifdef HAS_I422TOARGBROW_AVX2 ANY31C(I422ToARGBRow_Any_AVX2, I422ToARGBRow_AVX2, 1, 0, 4, 15) #endif +#ifdef HAS_I422TOARGBROW_AVX512BW +ANY31C(I422ToARGBRow_Any_AVX512BW, I422ToARGBRow_AVX512BW, 1, 0, 4, 31) +#endif #ifdef HAS_I422TORGBAROW_AVX2 ANY31C(I422ToRGBARow_Any_AVX2, I422ToRGBARow_AVX2, 1, 0, 4, 15) #endif #ifdef HAS_I444TOARGBROW_AVX2 ANY31C(I444ToARGBRow_Any_AVX2, I444ToARGBRow_AVX2, 0, 0, 4, 15) #endif +#ifdef HAS_I444TORGB24ROW_AVX2 +ANY31C(I444ToRGB24Row_Any_AVX2, I444ToRGB24Row_AVX2, 0, 0, 3, 31) +#endif #ifdef HAS_I422TOARGB4444ROW_AVX2 ANY31C(I422ToARGB4444Row_Any_AVX2, I422ToARGB4444Row_AVX2, 1, 0, 2, 15) #endif @@ -389,6 +389,9 @@ ANY31C(I422ToARGB1555Row_Any_AVX2, I422ToARGB1555Row_AVX2, 1, 0, 2, 15) #ifdef HAS_I422TORGB565ROW_AVX2 ANY31C(I422ToRGB565Row_Any_AVX2, I422ToRGB565Row_AVX2, 1, 0, 2, 15) #endif +#ifdef HAS_I444TORGB24ROW_NEON +ANY31C(I444ToRGB24Row_Any_NEON, I444ToRGB24Row_NEON, 0, 0, 3, 7) +#endif #ifdef HAS_I422TOARGBROW_NEON ANY31C(I444ToARGBRow_Any_NEON, I444ToARGBRow_NEON, 0, 0, 4, 7) ANY31C(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, 1, 0, 4, 7) @@ -407,14 +410,16 @@ ANY31C(I422ToARGB4444Row_Any_MSA, I422ToARGB4444Row_MSA, 1, 0, 2, 7) ANY31C(I422ToARGB1555Row_Any_MSA, I422ToARGB1555Row_MSA, 1, 0, 2, 7) ANY31C(I422ToRGB565Row_Any_MSA, I422ToRGB565Row_MSA, 1, 0, 2, 7) #endif -#ifdef HAS_I422TOARGBROW_MMI -ANY31C(I444ToARGBRow_Any_MMI, I444ToARGBRow_MMI, 0, 0, 4, 7) -ANY31C(I422ToARGBRow_Any_MMI, I422ToARGBRow_MMI, 1, 0, 4, 7) -ANY31C(I422ToRGB24Row_Any_MMI, I422ToRGB24Row_MMI, 1, 0, 3, 15) -ANY31C(I422ToARGB4444Row_Any_MMI, I422ToARGB4444Row_MMI, 1, 0, 2, 7) -ANY31C(I422ToARGB1555Row_Any_MMI, I422ToARGB1555Row_MMI, 1, 0, 2, 7) -ANY31C(I422ToRGB565Row_Any_MMI, I422ToRGB565Row_MMI, 1, 0, 2, 7) 
-ANY31C(I422ToRGBARow_Any_MMI, I422ToRGBARow_MMI, 1, 0, 4, 7) +#ifdef HAS_I422TOARGBROW_LASX +ANY31C(I422ToARGBRow_Any_LASX, I422ToARGBRow_LASX, 1, 0, 4, 31) +ANY31C(I422ToRGBARow_Any_LASX, I422ToRGBARow_LASX, 1, 0, 4, 31) +ANY31C(I422ToRGB24Row_Any_LASX, I422ToRGB24Row_LASX, 1, 0, 3, 31) +ANY31C(I422ToRGB565Row_Any_LASX, I422ToRGB565Row_LASX, 1, 0, 2, 31) +ANY31C(I422ToARGB4444Row_Any_LASX, I422ToARGB4444Row_LASX, 1, 0, 2, 31) +ANY31C(I422ToARGB1555Row_Any_LASX, I422ToARGB1555Row_LASX, 1, 0, 2, 31) +#endif +#ifdef HAS_I444TOARGBROW_LSX +ANY31C(I444ToARGBRow_Any_LSX, I444ToARGBRow_LSX, 0, 0, 4, 15) #endif #undef ANY31C @@ -463,9 +468,6 @@ ANY31CT(I410ToARGBRow_Any_AVX2, I410ToARGBRow_AVX2, 0, 0, uint16_t, 2, 4, 15) #ifdef HAS_I410TOAR30ROW_AVX2 ANY31CT(I410ToAR30Row_Any_AVX2, I410ToAR30Row_AVX2, 0, 0, uint16_t, 2, 4, 15) #endif -#ifdef HAS_I210TOARGBROW_MMI -ANY31CT(I210ToARGBRow_Any_MMI, I210ToARGBRow_MMI, 1, 0, uint16_t, 2, 4, 7) -#endif #ifdef HAS_I212TOAR30ROW_SSSE3 ANY31CT(I212ToAR30Row_Any_SSSE3, I212ToAR30Row_SSSE3, 1, 0, uint16_t, 2, 4, 7) #endif @@ -548,18 +550,18 @@ ANY31PT(MergeXRGB16To8Row_Any_NEON, #define ANY21(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, SBPP2, BPP, MASK) \ void NAMEANY(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, \ int width) { \ - SIMD_ALIGNED(uint8_t temp[64 * 3]); \ - memset(temp, 0, 64 * 2); /* for msan */ \ + SIMD_ALIGNED(uint8_t temp[128 * 3]); \ + memset(temp, 0, 128 * 2); /* for msan */ \ int r = width & MASK; \ int n = width & ~MASK; \ if (n > 0) { \ ANY_SIMD(y_buf, uv_buf, dst_ptr, n); \ } \ memcpy(temp, y_buf + n * SBPP, r * SBPP); \ - memcpy(temp + 64, uv_buf + (n >> UVSHIFT) * SBPP2, \ + memcpy(temp + 128, uv_buf + (n >> UVSHIFT) * SBPP2, \ SS(r, UVSHIFT) * SBPP2); \ - ANY_SIMD(temp, temp + 64, temp + 128, MASK + 1); \ - memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \ + ANY_SIMD(temp, temp + 128, temp + 256, MASK + 1); \ + memcpy(dst_ptr + n * BPP, temp + 256, r * BPP); \ } // Merge functions. 
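The ANY21 hunk just above doubles each scratch slot from 64 to 128 bytes. The remainder pass always runs the kernel over a full vector of MASK + 1 pixels, so the destination slot has to hold one complete vector of output; with 32-pixel kernels (MASK 31) emitting up to 4 bytes per pixel, that worst case is (31 + 1) * 4 = 128 bytes, which no longer fits in a 64-byte slot. The check below restates that bound; the 32-pixel/4-byte ceiling is an assumption used for illustration, not a limit spelled out in the patch.

// Assumed worst case for a kernel dispatched through ANY21: 32 pixels per
// call (MASK = 31) and 4 bytes of output per pixel.
enum { kMaxVectorPixels = 32, kMaxBytesPerPixel = 4, kScratchSlotBytes = 128 };
static_assert(kMaxVectorPixels * kMaxBytesPerPixel <= kScratchSlotBytes,
              "each ANY21 scratch slot must hold one full vector of output");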
@@ -575,12 +577,15 @@ ANY21(MergeUVRow_Any_NEON, MergeUVRow_NEON, 0, 1, 1, 2, 15) #ifdef HAS_MERGEUVROW_MSA ANY21(MergeUVRow_Any_MSA, MergeUVRow_MSA, 0, 1, 1, 2, 15) #endif -#ifdef HAS_MERGEUVROW_MMI -ANY21(MergeUVRow_Any_MMI, MergeUVRow_MMI, 0, 1, 1, 2, 7) +#ifdef HAS_MERGEUVROW_LSX +ANY21(MergeUVRow_Any_LSX, MergeUVRow_LSX, 0, 1, 1, 2, 15) #endif #ifdef HAS_NV21TOYUV24ROW_NEON ANY21(NV21ToYUV24Row_Any_NEON, NV21ToYUV24Row_NEON, 1, 1, 2, 3, 15) #endif +#ifdef HAS_NV21TOYUV24ROW_SSSE3 +ANY21(NV21ToYUV24Row_Any_SSSE3, NV21ToYUV24Row_SSSE3, 1, 1, 2, 3, 15) +#endif #ifdef HAS_NV21TOYUV24ROW_AVX2 ANY21(NV21ToYUV24Row_Any_AVX2, NV21ToYUV24Row_AVX2, 1, 1, 2, 3, 31) #endif @@ -615,20 +620,20 @@ ANY21(ARGBSubtractRow_Any_NEON, ARGBSubtractRow_NEON, 0, 4, 4, 4, 7) #ifdef HAS_ARGBMULTIPLYROW_MSA ANY21(ARGBMultiplyRow_Any_MSA, ARGBMultiplyRow_MSA, 0, 4, 4, 4, 3) #endif -#ifdef HAS_ARGBMULTIPLYROW_MMI -ANY21(ARGBMultiplyRow_Any_MMI, ARGBMultiplyRow_MMI, 0, 4, 4, 4, 1) +#ifdef HAS_ARGBMULTIPLYROW_LASX +ANY21(ARGBMultiplyRow_Any_LASX, ARGBMultiplyRow_LASX, 0, 4, 4, 4, 7) #endif #ifdef HAS_ARGBADDROW_MSA ANY21(ARGBAddRow_Any_MSA, ARGBAddRow_MSA, 0, 4, 4, 4, 7) #endif -#ifdef HAS_ARGBADDROW_MMI -ANY21(ARGBAddRow_Any_MMI, ARGBAddRow_MMI, 0, 4, 4, 4, 1) +#ifdef HAS_ARGBADDROW_LASX +ANY21(ARGBAddRow_Any_LASX, ARGBAddRow_LASX, 0, 4, 4, 4, 7) #endif #ifdef HAS_ARGBSUBTRACTROW_MSA ANY21(ARGBSubtractRow_Any_MSA, ARGBSubtractRow_MSA, 0, 4, 4, 4, 7) #endif -#ifdef HAS_ARGBSUBTRACTROW_MMI -ANY21(ARGBSubtractRow_Any_MMI, ARGBSubtractRow_MMI, 0, 4, 4, 4, 1) +#ifdef HAS_ARGBSUBTRACTROW_LASX +ANY21(ARGBSubtractRow_Any_LASX, ARGBSubtractRow_LASX, 0, 4, 4, 4, 7) #endif #ifdef HAS_SOBELROW_SSE2 ANY21(SobelRow_Any_SSE2, SobelRow_SSE2, 0, 1, 1, 4, 15) @@ -639,8 +644,8 @@ ANY21(SobelRow_Any_NEON, SobelRow_NEON, 0, 1, 1, 4, 7) #ifdef HAS_SOBELROW_MSA ANY21(SobelRow_Any_MSA, SobelRow_MSA, 0, 1, 1, 4, 15) #endif -#ifdef HAS_SOBELROW_MMI -ANY21(SobelRow_Any_MMI, SobelRow_MMI, 0, 1, 1, 4, 7) +#ifdef HAS_SOBELROW_LSX +ANY21(SobelRow_Any_LSX, SobelRow_LSX, 0, 1, 1, 4, 15) #endif #ifdef HAS_SOBELTOPLANEROW_SSE2 ANY21(SobelToPlaneRow_Any_SSE2, SobelToPlaneRow_SSE2, 0, 1, 1, 1, 15) @@ -651,8 +656,8 @@ ANY21(SobelToPlaneRow_Any_NEON, SobelToPlaneRow_NEON, 0, 1, 1, 1, 15) #ifdef HAS_SOBELTOPLANEROW_MSA ANY21(SobelToPlaneRow_Any_MSA, SobelToPlaneRow_MSA, 0, 1, 1, 1, 31) #endif -#ifdef HAS_SOBELTOPLANEROW_MMI -ANY21(SobelToPlaneRow_Any_MMI, SobelToPlaneRow_MMI, 0, 1, 1, 1, 7) +#ifdef HAS_SOBELTOPLANEROW_LSX +ANY21(SobelToPlaneRow_Any_LSX, SobelToPlaneRow_LSX, 0, 1, 1, 1, 31) #endif #ifdef HAS_SOBELXYROW_SSE2 ANY21(SobelXYRow_Any_SSE2, SobelXYRow_SSE2, 0, 1, 1, 4, 15) @@ -663,11 +668,40 @@ ANY21(SobelXYRow_Any_NEON, SobelXYRow_NEON, 0, 1, 1, 4, 7) #ifdef HAS_SOBELXYROW_MSA ANY21(SobelXYRow_Any_MSA, SobelXYRow_MSA, 0, 1, 1, 4, 15) #endif -#ifdef HAS_SOBELXYROW_MMI -ANY21(SobelXYRow_Any_MMI, SobelXYRow_MMI, 0, 1, 1, 4, 7) +#ifdef HAS_SOBELXYROW_LSX +ANY21(SobelXYRow_Any_LSX, SobelXYRow_LSX, 0, 1, 1, 4, 15) #endif #undef ANY21 +// Any 2 planes to 1 with stride +// width is measured in source pixels. 
4 bytes contains 2 pixels +#define ANY21S(NAMEANY, ANY_SIMD, SBPP, BPP, MASK) \ + void NAMEANY(const uint8_t* src_yuy2, int stride_yuy2, uint8_t* dst_uv, \ + int width) { \ + SIMD_ALIGNED(uint8_t temp[32 * 3]); \ + memset(temp, 0, 32 * 2); /* for msan */ \ + int awidth = (width + 1) / 2; \ + int r = awidth & MASK; \ + int n = awidth & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(src_yuy2, stride_yuy2, dst_uv, n * 2); \ + } \ + memcpy(temp, src_yuy2 + n * SBPP, r * SBPP); \ + memcpy(temp + 32, src_yuy2 + stride_yuy2 + n * SBPP, r * SBPP); \ + ANY_SIMD(temp, 32, temp + 64, MASK + 1); \ + memcpy(dst_uv + n * BPP, temp + 64, r * BPP); \ + } + +#ifdef HAS_YUY2TONVUVROW_NEON +ANY21S(YUY2ToNVUVRow_Any_NEON, YUY2ToNVUVRow_NEON, 4, 2, 7) +#endif +#ifdef HAS_YUY2TONVUVROW_SSE2 +ANY21S(YUY2ToNVUVRow_Any_SSE2, YUY2ToNVUVRow_SSE2, 4, 2, 7) +#endif +#ifdef HAS_YUY2TONVUVROW_AVX2 +ANY21S(YUY2ToNVUVRow_Any_AVX2, YUY2ToNVUVRow_AVX2, 4, 2, 15) +#endif + // Any 2 planes to 1 with yuvconstants #define ANY21C(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, SBPP2, BPP, MASK) \ void NAMEANY(const uint8_t* y_buf, const uint8_t* uv_buf, uint8_t* dst_ptr, \ @@ -699,8 +733,11 @@ ANY21C(NV12ToARGBRow_Any_NEON, NV12ToARGBRow_NEON, 1, 1, 2, 4, 7) #ifdef HAS_NV12TOARGBROW_MSA ANY21C(NV12ToARGBRow_Any_MSA, NV12ToARGBRow_MSA, 1, 1, 2, 4, 7) #endif -#ifdef HAS_NV12TOARGBROW_MMI -ANY21C(NV12ToARGBRow_Any_MMI, NV12ToARGBRow_MMI, 1, 1, 2, 4, 7) +#ifdef HAS_NV12TOARGBROW_LSX +ANY21C(NV12ToARGBRow_Any_LSX, NV12ToARGBRow_LSX, 1, 1, 2, 4, 7) +#endif +#ifdef HAS_NV12TOARGBROW_LASX +ANY21C(NV12ToARGBRow_Any_LASX, NV12ToARGBRow_LASX, 1, 1, 2, 4, 15) #endif #ifdef HAS_NV21TOARGBROW_SSSE3 ANY21C(NV21ToARGBRow_Any_SSSE3, NV21ToARGBRow_SSSE3, 1, 1, 2, 4, 7) @@ -714,8 +751,11 @@ ANY21C(NV21ToARGBRow_Any_NEON, NV21ToARGBRow_NEON, 1, 1, 2, 4, 7) #ifdef HAS_NV21TOARGBROW_MSA ANY21C(NV21ToARGBRow_Any_MSA, NV21ToARGBRow_MSA, 1, 1, 2, 4, 7) #endif -#ifdef HAS_NV21TOARGBROW_MMI -ANY21C(NV21ToARGBRow_Any_MMI, NV21ToARGBRow_MMI, 1, 1, 2, 4, 7) +#ifdef HAS_NV21TOARGBROW_LSX +ANY21C(NV21ToARGBRow_Any_LSX, NV21ToARGBRow_LSX, 1, 1, 2, 4, 7) +#endif +#ifdef HAS_NV21TOARGBROW_LASX +ANY21C(NV21ToARGBRow_Any_LASX, NV21ToARGBRow_LASX, 1, 1, 2, 4, 15) #endif #ifdef HAS_NV12TORGB24ROW_NEON ANY21C(NV12ToRGB24Row_Any_NEON, NV12ToRGB24Row_NEON, 1, 1, 2, 3, 7) @@ -726,9 +766,6 @@ ANY21C(NV21ToRGB24Row_Any_NEON, NV21ToRGB24Row_NEON, 1, 1, 2, 3, 7) #ifdef HAS_NV12TORGB24ROW_SSSE3 ANY21C(NV12ToRGB24Row_Any_SSSE3, NV12ToRGB24Row_SSSE3, 1, 1, 2, 3, 15) #endif -#ifdef HAS_NV12TORGB24ROW_MMI -ANY21C(NV12ToRGB24Row_Any_MMI, NV12ToRGB24Row_MMI, 1, 1, 2, 3, 7) -#endif #ifdef HAS_NV21TORGB24ROW_SSSE3 ANY21C(NV21ToRGB24Row_Any_SSSE3, NV21ToRGB24Row_SSSE3, 1, 1, 2, 3, 15) #endif @@ -738,9 +775,6 @@ ANY21C(NV12ToRGB24Row_Any_AVX2, NV12ToRGB24Row_AVX2, 1, 1, 2, 3, 31) #ifdef HAS_NV21TORGB24ROW_AVX2 ANY21C(NV21ToRGB24Row_Any_AVX2, NV21ToRGB24Row_AVX2, 1, 1, 2, 3, 31) #endif -#ifdef HAS_NV21TORGB24ROW_MMI -ANY21C(NV21ToRGB24Row_Any_MMI, NV21ToRGB24Row_MMI, 1, 1, 2, 3, 7) -#endif #ifdef HAS_NV12TORGB565ROW_SSSE3 ANY21C(NV12ToRGB565Row_Any_SSSE3, NV12ToRGB565Row_SSSE3, 1, 1, 2, 2, 7) #endif @@ -753,8 +787,11 @@ ANY21C(NV12ToRGB565Row_Any_NEON, NV12ToRGB565Row_NEON, 1, 1, 2, 2, 7) #ifdef HAS_NV12TORGB565ROW_MSA ANY21C(NV12ToRGB565Row_Any_MSA, NV12ToRGB565Row_MSA, 1, 1, 2, 2, 7) #endif -#ifdef HAS_NV12TORGB565ROW_MMI -ANY21C(NV12ToRGB565Row_Any_MMI, NV12ToRGB565Row_MMI, 1, 1, 2, 2, 7) +#ifdef HAS_NV12TORGB565ROW_LSX +ANY21C(NV12ToRGB565Row_Any_LSX, NV12ToRGB565Row_LSX, 1, 1, 2, 2, 7) +#endif +#ifdef 
HAS_NV12TORGB565ROW_LASX +ANY21C(NV12ToRGB565Row_Any_LASX, NV12ToRGB565Row_LASX, 1, 1, 2, 2, 15) #endif #undef ANY21C @@ -917,7 +954,7 @@ ANY11(ARGB1555ToARGBRow_Any_AVX2, ARGB1555ToARGBRow_AVX2, 0, 2, 4, 15) ANY11(ARGB4444ToARGBRow_Any_AVX2, ARGB4444ToARGBRow_AVX2, 0, 2, 4, 15) #endif #if defined(HAS_ARGBTORGB24ROW_NEON) -ANY11(ARGBToRGB24Row_Any_NEON, ARGBToRGB24Row_NEON, 0, 4, 3, 7) +ANY11(ARGBToRGB24Row_Any_NEON, ARGBToRGB24Row_NEON, 0, 4, 3, 15) ANY11(ARGBToRAWRow_Any_NEON, ARGBToRAWRow_NEON, 0, 4, 3, 7) ANY11(ARGBToRGB565Row_Any_NEON, ARGBToRGB565Row_NEON, 0, 4, 2, 7) ANY11(ARGBToARGB1555Row_Any_NEON, ARGBToARGB1555Row_NEON, 0, 4, 2, 7) @@ -932,13 +969,15 @@ ANY11(ARGBToARGB1555Row_Any_MSA, ARGBToARGB1555Row_MSA, 0, 4, 2, 7) ANY11(ARGBToARGB4444Row_Any_MSA, ARGBToARGB4444Row_MSA, 0, 4, 2, 7) ANY11(J400ToARGBRow_Any_MSA, J400ToARGBRow_MSA, 0, 1, 4, 15) #endif -#if defined(HAS_ARGBTORGB24ROW_MMI) -ANY11(ARGBToRGB24Row_Any_MMI, ARGBToRGB24Row_MMI, 0, 4, 3, 3) -ANY11(ARGBToRAWRow_Any_MMI, ARGBToRAWRow_MMI, 0, 4, 3, 3) -ANY11(ARGBToRGB565Row_Any_MMI, ARGBToRGB565Row_MMI, 0, 4, 2, 3) -ANY11(ARGBToARGB1555Row_Any_MMI, ARGBToARGB1555Row_MMI, 0, 4, 2, 3) -ANY11(ARGBToARGB4444Row_Any_MMI, ARGBToARGB4444Row_MMI, 0, 4, 2, 3) -ANY11(J400ToARGBRow_Any_MMI, J400ToARGBRow_MMI, 0, 1, 4, 3) +#if defined(HAS_ARGBTORGB24ROW_LASX) +ANY11(ARGBToRGB24Row_Any_LASX, ARGBToRGB24Row_LASX, 0, 4, 3, 31) +ANY11(ARGBToRAWRow_Any_LASX, ARGBToRAWRow_LASX, 0, 4, 3, 31) +ANY11(ARGBToRGB565Row_Any_LASX, ARGBToRGB565Row_LASX, 0, 4, 2, 15) +ANY11(ARGBToARGB1555Row_Any_LASX, ARGBToARGB1555Row_LASX, 0, 4, 2, 15) +ANY11(ARGBToARGB4444Row_Any_LASX, ARGBToARGB4444Row_LASX, 0, 4, 2, 15) +#endif +#if defined(HAS_J400TOARGBROW_LSX) +ANY11(J400ToARGBRow_Any_LSX, J400ToARGBRow_LSX, 0, 1, 4, 15) #endif #if defined(HAS_RAWTORGB24ROW_NEON) ANY11(RAWToRGB24Row_Any_NEON, RAWToRGB24Row_NEON, 0, 3, 3, 7) @@ -946,8 +985,8 @@ ANY11(RAWToRGB24Row_Any_NEON, RAWToRGB24Row_NEON, 0, 3, 3, 7) #if defined(HAS_RAWTORGB24ROW_MSA) ANY11(RAWToRGB24Row_Any_MSA, RAWToRGB24Row_MSA, 0, 3, 3, 15) #endif -#if defined(HAS_RAWTORGB24ROW_MMI) -ANY11(RAWToRGB24Row_Any_MMI, RAWToRGB24Row_MMI, 0, 3, 3, 3) +#if defined(HAS_RAWTORGB24ROW_LSX) +ANY11(RAWToRGB24Row_Any_LSX, RAWToRGB24Row_LSX, 0, 3, 3, 15) #endif #ifdef HAS_ARGBTOYROW_AVX2 ANY11(ARGBToYRow_Any_AVX2, ARGBToYRow_AVX2, 0, 4, 1, 31) @@ -958,6 +997,9 @@ ANY11(ABGRToYRow_Any_AVX2, ABGRToYRow_AVX2, 0, 4, 1, 31) #ifdef HAS_ARGBTOYJROW_AVX2 ANY11(ARGBToYJRow_Any_AVX2, ARGBToYJRow_AVX2, 0, 4, 1, 31) #endif +#ifdef HAS_ABGRTOYJROW_AVX2 +ANY11(ABGRToYJRow_Any_AVX2, ABGRToYJRow_AVX2, 0, 4, 1, 31) +#endif #ifdef HAS_RGBATOYJROW_AVX2 ANY11(RGBAToYJRow_Any_AVX2, RGBAToYJRow_AVX2, 0, 4, 1, 31) #endif @@ -974,65 +1016,76 @@ ANY11(ARGBToYRow_Any_SSSE3, ARGBToYRow_SSSE3, 0, 4, 1, 15) ANY11(BGRAToYRow_Any_SSSE3, BGRAToYRow_SSSE3, 0, 4, 1, 15) ANY11(ABGRToYRow_Any_SSSE3, ABGRToYRow_SSSE3, 0, 4, 1, 15) ANY11(RGBAToYRow_Any_SSSE3, RGBAToYRow_SSSE3, 0, 4, 1, 15) +#endif +#ifdef HAS_YUY2TOYROW_SSE2 ANY11(YUY2ToYRow_Any_SSE2, YUY2ToYRow_SSE2, 1, 4, 1, 15) ANY11(UYVYToYRow_Any_SSE2, UYVYToYRow_SSE2, 1, 4, 1, 15) #endif #ifdef HAS_ARGBTOYJROW_SSSE3 ANY11(ARGBToYJRow_Any_SSSE3, ARGBToYJRow_SSSE3, 0, 4, 1, 15) #endif +#ifdef HAS_ABGRTOYJROW_SSSE3 +ANY11(ABGRToYJRow_Any_SSSE3, ABGRToYJRow_SSSE3, 0, 4, 1, 15) +#endif #ifdef HAS_RGBATOYJROW_SSSE3 ANY11(RGBAToYJRow_Any_SSSE3, RGBAToYJRow_SSSE3, 0, 4, 1, 15) #endif #ifdef HAS_ARGBTOYROW_NEON -ANY11(ARGBToYRow_Any_NEON, ARGBToYRow_NEON, 0, 4, 1, 7) +ANY11(ARGBToYRow_Any_NEON, 
ARGBToYRow_NEON, 0, 4, 1, 15) #endif #ifdef HAS_ARGBTOYROW_MSA ANY11(ARGBToYRow_Any_MSA, ARGBToYRow_MSA, 0, 4, 1, 15) #endif -#ifdef HAS_ARGBTOYROW_MMI -ANY11(ARGBToYRow_Any_MMI, ARGBToYRow_MMI, 0, 4, 1, 7) +#ifdef HAS_ARGBTOYROW_LASX +ANY11(ARGBToYRow_Any_LASX, ARGBToYRow_LASX, 0, 4, 1, 31) #endif #ifdef HAS_ARGBTOYJROW_NEON -ANY11(ARGBToYJRow_Any_NEON, ARGBToYJRow_NEON, 0, 4, 1, 7) +ANY11(ARGBToYJRow_Any_NEON, ARGBToYJRow_NEON, 0, 4, 1, 15) +#endif +#ifdef HAS_ABGRTOYJROW_NEON +ANY11(ABGRToYJRow_Any_NEON, ABGRToYJRow_NEON, 0, 4, 1, 15) #endif #ifdef HAS_RGBATOYJROW_NEON -ANY11(RGBAToYJRow_Any_NEON, RGBAToYJRow_NEON, 0, 4, 1, 7) +ANY11(RGBAToYJRow_Any_NEON, RGBAToYJRow_NEON, 0, 4, 1, 15) #endif #ifdef HAS_ARGBTOYJROW_MSA ANY11(ARGBToYJRow_Any_MSA, ARGBToYJRow_MSA, 0, 4, 1, 15) #endif -#ifdef HAS_ARGBTOYJROW_MMI -ANY11(ARGBToYJRow_Any_MMI, ARGBToYJRow_MMI, 0, 4, 1, 7) +#ifdef HAS_ARGBTOYJROW_LSX +ANY11(ARGBToYJRow_Any_LSX, ARGBToYJRow_LSX, 0, 4, 1, 15) +#endif +#ifdef HAS_ARGBTOYJROW_LASX +ANY11(ARGBToYJRow_Any_LASX, ARGBToYJRow_LASX, 0, 4, 1, 31) #endif #ifdef HAS_BGRATOYROW_NEON -ANY11(BGRAToYRow_Any_NEON, BGRAToYRow_NEON, 0, 4, 1, 7) +ANY11(BGRAToYRow_Any_NEON, BGRAToYRow_NEON, 0, 4, 1, 15) #endif #ifdef HAS_BGRATOYROW_MSA ANY11(BGRAToYRow_Any_MSA, BGRAToYRow_MSA, 0, 4, 1, 15) #endif -#ifdef HAS_BGRATOYROW_MMI -ANY11(BGRAToYRow_Any_MMI, BGRAToYRow_MMI, 0, 4, 1, 7) +#ifdef HAS_BGRATOYROW_LSX +ANY11(BGRAToYRow_Any_LSX, BGRAToYRow_LSX, 0, 4, 1, 15) #endif #ifdef HAS_ABGRTOYROW_NEON -ANY11(ABGRToYRow_Any_NEON, ABGRToYRow_NEON, 0, 4, 1, 7) +ANY11(ABGRToYRow_Any_NEON, ABGRToYRow_NEON, 0, 4, 1, 15) #endif #ifdef HAS_ABGRTOYROW_MSA ANY11(ABGRToYRow_Any_MSA, ABGRToYRow_MSA, 0, 4, 1, 7) #endif -#ifdef HAS_ABGRTOYROW_MMI -ANY11(ABGRToYRow_Any_MMI, ABGRToYRow_MMI, 0, 4, 1, 7) +#ifdef HAS_ABGRTOYROW_LSX +ANY11(ABGRToYRow_Any_LSX, ABGRToYRow_LSX, 0, 4, 1, 15) #endif #ifdef HAS_RGBATOYROW_NEON -ANY11(RGBAToYRow_Any_NEON, RGBAToYRow_NEON, 0, 4, 1, 7) +ANY11(RGBAToYRow_Any_NEON, RGBAToYRow_NEON, 0, 4, 1, 15) #endif #ifdef HAS_RGBATOYROW_MSA ANY11(RGBAToYRow_Any_MSA, RGBAToYRow_MSA, 0, 4, 1, 15) #endif -#ifdef HAS_RGBATOYROW_MMI -ANY11(RGBAToYRow_Any_MMI, RGBAToYRow_MMI, 0, 4, 1, 7) +#ifdef HAS_RGBATOYROW_LSX +ANY11(RGBAToYRow_Any_LSX, RGBAToYRow_LSX, 0, 4, 1, 15) #endif #ifdef HAS_RGB24TOYROW_NEON -ANY11(RGB24ToYRow_Any_NEON, RGB24ToYRow_NEON, 0, 3, 1, 7) +ANY11(RGB24ToYRow_Any_NEON, RGB24ToYRow_NEON, 0, 3, 1, 15) #endif #ifdef HAS_RGB24TOYJROW_AVX2 ANY11(RGB24ToYJRow_Any_AVX2, RGB24ToYJRow_AVX2, 0, 3, 1, 31) @@ -1041,16 +1094,19 @@ ANY11(RGB24ToYJRow_Any_AVX2, RGB24ToYJRow_AVX2, 0, 3, 1, 31) ANY11(RGB24ToYJRow_Any_SSSE3, RGB24ToYJRow_SSSE3, 0, 3, 1, 15) #endif #ifdef HAS_RGB24TOYJROW_NEON -ANY11(RGB24ToYJRow_Any_NEON, RGB24ToYJRow_NEON, 0, 3, 1, 7) +ANY11(RGB24ToYJRow_Any_NEON, RGB24ToYJRow_NEON, 0, 3, 1, 15) #endif #ifdef HAS_RGB24TOYROW_MSA ANY11(RGB24ToYRow_Any_MSA, RGB24ToYRow_MSA, 0, 3, 1, 15) #endif -#ifdef HAS_RGB24TOYROW_MMI -ANY11(RGB24ToYRow_Any_MMI, RGB24ToYRow_MMI, 0, 3, 1, 7) +#ifdef HAS_RGB24TOYROW_LSX +ANY11(RGB24ToYRow_Any_LSX, RGB24ToYRow_LSX, 0, 3, 1, 15) +#endif +#ifdef HAS_RGB24TOYROW_LASX +ANY11(RGB24ToYRow_Any_LASX, RGB24ToYRow_LASX, 0, 3, 1, 31) #endif #ifdef HAS_RAWTOYROW_NEON -ANY11(RAWToYRow_Any_NEON, RAWToYRow_NEON, 0, 3, 1, 7) +ANY11(RAWToYRow_Any_NEON, RAWToYRow_NEON, 0, 3, 1, 15) #endif #ifdef HAS_RAWTOYJROW_AVX2 ANY11(RAWToYJRow_Any_AVX2, RAWToYJRow_AVX2, 0, 3, 1, 31) @@ -1059,13 +1115,16 @@ ANY11(RAWToYJRow_Any_AVX2, RAWToYJRow_AVX2, 0, 3, 1, 31) 
ANY11(RAWToYJRow_Any_SSSE3, RAWToYJRow_SSSE3, 0, 3, 1, 15) #endif #ifdef HAS_RAWTOYJROW_NEON -ANY11(RAWToYJRow_Any_NEON, RAWToYJRow_NEON, 0, 3, 1, 7) +ANY11(RAWToYJRow_Any_NEON, RAWToYJRow_NEON, 0, 3, 1, 15) #endif #ifdef HAS_RAWTOYROW_MSA ANY11(RAWToYRow_Any_MSA, RAWToYRow_MSA, 0, 3, 1, 15) #endif -#ifdef HAS_RAWTOYROW_MMI -ANY11(RAWToYRow_Any_MMI, RAWToYRow_MMI, 0, 3, 1, 7) +#ifdef HAS_RAWTOYROW_LSX +ANY11(RAWToYRow_Any_LSX, RAWToYRow_LSX, 0, 3, 1, 15) +#endif +#ifdef HAS_RAWTOYROW_LASX +ANY11(RAWToYRow_Any_LASX, RAWToYRow_LASX, 0, 3, 1, 31) #endif #ifdef HAS_RGB565TOYROW_NEON ANY11(RGB565ToYRow_Any_NEON, RGB565ToYRow_NEON, 0, 2, 1, 7) @@ -1073,8 +1132,11 @@ ANY11(RGB565ToYRow_Any_NEON, RGB565ToYRow_NEON, 0, 2, 1, 7) #ifdef HAS_RGB565TOYROW_MSA ANY11(RGB565ToYRow_Any_MSA, RGB565ToYRow_MSA, 0, 2, 1, 15) #endif -#ifdef HAS_RGB565TOYROW_MMI -ANY11(RGB565ToYRow_Any_MMI, RGB565ToYRow_MMI, 0, 2, 1, 7) +#ifdef HAS_RGB565TOYROW_LSX +ANY11(RGB565ToYRow_Any_LSX, RGB565ToYRow_LSX, 0, 2, 1, 15) +#endif +#ifdef HAS_RGB565TOYROW_LASX +ANY11(RGB565ToYRow_Any_LASX, RGB565ToYRow_LASX, 0, 2, 1, 31) #endif #ifdef HAS_ARGB1555TOYROW_NEON ANY11(ARGB1555ToYRow_Any_NEON, ARGB1555ToYRow_NEON, 0, 2, 1, 7) @@ -1082,15 +1144,15 @@ ANY11(ARGB1555ToYRow_Any_NEON, ARGB1555ToYRow_NEON, 0, 2, 1, 7) #ifdef HAS_ARGB1555TOYROW_MSA ANY11(ARGB1555ToYRow_Any_MSA, ARGB1555ToYRow_MSA, 0, 2, 1, 15) #endif -#ifdef HAS_ARGB1555TOYROW_MMI -ANY11(ARGB1555ToYRow_Any_MMI, ARGB1555ToYRow_MMI, 0, 2, 1, 7) +#ifdef HAS_ARGB1555TOYROW_LSX +ANY11(ARGB1555ToYRow_Any_LSX, ARGB1555ToYRow_LSX, 0, 2, 1, 15) +#endif +#ifdef HAS_ARGB1555TOYROW_LASX +ANY11(ARGB1555ToYRow_Any_LASX, ARGB1555ToYRow_LASX, 0, 2, 1, 31) #endif #ifdef HAS_ARGB4444TOYROW_NEON ANY11(ARGB4444ToYRow_Any_NEON, ARGB4444ToYRow_NEON, 0, 2, 1, 7) #endif -#ifdef HAS_ARGB4444TOYROW_MMI -ANY11(ARGB4444ToYRow_Any_MMI, ARGB4444ToYRow_MMI, 0, 2, 1, 7) -#endif #ifdef HAS_YUY2TOYROW_NEON ANY11(YUY2ToYRow_Any_NEON, YUY2ToYRow_NEON, 1, 4, 1, 15) #endif @@ -1100,14 +1162,14 @@ ANY11(UYVYToYRow_Any_NEON, UYVYToYRow_NEON, 1, 4, 1, 15) #ifdef HAS_YUY2TOYROW_MSA ANY11(YUY2ToYRow_Any_MSA, YUY2ToYRow_MSA, 1, 4, 1, 31) #endif -#ifdef HAS_YUY2TOYROW_MMI -ANY11(YUY2ToYRow_Any_MMI, YUY2ToYRow_MMI, 1, 4, 1, 7) +#ifdef HAS_YUY2TOYROW_LASX +ANY11(YUY2ToYRow_Any_LASX, YUY2ToYRow_LASX, 1, 4, 1, 31) #endif #ifdef HAS_UYVYTOYROW_MSA ANY11(UYVYToYRow_Any_MSA, UYVYToYRow_MSA, 1, 4, 1, 31) #endif -#ifdef HAS_UYVYTOYROW_MMI -ANY11(UYVYToYRow_Any_MMI, UYVYToYRow_MMI, 1, 4, 1, 15) +#ifdef HAS_UYVYTOYROW_LASX +ANY11(UYVYToYRow_Any_LASX, UYVYToYRow_LASX, 1, 4, 1, 31) #endif #ifdef HAS_AYUVTOYROW_NEON ANY11(AYUVToYRow_Any_NEON, AYUVToYRow_NEON, 0, 4, 1, 15) @@ -1127,8 +1189,11 @@ ANY11(RGB24ToARGBRow_Any_NEON, RGB24ToARGBRow_NEON, 0, 3, 4, 7) #ifdef HAS_RGB24TOARGBROW_MSA ANY11(RGB24ToARGBRow_Any_MSA, RGB24ToARGBRow_MSA, 0, 3, 4, 15) #endif -#ifdef HAS_RGB24TOARGBROW_MMI -ANY11(RGB24ToARGBRow_Any_MMI, RGB24ToARGBRow_MMI, 0, 3, 4, 3) +#ifdef HAS_RGB24TOARGBROW_LSX +ANY11(RGB24ToARGBRow_Any_LSX, RGB24ToARGBRow_LSX, 0, 3, 4, 15) +#endif +#ifdef HAS_RGB24TOARGBROW_LASX +ANY11(RGB24ToARGBRow_Any_LASX, RGB24ToARGBRow_LASX, 0, 3, 4, 31) #endif #ifdef HAS_RAWTOARGBROW_NEON ANY11(RAWToARGBRow_Any_NEON, RAWToARGBRow_NEON, 0, 3, 4, 7) @@ -1139,8 +1204,11 @@ ANY11(RAWToRGBARow_Any_NEON, RAWToRGBARow_NEON, 0, 3, 4, 7) #ifdef HAS_RAWTOARGBROW_MSA ANY11(RAWToARGBRow_Any_MSA, RAWToARGBRow_MSA, 0, 3, 4, 15) #endif -#ifdef HAS_RAWTOARGBROW_MMI -ANY11(RAWToARGBRow_Any_MMI, RAWToARGBRow_MMI, 0, 3, 4, 3) +#ifdef HAS_RAWTOARGBROW_LSX 
+ANY11(RAWToARGBRow_Any_LSX, RAWToARGBRow_LSX, 0, 3, 4, 15) +#endif +#ifdef HAS_RAWTOARGBROW_LASX +ANY11(RAWToARGBRow_Any_LASX, RAWToARGBRow_LASX, 0, 3, 4, 31) #endif #ifdef HAS_RGB565TOARGBROW_NEON ANY11(RGB565ToARGBRow_Any_NEON, RGB565ToARGBRow_NEON, 0, 2, 4, 7) @@ -1148,8 +1216,11 @@ ANY11(RGB565ToARGBRow_Any_NEON, RGB565ToARGBRow_NEON, 0, 2, 4, 7) #ifdef HAS_RGB565TOARGBROW_MSA ANY11(RGB565ToARGBRow_Any_MSA, RGB565ToARGBRow_MSA, 0, 2, 4, 15) #endif -#ifdef HAS_RGB565TOARGBROW_MMI -ANY11(RGB565ToARGBRow_Any_MMI, RGB565ToARGBRow_MMI, 0, 2, 4, 3) +#ifdef HAS_RGB565TOARGBROW_LSX +ANY11(RGB565ToARGBRow_Any_LSX, RGB565ToARGBRow_LSX, 0, 2, 4, 15) +#endif +#ifdef HAS_RGB565TOARGBROW_LASX +ANY11(RGB565ToARGBRow_Any_LASX, RGB565ToARGBRow_LASX, 0, 2, 4, 31) #endif #ifdef HAS_ARGB1555TOARGBROW_NEON ANY11(ARGB1555ToARGBRow_Any_NEON, ARGB1555ToARGBRow_NEON, 0, 2, 4, 7) @@ -1157,8 +1228,11 @@ ANY11(ARGB1555ToARGBRow_Any_NEON, ARGB1555ToARGBRow_NEON, 0, 2, 4, 7) #ifdef HAS_ARGB1555TOARGBROW_MSA ANY11(ARGB1555ToARGBRow_Any_MSA, ARGB1555ToARGBRow_MSA, 0, 2, 4, 15) #endif -#ifdef HAS_ARGB1555TOARGBROW_MMI -ANY11(ARGB1555ToARGBRow_Any_MMI, ARGB1555ToARGBRow_MMI, 0, 2, 4, 3) +#ifdef HAS_ARGB1555TOARGBROW_LSX +ANY11(ARGB1555ToARGBRow_Any_LSX, ARGB1555ToARGBRow_LSX, 0, 2, 4, 15) +#endif +#ifdef HAS_ARGB1555TOARGBROW_LASX +ANY11(ARGB1555ToARGBRow_Any_LASX, ARGB1555ToARGBRow_LASX, 0, 2, 4, 31) #endif #ifdef HAS_ARGB4444TOARGBROW_NEON ANY11(ARGB4444ToARGBRow_Any_NEON, ARGB4444ToARGBRow_NEON, 0, 2, 4, 7) @@ -1166,8 +1240,11 @@ ANY11(ARGB4444ToARGBRow_Any_NEON, ARGB4444ToARGBRow_NEON, 0, 2, 4, 7) #ifdef HAS_ARGB4444TOARGBROW_MSA ANY11(ARGB4444ToARGBRow_Any_MSA, ARGB4444ToARGBRow_MSA, 0, 2, 4, 15) #endif -#ifdef HAS_ARGB4444TOARGBROW_MMI -ANY11(ARGB4444ToARGBRow_Any_MMI, ARGB4444ToARGBRow_MMI, 0, 2, 4, 3) +#ifdef HAS_ARGB4444TOARGBROW_LSX +ANY11(ARGB4444ToARGBRow_Any_LSX, ARGB4444ToARGBRow_LSX, 0, 2, 4, 15) +#endif +#ifdef HAS_ARGB4444TOARGBROW_LASX +ANY11(ARGB4444ToARGBRow_Any_LASX, ARGB4444ToARGBRow_LASX, 0, 2, 4, 31) #endif #ifdef HAS_ARGBATTENUATEROW_SSSE3 ANY11(ARGBAttenuateRow_Any_SSSE3, ARGBAttenuateRow_SSSE3, 0, 4, 4, 3) @@ -1187,8 +1264,8 @@ ANY11(ARGBAttenuateRow_Any_NEON, ARGBAttenuateRow_NEON, 0, 4, 4, 7) #ifdef HAS_ARGBATTENUATEROW_MSA ANY11(ARGBAttenuateRow_Any_MSA, ARGBAttenuateRow_MSA, 0, 4, 4, 7) #endif -#ifdef HAS_ARGBATTENUATEROW_MMI -ANY11(ARGBAttenuateRow_Any_MMI, ARGBAttenuateRow_MMI, 0, 4, 4, 1) +#ifdef HAS_ARGBATTENUATEROW_LASX +ANY11(ARGBAttenuateRow_Any_LASX, ARGBAttenuateRow_LASX, 0, 4, 4, 15) #endif #ifdef HAS_ARGBEXTRACTALPHAROW_SSE2 ANY11(ARGBExtractAlphaRow_Any_SSE2, ARGBExtractAlphaRow_SSE2, 0, 4, 1, 7) @@ -1202,8 +1279,8 @@ ANY11(ARGBExtractAlphaRow_Any_NEON, ARGBExtractAlphaRow_NEON, 0, 4, 1, 15) #ifdef HAS_ARGBEXTRACTALPHAROW_MSA ANY11(ARGBExtractAlphaRow_Any_MSA, ARGBExtractAlphaRow_MSA, 0, 4, 1, 15) #endif -#ifdef HAS_ARGBEXTRACTALPHAROW_MMI -ANY11(ARGBExtractAlphaRow_Any_MMI, ARGBExtractAlphaRow_MMI, 0, 4, 1, 7) +#ifdef HAS_ARGBEXTRACTALPHAROW_LSX +ANY11(ARGBExtractAlphaRow_Any_LSX, ARGBExtractAlphaRow_LSX, 0, 4, 1, 15) #endif #undef ANY11 @@ -1229,18 +1306,12 @@ ANY11B(ARGBCopyAlphaRow_Any_AVX2, ARGBCopyAlphaRow_AVX2, 0, 4, 4, 15) #ifdef HAS_ARGBCOPYALPHAROW_SSE2 ANY11B(ARGBCopyAlphaRow_Any_SSE2, ARGBCopyAlphaRow_SSE2, 0, 4, 4, 7) #endif -#ifdef HAS_ARGBCOPYALPHAROW_MMI -ANY11B(ARGBCopyAlphaRow_Any_MMI, ARGBCopyAlphaRow_MMI, 0, 4, 4, 1) -#endif #ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2 ANY11B(ARGBCopyYToAlphaRow_Any_AVX2, ARGBCopyYToAlphaRow_AVX2, 0, 1, 4, 15) #endif #ifdef 
HAS_ARGBCOPYYTOALPHAROW_SSE2 ANY11B(ARGBCopyYToAlphaRow_Any_SSE2, ARGBCopyYToAlphaRow_SSE2, 0, 1, 4, 7) #endif -#ifdef HAS_ARGBCOPYYTOALPHAROW_MMI -ANY11B(ARGBCopyYToAlphaRow_Any_MMI, ARGBCopyYToAlphaRow_MMI, 0, 1, 4, 7) -#endif #undef ANY11B // Any 1 to 1 with parameter. @@ -1290,13 +1361,13 @@ ANY11P(I400ToARGBRow_Any_MSA, 4, 15) #endif -#if defined(HAS_I400TOARGBROW_MMI) -ANY11P(I400ToARGBRow_Any_MMI, - I400ToARGBRow_MMI, +#if defined(HAS_I400TOARGBROW_LSX) +ANY11P(I400ToARGBRow_Any_LSX, + I400ToARGBRow_LSX, const struct YuvConstants*, 1, 4, - 7) + 15) #endif #if defined(HAS_ARGBTORGB565DITHERROW_SSE2) @@ -1331,13 +1402,13 @@ ANY11P(ARGBToRGB565DitherRow_Any_MSA, 2, 7) #endif -#if defined(HAS_ARGBTORGB565DITHERROW_MMI) -ANY11P(ARGBToRGB565DitherRow_Any_MMI, - ARGBToRGB565DitherRow_MMI, +#if defined(HAS_ARGBTORGB565DITHERROW_LASX) +ANY11P(ARGBToRGB565DitherRow_Any_LASX, + ARGBToRGB565DitherRow_LASX, const uint32_t, 4, 2, - 3) + 15) #endif #ifdef HAS_ARGBSHUFFLEROW_SSSE3 ANY11P(ARGBShuffleRow_Any_SSSE3, ARGBShuffleRow_SSSE3, const uint8_t*, 4, 4, 7) @@ -1351,8 +1422,8 @@ ANY11P(ARGBShuffleRow_Any_NEON, ARGBShuffleRow_NEON, const uint8_t*, 4, 4, 3) #ifdef HAS_ARGBSHUFFLEROW_MSA ANY11P(ARGBShuffleRow_Any_MSA, ARGBShuffleRow_MSA, const uint8_t*, 4, 4, 7) #endif -#ifdef HAS_ARGBSHUFFLEROW_MMI -ANY11P(ARGBShuffleRow_Any_MMI, ARGBShuffleRow_MMI, const uint8_t*, 4, 4, 1) +#ifdef HAS_ARGBSHUFFLEROW_LASX +ANY11P(ARGBShuffleRow_Any_LASX, ARGBShuffleRow_LASX, const uint8_t*, 4, 4, 15) #endif #undef ANY11P #undef ANY11P @@ -1457,6 +1528,15 @@ ANY11C(Convert16To8Row_Any_AVX2, uint8_t, 31) #endif +#ifdef HAS_CONVERT16TO8ROW_NEON +ANY11C(Convert16To8Row_Any_NEON, + Convert16To8Row_NEON, + 2, + 1, + uint16_t, + uint8_t, + 15) +#endif #ifdef HAS_CONVERT8TO16ROW_SSE2 ANY11C(Convert8To16Row_Any_SSE2, Convert8To16Row_SSE2, @@ -1549,6 +1629,9 @@ ANY11P16(HalfFloatRow_Any_MSA, HalfFloatRow_MSA, uint16_t, uint16_t, 2, 2, 31) #ifdef HAS_BYTETOFLOATROW_NEON ANY11P16(ByteToFloatRow_Any_NEON, ByteToFloatRow_NEON, uint8_t, float, 1, 3, 7) #endif +#ifdef HAS_HALFFLOATROW_LSX +ANY11P16(HalfFloatRow_Any_LSX, HalfFloatRow_LSX, uint16_t, uint16_t, 2, 2, 31) +#endif #undef ANY11P16 // Any 1 to 1 with yuvconstants @@ -1582,46 +1665,108 @@ ANY11C(UYVYToARGBRow_Any_NEON, UYVYToARGBRow_NEON, 1, 4, 4, 7) ANY11C(YUY2ToARGBRow_Any_MSA, YUY2ToARGBRow_MSA, 1, 4, 4, 7) ANY11C(UYVYToARGBRow_Any_MSA, UYVYToARGBRow_MSA, 1, 4, 4, 7) #endif -#if defined(HAS_YUY2TOARGBROW_MMI) -ANY11C(YUY2ToARGBRow_Any_MMI, YUY2ToARGBRow_MMI, 1, 4, 4, 7) -ANY11C(UYVYToARGBRow_Any_MMI, UYVYToARGBRow_MMI, 1, 4, 4, 7) +#if defined(HAS_YUY2TOARGBROW_LSX) +ANY11C(YUY2ToARGBRow_Any_LSX, YUY2ToARGBRow_LSX, 1, 4, 4, 7) +ANY11C(UYVYToARGBRow_Any_LSX, UYVYToARGBRow_LSX, 1, 4, 4, 7) #endif #undef ANY11C // Any 1 to 1 interpolate. Takes 2 rows of source via stride. 
-#define ANY11I(NAMEANY, ANY_SIMD, SBPP, BPP, MASK) \ - void NAMEANY(uint8_t* dst_ptr, const uint8_t* src_ptr, ptrdiff_t src_stride, \ - int width, int source_y_fraction) { \ - SIMD_ALIGNED(uint8_t temp[64 * 3]); \ - memset(temp, 0, 64 * 2); /* for msan */ \ - int r = width & MASK; \ - int n = width & ~MASK; \ - if (n > 0) { \ - ANY_SIMD(dst_ptr, src_ptr, src_stride, n, source_y_fraction); \ - } \ - memcpy(temp, src_ptr + n * SBPP, r * SBPP); \ - memcpy(temp + 64, src_ptr + src_stride + n * SBPP, r * SBPP); \ - ANY_SIMD(temp + 128, temp, 64, MASK + 1, source_y_fraction); \ - memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \ +#define ANY11I(NAMEANY, ANY_SIMD, TD, TS, SBPP, BPP, MASK) \ + void NAMEANY(TD* dst_ptr, const TS* src_ptr, ptrdiff_t src_stride, \ + int width, int source_y_fraction) { \ + SIMD_ALIGNED(TS temps[64 * 2]); \ + SIMD_ALIGNED(TD tempd[64]); \ + memset(temps, 0, sizeof(temps)); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(dst_ptr, src_ptr, src_stride, n, source_y_fraction); \ + } \ + memcpy(temps, src_ptr + n * SBPP, r * SBPP * sizeof(TS)); \ + if (source_y_fraction) { \ + memcpy(temps + 64, src_ptr + src_stride + n * SBPP, \ + r * SBPP * sizeof(TS)); \ + } \ + ANY_SIMD(tempd, temps, 64, MASK + 1, source_y_fraction); \ + memcpy(dst_ptr + n * BPP, tempd, r * BPP * sizeof(TD)); \ } #ifdef HAS_INTERPOLATEROW_AVX2 -ANY11I(InterpolateRow_Any_AVX2, InterpolateRow_AVX2, 1, 1, 31) +ANY11I(InterpolateRow_Any_AVX2, InterpolateRow_AVX2, uint8_t, uint8_t, 1, 1, 31) #endif #ifdef HAS_INTERPOLATEROW_SSSE3 -ANY11I(InterpolateRow_Any_SSSE3, InterpolateRow_SSSE3, 1, 1, 15) +ANY11I(InterpolateRow_Any_SSSE3, + InterpolateRow_SSSE3, + uint8_t, + uint8_t, + 1, + 1, + 15) #endif #ifdef HAS_INTERPOLATEROW_NEON -ANY11I(InterpolateRow_Any_NEON, InterpolateRow_NEON, 1, 1, 15) +ANY11I(InterpolateRow_Any_NEON, InterpolateRow_NEON, uint8_t, uint8_t, 1, 1, 15) #endif #ifdef HAS_INTERPOLATEROW_MSA -ANY11I(InterpolateRow_Any_MSA, InterpolateRow_MSA, 1, 1, 31) +ANY11I(InterpolateRow_Any_MSA, InterpolateRow_MSA, uint8_t, uint8_t, 1, 1, 31) #endif -#ifdef HAS_INTERPOLATEROW_MMI -ANY11I(InterpolateRow_Any_MMI, InterpolateRow_MMI, 1, 1, 7) +#ifdef HAS_INTERPOLATEROW_LSX +ANY11I(InterpolateRow_Any_LSX, InterpolateRow_LSX, uint8_t, uint8_t, 1, 1, 31) +#endif + +#ifdef HAS_INTERPOLATEROW_16_NEON +ANY11I(InterpolateRow_16_Any_NEON, + InterpolateRow_16_NEON, + uint16_t, + uint16_t, + 1, + 1, + 7) #endif #undef ANY11I +// Any 1 to 1 interpolate with scale param +#define ANY11IS(NAMEANY, ANY_SIMD, TD, TS, SBPP, BPP, MASK) \ + void NAMEANY(TD* dst_ptr, const TS* src_ptr, ptrdiff_t src_stride, \ + int scale, int width, int source_y_fraction) { \ + SIMD_ALIGNED(TS temps[64 * 2]); \ + SIMD_ALIGNED(TD tempd[64]); \ + memset(temps, 0, sizeof(temps)); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(dst_ptr, src_ptr, src_stride, scale, n, source_y_fraction); \ + } \ + memcpy(temps, src_ptr + n * SBPP, r * SBPP * sizeof(TS)); \ + if (source_y_fraction) { \ + memcpy(temps + 64, src_ptr + src_stride + n * SBPP, \ + r * SBPP * sizeof(TS)); \ + } \ + ANY_SIMD(tempd, temps, 64, scale, MASK + 1, source_y_fraction); \ + memcpy(dst_ptr + n * BPP, tempd, r * BPP * sizeof(TD)); \ + } + +#ifdef HAS_INTERPOLATEROW_16TO8_NEON +ANY11IS(InterpolateRow_16To8_Any_NEON, + InterpolateRow_16To8_NEON, + uint8_t, + uint16_t, + 1, + 1, + 7) +#endif +#ifdef HAS_INTERPOLATEROW_16TO8_AVX2 +ANY11IS(InterpolateRow_16To8_Any_AVX2, + 
InterpolateRow_16To8_AVX2, + uint8_t, + uint16_t, + 1, + 1, + 31) +#endif + +#undef ANY11IS + // Any 1 to 1 mirror. #define ANY11M(NAMEANY, ANY_SIMD, BPP, MASK) \ void NAMEANY(const uint8_t* src_ptr, uint8_t* dst_ptr, int width) { \ @@ -1649,8 +1794,8 @@ ANY11M(MirrorRow_Any_NEON, MirrorRow_NEON, 1, 31) #ifdef HAS_MIRRORROW_MSA ANY11M(MirrorRow_Any_MSA, MirrorRow_MSA, 1, 63) #endif -#ifdef HAS_MIRRORROW_MMI -ANY11M(MirrorRow_Any_MMI, MirrorRow_MMI, 1, 7) +#ifdef HAS_MIRRORROW_LASX +ANY11M(MirrorRow_Any_LASX, MirrorRow_LASX, 1, 63) #endif #ifdef HAS_MIRRORUVROW_AVX2 ANY11M(MirrorUVRow_Any_AVX2, MirrorUVRow_AVX2, 2, 15) @@ -1664,6 +1809,9 @@ ANY11M(MirrorUVRow_Any_NEON, MirrorUVRow_NEON, 2, 31) #ifdef HAS_MIRRORUVROW_MSA ANY11M(MirrorUVRow_Any_MSA, MirrorUVRow_MSA, 2, 7) #endif +#ifdef HAS_MIRRORUVROW_LASX +ANY11M(MirrorUVRow_Any_LASX, MirrorUVRow_LASX, 2, 15) +#endif #ifdef HAS_ARGBMIRRORROW_AVX2 ANY11M(ARGBMirrorRow_Any_AVX2, ARGBMirrorRow_AVX2, 4, 7) #endif @@ -1676,8 +1824,8 @@ ANY11M(ARGBMirrorRow_Any_NEON, ARGBMirrorRow_NEON, 4, 7) #ifdef HAS_ARGBMIRRORROW_MSA ANY11M(ARGBMirrorRow_Any_MSA, ARGBMirrorRow_MSA, 4, 15) #endif -#ifdef HAS_ARGBMIRRORROW_MMI -ANY11M(ARGBMirrorRow_Any_MMI, ARGBMirrorRow_MMI, 4, 1) +#ifdef HAS_ARGBMIRRORROW_LASX +ANY11M(ARGBMirrorRow_Any_LASX, ARGBMirrorRow_LASX, 4, 15) #endif #ifdef HAS_RGB24MIRRORROW_SSSE3 ANY11M(RGB24MirrorRow_Any_SSSE3, RGB24MirrorRow_SSSE3, 3, 15) @@ -1707,14 +1855,17 @@ ANY1(SetRow_Any_X86, SetRow_X86, uint8_t, 1, 3) #ifdef HAS_SETROW_NEON ANY1(SetRow_Any_NEON, SetRow_NEON, uint8_t, 1, 15) #endif +#ifdef HAS_SETROW_LSX +ANY1(SetRow_Any_LSX, SetRow_LSX, uint8_t, 1, 15) +#endif #ifdef HAS_ARGBSETROW_NEON ANY1(ARGBSetRow_Any_NEON, ARGBSetRow_NEON, uint32_t, 4, 3) #endif #ifdef HAS_ARGBSETROW_MSA ANY1(ARGBSetRow_Any_MSA, ARGBSetRow_MSA, uint32_t, 4, 3) #endif -#ifdef HAS_ARGBSETROW_MMI -ANY1(ARGBSetRow_Any_MMI, ARGBSetRow_MMI, uint32_t, 4, 3) +#ifdef HAS_ARGBSETROW_LSX +ANY1(ARGBSetRow_Any_LSX, ARGBSetRow_LSX, uint32_t, 4, 3) #endif #undef ANY1 @@ -1747,8 +1898,8 @@ ANY12(SplitUVRow_Any_NEON, SplitUVRow_NEON, 0, 2, 0, 15) #ifdef HAS_SPLITUVROW_MSA ANY12(SplitUVRow_Any_MSA, SplitUVRow_MSA, 0, 2, 0, 31) #endif -#ifdef HAS_SPLITUVROW_MMI -ANY12(SplitUVRow_Any_MMI, SplitUVRow_MMI, 0, 2, 0, 7) +#ifdef HAS_SPLITUVROW_LSX +ANY12(SplitUVRow_Any_LSX, SplitUVRow_LSX, 0, 2, 0, 31) #endif #ifdef HAS_ARGBTOUV444ROW_SSSE3 ANY12(ARGBToUV444Row_Any_SSSE3, ARGBToUV444Row_SSSE3, 0, 4, 0, 15) @@ -1771,10 +1922,10 @@ ANY12(ARGBToUV444Row_Any_MSA, ARGBToUV444Row_MSA, 0, 4, 0, 15) ANY12(YUY2ToUV422Row_Any_MSA, YUY2ToUV422Row_MSA, 1, 4, 1, 31) ANY12(UYVYToUV422Row_Any_MSA, UYVYToUV422Row_MSA, 1, 4, 1, 31) #endif -#ifdef HAS_YUY2TOUV422ROW_MMI -ANY12(ARGBToUV444Row_Any_MMI, ARGBToUV444Row_MMI, 0, 4, 0, 7) -ANY12(UYVYToUV422Row_Any_MMI, UYVYToUV422Row_MMI, 1, 4, 1, 15) -ANY12(YUY2ToUV422Row_Any_MMI, YUY2ToUV422Row_MMI, 1, 4, 1, 15) +#ifdef HAS_YUY2TOUV422ROW_LASX +ANY12(ARGBToUV444Row_Any_LASX, ARGBToUV444Row_LASX, 0, 4, 0, 31) +ANY12(YUY2ToUV422Row_Any_LASX, YUY2ToUV422Row_LASX, 1, 4, 1, 31) +ANY12(UYVYToUV422Row_Any_LASX, UYVYToUV422Row_LASX, 1, 4, 1, 31) #endif #undef ANY12 @@ -1828,9 +1979,6 @@ ANY13(SplitRGBRow_Any_SSSE3, SplitRGBRow_SSSE3, 3, 15) #ifdef HAS_SPLITRGBROW_NEON ANY13(SplitRGBRow_Any_NEON, SplitRGBRow_NEON, 3, 15) #endif -#ifdef HAS_SPLITRGBROW_MMI -ANY13(SplitRGBRow_Any_MMI, SplitRGBRow_MMI, 3, 3) -#endif #ifdef HAS_SPLITXRGBROW_SSE2 ANY13(SplitXRGBRow_Any_SSE2, SplitXRGBRow_SSE2, 4, 7) #endif @@ -1912,9 +2060,17 @@ ANY12S(ABGRToUVRow_Any_AVX2, 
ABGRToUVRow_AVX2, 0, 4, 31) #ifdef HAS_ARGBTOUVJROW_AVX2 ANY12S(ARGBToUVJRow_Any_AVX2, ARGBToUVJRow_AVX2, 0, 4, 31) #endif +#ifdef HAS_ABGRTOUVJROW_AVX2 +ANY12S(ABGRToUVJRow_Any_AVX2, ABGRToUVJRow_AVX2, 0, 4, 31) +#endif +#ifdef HAS_ARGBTOUVJROW_SSSE3 +ANY12S(ARGBToUVJRow_Any_SSSE3, ARGBToUVJRow_SSSE3, 0, 4, 15) +#endif +#ifdef HAS_ABGRTOUVJROW_SSSE3 +ANY12S(ABGRToUVJRow_Any_SSSE3, ABGRToUVJRow_SSSE3, 0, 4, 15) +#endif #ifdef HAS_ARGBTOUVROW_SSSE3 ANY12S(ARGBToUVRow_Any_SSSE3, ARGBToUVRow_SSSE3, 0, 4, 15) -ANY12S(ARGBToUVJRow_Any_SSSE3, ARGBToUVJRow_SSSE3, 0, 4, 15) ANY12S(BGRAToUVRow_Any_SSSE3, BGRAToUVRow_SSSE3, 0, 4, 15) ANY12S(ABGRToUVRow_Any_SSSE3, ABGRToUVRow_SSSE3, 0, 4, 15) ANY12S(RGBAToUVRow_Any_SSSE3, RGBAToUVRow_SSSE3, 0, 4, 15) @@ -1933,17 +2089,23 @@ ANY12S(ARGBToUVRow_Any_NEON, ARGBToUVRow_NEON, 0, 4, 15) #ifdef HAS_ARGBTOUVROW_MSA ANY12S(ARGBToUVRow_Any_MSA, ARGBToUVRow_MSA, 0, 4, 31) #endif -#ifdef HAS_ARGBTOUVROW_MMI -ANY12S(ARGBToUVRow_Any_MMI, ARGBToUVRow_MMI, 0, 4, 15) +#ifdef HAS_ARGBTOUVROW_LASX +ANY12S(ARGBToUVRow_Any_LASX, ARGBToUVRow_LASX, 0, 4, 31) #endif #ifdef HAS_ARGBTOUVJROW_NEON ANY12S(ARGBToUVJRow_Any_NEON, ARGBToUVJRow_NEON, 0, 4, 15) #endif +#ifdef HAS_ABGRTOUVJROW_NEON +ANY12S(ABGRToUVJRow_Any_NEON, ABGRToUVJRow_NEON, 0, 4, 15) +#endif #ifdef HAS_ARGBTOUVJROW_MSA ANY12S(ARGBToUVJRow_Any_MSA, ARGBToUVJRow_MSA, 0, 4, 31) #endif -#ifdef HAS_ARGBTOUVJROW_MMI -ANY12S(ARGBToUVJRow_Any_MMI, ARGBToUVJRow_MMI, 0, 4, 15) +#ifdef HAS_ARGBTOUVJROW_LSX +ANY12S(ARGBToUVJRow_Any_LSX, ARGBToUVJRow_LSX, 0, 4, 15) +#endif +#ifdef HAS_ARGBTOUVJROW_LASX +ANY12S(ARGBToUVJRow_Any_LASX, ARGBToUVJRow_LASX, 0, 4, 31) #endif #ifdef HAS_BGRATOUVROW_NEON ANY12S(BGRAToUVRow_Any_NEON, BGRAToUVRow_NEON, 0, 4, 15) @@ -1951,8 +2113,8 @@ ANY12S(BGRAToUVRow_Any_NEON, BGRAToUVRow_NEON, 0, 4, 15) #ifdef HAS_BGRATOUVROW_MSA ANY12S(BGRAToUVRow_Any_MSA, BGRAToUVRow_MSA, 0, 4, 15) #endif -#ifdef HAS_BGRATOUVROW_MMI -ANY12S(BGRAToUVRow_Any_MMI, BGRAToUVRow_MMI, 0, 4, 15) +#ifdef HAS_BGRATOUVROW_LSX +ANY12S(BGRAToUVRow_Any_LSX, BGRAToUVRow_LSX, 0, 4, 15) #endif #ifdef HAS_ABGRTOUVROW_NEON ANY12S(ABGRToUVRow_Any_NEON, ABGRToUVRow_NEON, 0, 4, 15) @@ -1960,8 +2122,8 @@ ANY12S(ABGRToUVRow_Any_NEON, ABGRToUVRow_NEON, 0, 4, 15) #ifdef HAS_ABGRTOUVROW_MSA ANY12S(ABGRToUVRow_Any_MSA, ABGRToUVRow_MSA, 0, 4, 15) #endif -#ifdef HAS_ABGRTOUVROW_MMI -ANY12S(ABGRToUVRow_Any_MMI, ABGRToUVRow_MMI, 0, 4, 15) +#ifdef HAS_ABGRTOUVROW_LSX +ANY12S(ABGRToUVRow_Any_LSX, ABGRToUVRow_LSX, 0, 4, 15) #endif #ifdef HAS_RGBATOUVROW_NEON ANY12S(RGBAToUVRow_Any_NEON, RGBAToUVRow_NEON, 0, 4, 15) @@ -1969,26 +2131,38 @@ ANY12S(RGBAToUVRow_Any_NEON, RGBAToUVRow_NEON, 0, 4, 15) #ifdef HAS_RGBATOUVROW_MSA ANY12S(RGBAToUVRow_Any_MSA, RGBAToUVRow_MSA, 0, 4, 15) #endif -#ifdef HAS_RGBATOUVROW_MMI -ANY12S(RGBAToUVRow_Any_MMI, RGBAToUVRow_MMI, 0, 4, 15) +#ifdef HAS_RGBATOUVROW_LSX +ANY12S(RGBAToUVRow_Any_LSX, RGBAToUVRow_LSX, 0, 4, 15) #endif #ifdef HAS_RGB24TOUVROW_NEON ANY12S(RGB24ToUVRow_Any_NEON, RGB24ToUVRow_NEON, 0, 3, 15) #endif +#ifdef HAS_RGB24TOUVJROW_NEON +ANY12S(RGB24ToUVJRow_Any_NEON, RGB24ToUVJRow_NEON, 0, 3, 15) +#endif #ifdef HAS_RGB24TOUVROW_MSA ANY12S(RGB24ToUVRow_Any_MSA, RGB24ToUVRow_MSA, 0, 3, 15) #endif -#ifdef HAS_RGB24TOUVROW_MMI -ANY12S(RGB24ToUVRow_Any_MMI, RGB24ToUVRow_MMI, 0, 3, 15) +#ifdef HAS_RGB24TOUVROW_LSX +ANY12S(RGB24ToUVRow_Any_LSX, RGB24ToUVRow_LSX, 0, 3, 15) +#endif +#ifdef HAS_RGB24TOUVROW_LASX +ANY12S(RGB24ToUVRow_Any_LASX, RGB24ToUVRow_LASX, 0, 3, 31) #endif #ifdef HAS_RAWTOUVROW_NEON 
ANY12S(RAWToUVRow_Any_NEON, RAWToUVRow_NEON, 0, 3, 15) #endif +#ifdef HAS_RAWTOUVJROW_NEON +ANY12S(RAWToUVJRow_Any_NEON, RAWToUVJRow_NEON, 0, 3, 15) +#endif #ifdef HAS_RAWTOUVROW_MSA ANY12S(RAWToUVRow_Any_MSA, RAWToUVRow_MSA, 0, 3, 15) #endif -#ifdef HAS_RAWTOUVROW_MMI -ANY12S(RAWToUVRow_Any_MMI, RAWToUVRow_MMI, 0, 3, 15) +#ifdef HAS_RAWTOUVROW_LSX +ANY12S(RAWToUVRow_Any_LSX, RAWToUVRow_LSX, 0, 3, 15) +#endif +#ifdef HAS_RAWTOUVROW_LASX +ANY12S(RAWToUVRow_Any_LASX, RAWToUVRow_LASX, 0, 3, 31) #endif #ifdef HAS_RGB565TOUVROW_NEON ANY12S(RGB565ToUVRow_Any_NEON, RGB565ToUVRow_NEON, 0, 2, 15) @@ -1996,8 +2170,11 @@ ANY12S(RGB565ToUVRow_Any_NEON, RGB565ToUVRow_NEON, 0, 2, 15) #ifdef HAS_RGB565TOUVROW_MSA ANY12S(RGB565ToUVRow_Any_MSA, RGB565ToUVRow_MSA, 0, 2, 15) #endif -#ifdef HAS_RGB565TOUVROW_MMI -ANY12S(RGB565ToUVRow_Any_MMI, RGB565ToUVRow_MMI, 0, 2, 15) +#ifdef HAS_RGB565TOUVROW_LSX +ANY12S(RGB565ToUVRow_Any_LSX, RGB565ToUVRow_LSX, 0, 2, 15) +#endif +#ifdef HAS_RGB565TOUVROW_LASX +ANY12S(RGB565ToUVRow_Any_LASX, RGB565ToUVRow_LASX, 0, 2, 31) #endif #ifdef HAS_ARGB1555TOUVROW_NEON ANY12S(ARGB1555ToUVRow_Any_NEON, ARGB1555ToUVRow_NEON, 0, 2, 15) @@ -2005,15 +2182,15 @@ ANY12S(ARGB1555ToUVRow_Any_NEON, ARGB1555ToUVRow_NEON, 0, 2, 15) #ifdef HAS_ARGB1555TOUVROW_MSA ANY12S(ARGB1555ToUVRow_Any_MSA, ARGB1555ToUVRow_MSA, 0, 2, 15) #endif -#ifdef HAS_ARGB1555TOUVROW_MMI -ANY12S(ARGB1555ToUVRow_Any_MMI, ARGB1555ToUVRow_MMI, 0, 2, 15) +#ifdef HAS_ARGB1555TOUVROW_LSX +ANY12S(ARGB1555ToUVRow_Any_LSX, ARGB1555ToUVRow_LSX, 0, 2, 15) +#endif +#ifdef HAS_ARGB1555TOUVROW_LASX +ANY12S(ARGB1555ToUVRow_Any_LASX, ARGB1555ToUVRow_LASX, 0, 2, 31) #endif #ifdef HAS_ARGB4444TOUVROW_NEON ANY12S(ARGB4444ToUVRow_Any_NEON, ARGB4444ToUVRow_NEON, 0, 2, 15) #endif -#ifdef HAS_ARGB4444TOUVROW_MMI -ANY12S(ARGB4444ToUVRow_Any_MMI, ARGB4444ToUVRow_MMI, 0, 2, 15) -#endif #ifdef HAS_YUY2TOUVROW_NEON ANY12S(YUY2ToUVRow_Any_NEON, YUY2ToUVRow_NEON, 1, 4, 15) #endif @@ -2023,14 +2200,14 @@ ANY12S(UYVYToUVRow_Any_NEON, UYVYToUVRow_NEON, 1, 4, 15) #ifdef HAS_YUY2TOUVROW_MSA ANY12S(YUY2ToUVRow_Any_MSA, YUY2ToUVRow_MSA, 1, 4, 31) #endif -#ifdef HAS_YUY2TOUVROW_MMI -ANY12S(YUY2ToUVRow_Any_MMI, YUY2ToUVRow_MMI, 1, 4, 15) +#ifdef HAS_YUY2TOUVROW_LASX +ANY12S(YUY2ToUVRow_Any_LASX, YUY2ToUVRow_LASX, 1, 4, 31) #endif #ifdef HAS_UYVYTOUVROW_MSA ANY12S(UYVYToUVRow_Any_MSA, UYVYToUVRow_MSA, 1, 4, 31) #endif -#ifdef HAS_UYVYTOUVROW_MMI -ANY12S(UYVYToUVRow_Any_MMI, UYVYToUVRow_MMI, 1, 4, 15) +#ifdef HAS_UYVYTOUVROW_LASX +ANY12S(UYVYToUVRow_Any_LASX, UYVYToUVRow_LASX, 1, 4, 31) #endif #undef ANY12S @@ -2065,6 +2242,86 @@ ANY11S(AYUVToVURow_Any_NEON, AYUVToVURow_NEON, 0, 4, 15) #endif #undef ANY11S +#define ANYDETILE(NAMEANY, ANY_SIMD, T, BPP, MASK) \ + void NAMEANY(const T* src, ptrdiff_t src_tile_stride, T* dst, int width) { \ + SIMD_ALIGNED(T temp[16 * 2]); \ + memset(temp, 0, 16 * BPP); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(src, src_tile_stride, dst, n); \ + } \ + memcpy(temp, src + (n / 16) * src_tile_stride, r * BPP); \ + ANY_SIMD(temp, src_tile_stride, temp + 16, MASK + 1); \ + memcpy(dst + n, temp + 16, r * BPP); \ + } + +#ifdef HAS_DETILEROW_NEON +ANYDETILE(DetileRow_Any_NEON, DetileRow_NEON, uint8_t, 1, 15) +#endif +#ifdef HAS_DETILEROW_SSE2 +ANYDETILE(DetileRow_Any_SSE2, DetileRow_SSE2, uint8_t, 1, 15) +#endif +#ifdef HAS_DETILEROW_16_NEON +ANYDETILE(DetileRow_16_Any_NEON, DetileRow_16_NEON, uint16_t, 2, 15) +#endif +#ifdef HAS_DETILEROW_16_SSE2 +ANYDETILE(DetileRow_16_Any_SSE2, 
DetileRow_16_SSE2, uint16_t, 2, 15) +#endif +#ifdef HAS_DETILEROW_16_AVX +ANYDETILE(DetileRow_16_Any_AVX, DetileRow_16_AVX, uint16_t, 2, 15) +#endif + +#define ANYDETILESPLITUV(NAMEANY, ANY_SIMD, MASK) \ + void NAMEANY(const uint8_t* src_uv, ptrdiff_t src_tile_stride, \ + uint8_t* dst_u, uint8_t* dst_v, int width) { \ + SIMD_ALIGNED(uint8_t temp[16 * 2]); \ + memset(temp, 0, 16 * 2); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(src_uv, src_tile_stride, dst_u, dst_v, n); \ + } \ + memcpy(temp, src_uv + (n / 16) * src_tile_stride, r); \ + ANY_SIMD(temp, src_tile_stride, temp + 16, temp + 24, r); \ + memcpy(dst_u + n / 2, temp + 16, (r + 1) / 2); \ + memcpy(dst_v + n / 2, temp + 24, (r + 1) / 2); \ + } + +#ifdef HAS_DETILESPLITUVROW_NEON +ANYDETILESPLITUV(DetileSplitUVRow_Any_NEON, DetileSplitUVRow_NEON, 15) +#endif +#ifdef HAS_DETILESPLITUVROW_SSSE3 +ANYDETILESPLITUV(DetileSplitUVRow_Any_SSSE3, DetileSplitUVRow_SSSE3, 15) +#endif + +#define ANYDETILEMERGE(NAMEANY, ANY_SIMD, MASK) \ + void NAMEANY(const uint8_t* src_y, ptrdiff_t src_y_tile_stride, \ + const uint8_t* src_uv, ptrdiff_t src_uv_tile_stride, \ + uint8_t* dst_yuy2, int width) { \ + SIMD_ALIGNED(uint8_t temp[16 * 4]); \ + memset(temp, 0, 16 * 4); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(src_y, src_y_tile_stride, src_uv, src_uv_tile_stride, dst_yuy2, \ + n); \ + } \ + memcpy(temp, src_y + (n / 16) * src_y_tile_stride, r); \ + memcpy(temp + 16, src_uv + (n / 16) * src_uv_tile_stride, r); \ + ANY_SIMD(temp, src_y_tile_stride, temp + 16, src_uv_tile_stride, \ + temp + 32, r); \ + memcpy(dst_yuy2 + 2 * n, temp + 32, 2 * r); \ + } + +#ifdef HAS_DETILETOYUY2_NEON +ANYDETILEMERGE(DetileToYUY2_Any_NEON, DetileToYUY2_NEON, 15) +#endif + +#ifdef HAS_DETILETOYUY2_SSE2 +ANYDETILEMERGE(DetileToYUY2_Any_SSE2, DetileToYUY2_SSE2, 15) +#endif + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/source/row_common.cc b/third-party/libyuv/third_party/libyuv/source/row_common.cc index 517b70562f..5ee5b17f08 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_common.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_common.cc @@ -11,7 +11,6 @@ #include "libyuv/row.h" #include -#include #include // For memcpy and memset. 
#include "libyuv/basic_types.h" @@ -22,21 +21,31 @@ namespace libyuv { extern "C" { #endif -// This macro control YUV to RGB using unsigned math to extend range of +// This macro controls YUV to RGB using unsigned math to extend range of // YUV to RGB coefficients to 0 to 4 instead of 0 to 2 for more accuracy on B: // LIBYUV_UNLIMITED_DATA +// Macros to enable unlimited data for each colorspace +// LIBYUV_UNLIMITED_BT601 +// LIBYUV_UNLIMITED_BT709 +// LIBYUV_UNLIMITED_BT2020 + // The following macro from row_win makes the C code match the row_win code, // which is 7 bit fixed point for ARGBToI420: -#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \ - !defined(__clang__) && (defined(_M_IX86) || defined(_M_X64)) +#if !defined(LIBYUV_BIT_EXACT) && !defined(LIBYUV_DISABLE_X86) && \ + defined(_MSC_VER) && !defined(__clang__) && \ + (defined(_M_IX86) || defined(_M_X64)) #define LIBYUV_RGB7 1 #endif -#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \ - defined(_M_IX86) +#if !defined(LIBYUV_BIT_EXACT) && (defined(__x86_64__) || defined(_M_X64) || \ + defined(__i386__) || defined(_M_IX86)) #define LIBYUV_ARGBTOUV_PAVGB 1 #define LIBYUV_RGBTOU_TRUNCATE 1 +#define LIBYUV_ATTENUATE_DUP 1 +#endif +#if defined(LIBYUV_BIT_EXACT) +#define LIBYUV_UNATTENUATE_DUP 1 #endif // llvm x86 is poor at ternary operator, so use branchless min/max. @@ -337,8 +346,8 @@ void ARGBToRGB565DitherRow_C(const uint8_t* src_argb, uint8_t b1 = clamp255(src_argb[4] + dither1) >> 3; uint8_t g1 = clamp255(src_argb[5] + dither1) >> 2; uint8_t r1 = clamp255(src_argb[6] + dither1) >> 3; - WRITEWORD(dst_rgb, b0 | (g0 << 5) | (r0 << 11) | (b1 << 16) | (g1 << 21) | - (r1 << 27)); + *(uint16_t*)(dst_rgb + 0) = b0 | (g0 << 5) | (r0 << 11); + *(uint16_t*)(dst_rgb + 2) = b1 | (g1 << 5) | (r1 << 11); dst_rgb += 4; src_argb += 8; } @@ -362,8 +371,8 @@ void ARGBToARGB1555Row_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width) { uint8_t g1 = src_argb[5] >> 3; uint8_t r1 = src_argb[6] >> 3; uint8_t a1 = src_argb[7] >> 7; - *(uint32_t*)(dst_rgb) = b0 | (g0 << 5) | (r0 << 10) | (a0 << 15) | - (b1 << 16) | (g1 << 21) | (r1 << 26) | (a1 << 31); + *(uint16_t*)(dst_rgb + 0) = b0 | (g0 << 5) | (r0 << 10) | (a0 << 15); + *(uint16_t*)(dst_rgb + 2) = b1 | (g1 << 5) | (r1 << 10) | (a1 << 15); dst_rgb += 4; src_argb += 8; } @@ -387,8 +396,8 @@ void ARGBToARGB4444Row_C(const uint8_t* src_argb, uint8_t* dst_rgb, int width) { uint8_t g1 = src_argb[5] >> 4; uint8_t r1 = src_argb[6] >> 4; uint8_t a1 = src_argb[7] >> 4; - *(uint32_t*)(dst_rgb) = b0 | (g0 << 4) | (r0 << 8) | (a0 << 12) | - (b1 << 16) | (g1 << 20) | (r1 << 24) | (a1 << 28); + *(uint16_t*)(dst_rgb + 0) = b0 | (g0 << 4) | (r0 << 8) | (a0 << 12); + *(uint16_t*)(dst_rgb + 2) = b1 | (g1 << 4) | (r1 << 8) | (a1 << 12); dst_rgb += 4; src_argb += 8; } @@ -430,10 +439,14 @@ void ARGBToAR30Row_C(const uint8_t* src_argb, uint8_t* dst_ar30, int width) { void ARGBToAR64Row_C(const uint8_t* src_argb, uint16_t* dst_ar64, int width) { int x; for (x = 0; x < width; ++x) { - dst_ar64[0] = src_argb[0] * 0x0101; - dst_ar64[1] = src_argb[1] * 0x0101; - dst_ar64[2] = src_argb[2] * 0x0101; - dst_ar64[3] = src_argb[3] * 0x0101; + uint16_t b = src_argb[0] * 0x0101; + uint16_t g = src_argb[1] * 0x0101; + uint16_t r = src_argb[2] * 0x0101; + uint16_t a = src_argb[3] * 0x0101; + dst_ar64[0] = b; + dst_ar64[1] = g; + dst_ar64[2] = r; + dst_ar64[3] = a; dst_ar64 += 4; src_argb += 4; } @@ -442,10 +455,14 @@ void ARGBToAR64Row_C(const uint8_t* src_argb, uint16_t* dst_ar64, int width) { void 
ARGBToAB64Row_C(const uint8_t* src_argb, uint16_t* dst_ab64, int width) { int x; for (x = 0; x < width; ++x) { - dst_ab64[0] = src_argb[2] * 0x0101; - dst_ab64[1] = src_argb[1] * 0x0101; - dst_ab64[2] = src_argb[0] * 0x0101; - dst_ab64[3] = src_argb[3] * 0x0101; + uint16_t b = src_argb[0] * 0x0101; + uint16_t g = src_argb[1] * 0x0101; + uint16_t r = src_argb[2] * 0x0101; + uint16_t a = src_argb[3] * 0x0101; + dst_ab64[0] = r; + dst_ab64[1] = g; + dst_ab64[2] = b; + dst_ab64[3] = a; dst_ab64 += 4; src_argb += 4; } @@ -454,10 +471,14 @@ void ARGBToAB64Row_C(const uint8_t* src_argb, uint16_t* dst_ab64, int width) { void AR64ToARGBRow_C(const uint16_t* src_ar64, uint8_t* dst_argb, int width) { int x; for (x = 0; x < width; ++x) { - dst_argb[0] = src_ar64[0] >> 8; - dst_argb[1] = src_ar64[1] >> 8; - dst_argb[2] = src_ar64[2] >> 8; - dst_argb[3] = src_ar64[3] >> 8; + uint8_t b = src_ar64[0] >> 8; + uint8_t g = src_ar64[1] >> 8; + uint8_t r = src_ar64[2] >> 8; + uint8_t a = src_ar64[3] >> 8; + dst_argb[0] = b; + dst_argb[1] = g; + dst_argb[2] = r; + dst_argb[3] = a; dst_argb += 4; src_ar64 += 4; } @@ -466,10 +487,14 @@ void AR64ToARGBRow_C(const uint16_t* src_ar64, uint8_t* dst_argb, int width) { void AB64ToARGBRow_C(const uint16_t* src_ab64, uint8_t* dst_argb, int width) { int x; for (x = 0; x < width; ++x) { - dst_argb[0] = src_ab64[2] >> 8; - dst_argb[1] = src_ab64[1] >> 8; - dst_argb[2] = src_ab64[0] >> 8; - dst_argb[3] = src_ab64[3] >> 8; + uint8_t r = src_ab64[0] >> 8; + uint8_t g = src_ab64[1] >> 8; + uint8_t b = src_ab64[2] >> 8; + uint8_t a = src_ab64[3] >> 8; + dst_argb[0] = b; + dst_argb[1] = g; + dst_argb[2] = r; + dst_argb[3] = a; dst_argb += 4; src_ab64 += 4; } @@ -522,6 +547,7 @@ static __inline int RGBToY(uint8_t r, uint8_t g, uint8_t b) { #define AVGB(a, b) (((a) + (b) + 1) >> 1) +// LIBYUV_RGBTOU_TRUNCATE mimics x86 code that does not round. #ifdef LIBYUV_RGBTOU_TRUNCATE static __inline int RGBToU(uint8_t r, uint8_t g, uint8_t b) { return (112 * b - 74 * g - 38 * r + 0x8000) >> 8; @@ -530,7 +556,7 @@ static __inline int RGBToV(uint8_t r, uint8_t g, uint8_t b) { return (112 * r - 94 * g - 18 * b + 0x8000) >> 8; } #else -// TODO(fbarchard): Add rounding to SIMD and use this +// TODO(fbarchard): Add rounding to x86 SIMD and use this static __inline int RGBToU(uint8_t r, uint8_t g, uint8_t b) { return (112 * b - 74 * g - 38 * r + 0x8080) >> 8; } @@ -539,6 +565,7 @@ static __inline int RGBToV(uint8_t r, uint8_t g, uint8_t b) { } #endif +// LIBYUV_ARGBTOUV_PAVGB mimics x86 code that subsamples with 2 pavgb. #if !defined(LIBYUV_ARGBTOUV_PAVGB) static __inline int RGB2xToU(uint16_t r, uint16_t g, uint16_t b) { return ((112 / 2) * b - (74 / 2) * g - (38 / 2) * r + 0x8080) >> 8; @@ -551,7 +578,6 @@ static __inline int RGB2xToV(uint16_t r, uint16_t g, uint16_t b) { // ARGBToY_C and ARGBToUV_C // Intel version mimic SSE/AVX which does 2 pavgb #if LIBYUV_ARGBTOUV_PAVGB - #define MAKEROWY(NAME, R, G, B, BPP) \ void NAME##ToYRow_C(const uint8_t* src_rgb, uint8_t* dst_y, int width) { \ int x; \ @@ -772,6 +798,7 @@ static __inline int RGB2xToVJ(uint16_t r, uint16_t g, uint16_t b) { #endif MAKEROWYJ(ARGB, 2, 1, 0, 4) +MAKEROWYJ(ABGR, 0, 1, 2, 4) MAKEROWYJ(RGBA, 3, 2, 1, 4) MAKEROWYJ(RGB24, 2, 1, 0, 3) MAKEROWYJ(RAW, 0, 1, 2, 3) @@ -1457,7 +1484,7 @@ void J400ToARGBRow_C(const uint8_t* src_y, uint8_t* dst_argb, int width) { // KR = 0.299; KB = 0.114 // U and V contributions to R,G,B. 
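On the AR64/AB64 hunks above: the C paths widen an 8-bit channel to 16 bits by multiplying by 0x0101, which replicates the byte into both halves so that 255 maps to 65535 (a plain left shift would give 65280), and narrow 16-bit channels back to 8 bits by keeping the high byte. A minimal sketch of that round trip; the helper names are illustrative and not part of the patch:

#include <assert.h>
#include <stdint.h>

// Widen an 8-bit channel to 16 bits by byte replication (same as v * 0x0101).
static uint16_t Widen8To16(uint8_t v) {
  return (uint16_t)(v * 0x0101u);  // 0xAB -> 0xABAB, 0xFF -> 0xFFFF
}

// Narrow a 16-bit channel to 8 bits by keeping the high byte.
static uint8_t Narrow16To8(uint16_t v) {
  return (uint8_t)(v >> 8);  // 0xABAB -> 0xAB
}

int main(void) {
  for (int v = 0; v < 256; ++v) {
    assert(Narrow16To8(Widen8To16((uint8_t)v)) == v);  // lossless round trip
  }
  return 0;
}

Because replication followed by taking the high byte returns the original value, the 8 -> 16 -> 8 conversion is exact for every input, including 0 and full scale.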
-#ifdef LIBYUV_UNLIMITED_DATA +#if defined(LIBYUV_UNLIMITED_DATA) || defined(LIBYUV_UNLIMITED_BT601) #define UB 129 /* round(2.018 * 64) */ #else #define UB 128 /* max(128, round(2.018 * 64)) */ @@ -1511,7 +1538,7 @@ MAKEYUVCONSTANTS(JPEG, YG, YB, UB, UG, VG, VR) // KR = 0.2126, KB = 0.0722 // U and V contributions to R,G,B. -#ifdef LIBYUV_UNLIMITED_DATA +#if defined(LIBYUV_UNLIMITED_DATA) || defined(LIBYUV_UNLIMITED_BT709) #define UB 135 /* round(2.112 * 64) */ #else #define UB 128 /* max(128, round(2.112 * 64)) */ @@ -1565,7 +1592,7 @@ MAKEYUVCONSTANTS(F709, YG, YB, UB, UG, VG, VR) // KR = 0.2627; KB = 0.0593 // U and V contributions to R,G,B. -#ifdef LIBYUV_UNLIMITED_DATA +#if defined(LIBYUV_UNLIMITED_DATA) || defined(LIBYUV_UNLIMITED_BT2020) #define UB 137 /* round(2.142 * 64) */ #else #define UB 128 /* max(128, round(2.142 * 64)) */ @@ -1644,8 +1671,8 @@ MAKEYUVCONSTANTS(V2020, YG, YB, UB, UG, VG, VR) #define CALC_RGB16 \ int32_t y1 = ((uint32_t)(y32 * yg) >> 16) + yb; \ - int8_t ui = u; \ - int8_t vi = v; \ + int8_t ui = (int8_t)u; \ + int8_t vi = (int8_t)v; \ ui -= 0x80; \ vi -= 0x80; \ int b16 = y1 + (ui * ub); \ @@ -1696,7 +1723,7 @@ static __inline void YuvPixel10_16(uint16_t y, int* r, const struct YuvConstants* yuvconstants) { LOAD_YUV_CONSTANTS; - uint32_t y32 = y << 6; + uint32_t y32 = (y << 6) | (y >> 4); u = clamp255(u >> 2); v = clamp255(v >> 2); CALC_RGB16; @@ -1715,7 +1742,7 @@ static __inline void YuvPixel12_16(int16_t y, int* r, const struct YuvConstants* yuvconstants) { LOAD_YUV_CONSTANTS; - uint32_t y32 = y << 4; + uint32_t y32 = (y << 4) | (y >> 8); u = clamp255(u >> 4); v = clamp255(v >> 4); CALC_RGB16; @@ -1836,6 +1863,23 @@ void I444ToARGBRow_C(const uint8_t* src_y, } } +void I444ToRGB24Row_C(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* rgb_buf, + const struct YuvConstants* yuvconstants, + int width) { + int x; + for (x = 0; x < width; ++x) { + YuvPixel(src_y[0], src_u[0], src_v[0], rgb_buf + 0, rgb_buf + 1, + rgb_buf + 2, yuvconstants); + src_y += 1; + src_u += 1; + src_v += 1; + rgb_buf += 3; // Advance 1 pixel. 
+ } +} + // Also used for 420 void I422ToARGBRow_C(const uint8_t* src_y, const uint8_t* src_u, @@ -2273,8 +2317,8 @@ void I422ToARGB4444Row_C(const uint8_t* src_y, b1 = b1 >> 4; g1 = g1 >> 4; r1 = r1 >> 4; - *(uint32_t*)(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) | (b1 << 16) | - (g1 << 20) | (r1 << 24) | 0xf000f000; + *(uint16_t*)(dst_argb4444 + 0) = b0 | (g0 << 4) | (r0 << 8) | 0xf000; + *(uint16_t*)(dst_argb4444 + 2) = b1 | (g1 << 4) | (r1 << 8) | 0xf000; src_y += 2; src_u += 1; src_v += 1; @@ -2311,8 +2355,8 @@ void I422ToARGB1555Row_C(const uint8_t* src_y, b1 = b1 >> 3; g1 = g1 >> 3; r1 = r1 >> 3; - *(uint32_t*)(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) | (b1 << 16) | - (g1 << 21) | (r1 << 26) | 0x80008000; + *(uint16_t*)(dst_argb1555 + 0) = b0 | (g0 << 5) | (r0 << 10) | 0x8000; + *(uint16_t*)(dst_argb1555 + 2) = b1 | (g1 << 5) | (r1 << 10) | 0x8000; src_y += 2; src_u += 1; src_v += 1; @@ -2349,8 +2393,8 @@ void I422ToRGB565Row_C(const uint8_t* src_y, b1 = b1 >> 3; g1 = g1 >> 2; r1 = r1 >> 3; - *(uint32_t*)(dst_rgb565) = - b0 | (g0 << 5) | (r0 << 11) | (b1 << 16) | (g1 << 21) | (r1 << 27); + *(uint16_t*)(dst_rgb565 + 0) = b0 | (g0 << 5) | (r0 << 11); // for ubsan + *(uint16_t*)(dst_rgb565 + 2) = b1 | (g1 << 5) | (r1 << 11); src_y += 2; src_u += 1; src_v += 1; @@ -2476,8 +2520,8 @@ void NV12ToRGB565Row_C(const uint8_t* src_y, b1 = b1 >> 3; g1 = g1 >> 2; r1 = r1 >> 3; - *(uint32_t*)(dst_rgb565) = - b0 | (g0 << 5) | (r0 << 11) | (b1 << 16) | (g1 << 21) | (r1 << 27); + *(uint16_t*)(dst_rgb565 + 0) = b0 | (g0 << 5) | (r0 << 11); + *(uint16_t*)(dst_rgb565 + 2) = b1 | (g1 << 5) | (r1 << 11); src_y += 2; src_uv += 2; dst_rgb565 += 4; // Advance 2 pixels. @@ -2689,6 +2733,74 @@ void MergeUVRow_C(const uint8_t* src_u, } } +void DetileRow_C(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width) { + int x; + for (x = 0; x < width - 15; x += 16) { + memcpy(dst, src, 16); + dst += 16; + src += src_tile_stride; + } + if (width & 15) { + memcpy(dst, src, width & 15); + } +} + +void DetileRow_16_C(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width) { + int x; + for (x = 0; x < width - 15; x += 16) { + memcpy(dst, src, 16 * sizeof(uint16_t)); + dst += 16; + src += src_tile_stride; + } + if (width & 15) { + memcpy(dst, src, (width & 15) * sizeof(uint16_t)); + } +} + +void DetileSplitUVRow_C(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + for (x = 0; x < width - 15; x += 16) { + SplitUVRow_C(src_uv, dst_u, dst_v, 8); + dst_u += 8; + dst_v += 8; + src_uv += src_tile_stride; + } + if (width & 15) { + SplitUVRow_C(src_uv, dst_u, dst_v, ((width & 15) + 1) / 2); + } +} + +void DetileToYUY2_C(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width) { + for (int x = 0; x < width - 15; x += 16) { + for (int i = 0; i < 8; i++) { + dst_yuy2[0] = src_y[0]; + dst_yuy2[1] = src_uv[0]; + dst_yuy2[2] = src_y[1]; + dst_yuy2[3] = src_uv[1]; + dst_yuy2 += 4; + src_y += 2; + src_uv += 2; + } + src_y += src_y_tile_stride - 16; + src_uv += src_uv_tile_stride - 16; + } +} + void SplitRGBRow_C(const uint8_t* src_rgb, uint8_t* dst_r, uint8_t* dst_g, @@ -2936,6 +3048,9 @@ void DivideRow_16_C(const uint16_t* src_y, // 16384 = 10 bits // 4096 = 12 bits // 256 = 16 bits +// TODO(fbarchard): change scale to bits +#define C16TO8(v, scale) clamp255(((v) * (scale)) >> 16) + void Convert16To8Row_C(const uint16_t* src_y, uint8_t* 
dst_y, int scale, @@ -2945,7 +3060,7 @@ void Convert16To8Row_C(const uint16_t* src_y, assert(scale <= 32768); for (x = 0; x < width; ++x) { - dst_y[x] = clamp255((src_y[x] * scale) >> 16); + dst_y[x] = C16TO8(src_y[x], scale); } } @@ -2998,6 +3113,21 @@ void YUY2ToUVRow_C(const uint8_t* src_yuy2, } } +// Filter 2 rows of YUY2 UV's (422) into UV (NV12). +void YUY2ToNVUVRow_C(const uint8_t* src_yuy2, + int src_stride_yuy2, + uint8_t* dst_uv, + int width) { + // Output a row of UV values, filtering 2 rows of YUY2. + int x; + for (x = 0; x < width; x += 2) { + dst_uv[0] = (src_yuy2[1] + src_yuy2[src_stride_yuy2 + 1] + 1) >> 1; + dst_uv[1] = (src_yuy2[3] + src_yuy2[src_stride_yuy2 + 3] + 1) >> 1; + src_yuy2 += 4; + dst_uv += 2; + } +} + // Copy row of YUY2 UV's (422) into U and V (422). void YUY2ToUV422Row_C(const uint8_t* src_yuy2, uint8_t* dst_u, @@ -3151,11 +3281,11 @@ void BlendPlaneRow_C(const uint8_t* src0, } #undef UBLEND -#if defined(__aarch64__) || defined(__arm__) -#define ATTENUATE(f, a) (f * a + 128) >> 8 -#else +#if LIBYUV_ATTENUATE_DUP // This code mimics the SSSE3 version for better testability. #define ATTENUATE(f, a) (a | (a << 8)) * (f | (f << 8)) >> 24 +#else +#define ATTENUATE(f, a) (f * a + 128) >> 8 #endif // Multiply source RGB by alpha and store to destination. @@ -3242,6 +3372,14 @@ const uint32_t fixed_invtbl8[256] = { T(0xfc), T(0xfd), T(0xfe), 0x01000100}; #undef T +#if LIBYUV_UNATTENUATE_DUP +// This code mimics the Intel SIMD version for better testability. +#define UNATTENUATE(f, ia) clamp255(((f | (f << 8)) * ia) >> 16) +#else +#define UNATTENUATE(f, ia) clamp255((f * ia) >> 8) +#endif + +// mimics the Intel SIMD code for exactness. void ARGBUnattenuateRow_C(const uint8_t* src_argb, uint8_t* dst_argb, int width) { @@ -3252,13 +3390,11 @@ void ARGBUnattenuateRow_C(const uint8_t* src_argb, uint32_t r = src_argb[2]; const uint32_t a = src_argb[3]; const uint32_t ia = fixed_invtbl8[a] & 0xffff; // 8.8 fixed point - b = (b * ia) >> 8; - g = (g * ia) >> 8; - r = (r * ia) >> 8; + // Clamping should not be necessary but is free in assembly. - dst_argb[0] = clamp255(b); - dst_argb[1] = clamp255(g); - dst_argb[2] = clamp255(r); + dst_argb[0] = UNATTENUATE(b, ia); + dst_argb[1] = UNATTENUATE(g, ia); + dst_argb[2] = UNATTENUATE(r, ia); dst_argb[3] = a; src_argb += 4; dst_argb += 4; @@ -3289,8 +3425,11 @@ void CumulativeSumToAverageRow_C(const int32_t* tl, int area, uint8_t* dst, int count) { - float ooa = 1.0f / area; + float ooa; int i; + assert(area != 0); + + ooa = 1.0f / area; for (i = 0; i < count; ++i) { dst[0] = (uint8_t)((bl[w + 0] + tl[0] - bl[0] - tl[w + 0]) * ooa); dst[1] = (uint8_t)((bl[w + 1] + tl[1] - bl[1] - tl[w + 1]) * ooa); @@ -3346,6 +3485,17 @@ static void HalfRow_16_C(const uint16_t* src_uv, } } +static void HalfRow_16To8_C(const uint16_t* src_uv, + ptrdiff_t src_uv_stride, + uint8_t* dst_uv, + int scale, + int width) { + int x; + for (x = 0; x < width; ++x) { + dst_uv[x] = C16TO8((src_uv[x] + src_uv[src_uv_stride + x] + 1) >> 1, scale); + } +} + // C version 2x2 -> 2x1. 
void InterpolateRow_C(uint8_t* dst_ptr, const uint8_t* src_ptr, @@ -3356,6 +3506,9 @@ void InterpolateRow_C(uint8_t* dst_ptr, int y0_fraction = 256 - y1_fraction; const uint8_t* src_ptr1 = src_ptr + src_stride; int x; + assert(source_y_fraction >= 0); + assert(source_y_fraction < 256); + if (y1_fraction == 0) { memcpy(dst_ptr, src_ptr, width); return; @@ -3364,21 +3517,16 @@ void InterpolateRow_C(uint8_t* dst_ptr, HalfRow_C(src_ptr, src_stride, dst_ptr, width); return; } - for (x = 0; x < width - 1; x += 2) { - dst_ptr[0] = - (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction + 128) >> 8; - dst_ptr[1] = - (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction + 128) >> 8; - src_ptr += 2; - src_ptr1 += 2; - dst_ptr += 2; - } - if (width & 1) { + for (x = 0; x < width; ++x) { dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction + 128) >> 8; + ++src_ptr; + ++src_ptr1; + ++dst_ptr; } } +// C version 2x2 -> 2x1. void InterpolateRow_16_C(uint16_t* dst_ptr, const uint16_t* src_ptr, ptrdiff_t src_stride, @@ -3388,23 +3536,62 @@ void InterpolateRow_16_C(uint16_t* dst_ptr, int y0_fraction = 256 - y1_fraction; const uint16_t* src_ptr1 = src_ptr + src_stride; int x; - if (source_y_fraction == 0) { + assert(source_y_fraction >= 0); + assert(source_y_fraction < 256); + + if (y1_fraction == 0) { memcpy(dst_ptr, src_ptr, width * 2); return; } - if (source_y_fraction == 128) { + if (y1_fraction == 128) { HalfRow_16_C(src_ptr, src_stride, dst_ptr, width); return; } - for (x = 0; x < width - 1; x += 2) { - dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8; - dst_ptr[1] = (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction) >> 8; - src_ptr += 2; - src_ptr1 += 2; - dst_ptr += 2; + for (x = 0; x < width; ++x) { + dst_ptr[0] = + (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction + 128) >> 8; + ++src_ptr; + ++src_ptr1; + ++dst_ptr; } - if (width & 1) { - dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8; +} + +// C version 2x2 16 bit-> 2x1 8 bit. +// Use scale to convert lsb formats to msb, depending how many bits there are: +// 32768 = 9 bits +// 16384 = 10 bits +// 4096 = 12 bits +// 256 = 16 bits +// TODO(fbarchard): change scale to bits + +void InterpolateRow_16To8_C(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction) { + int y1_fraction = source_y_fraction; + int y0_fraction = 256 - y1_fraction; + const uint16_t* src_ptr1 = src_ptr + src_stride; + int x; + assert(source_y_fraction >= 0); + assert(source_y_fraction < 256); + + if (source_y_fraction == 0) { + Convert16To8Row_C(src_ptr, dst_ptr, scale, width); + return; + } + if (source_y_fraction == 128) { + HalfRow_16To8_C(src_ptr, src_stride, dst_ptr, scale, width); + return; + } + for (x = 0; x < width; ++x) { + dst_ptr[0] = C16TO8( + (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction + 128) >> 8, + scale); + src_ptr += 1; + src_ptr1 += 1; + dst_ptr += 1; } } @@ -3921,6 +4108,32 @@ void I422ToRGB24Row_AVX2(const uint8_t* src_y, } #endif +#if defined(HAS_I444TORGB24ROW_AVX2) +void I444ToRGB24Row_AVX2(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8_t row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; + I444ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); +#if defined(HAS_ARGBTORGB24ROW_AVX2) + ARGBToRGB24Row_AVX2(row, dst_rgb24, twidth); +#else + ARGBToRGB24Row_SSSE3(row, dst_rgb24, twidth); +#endif + src_y += twidth; + src_u += twidth; + src_v += twidth; + dst_rgb24 += twidth * 3; + width -= twidth; + } +} +#endif + #if defined(HAS_NV12TORGB565ROW_AVX2) void NV12ToRGB565Row_AVX2(const uint8_t* src_y, const uint8_t* src_uv, @@ -4009,6 +4222,26 @@ void RAWToYJRow_SSSE3(const uint8_t* src_raw, uint8_t* dst_yj, int width) { } #endif // HAS_RAWTOYJROW_SSSE3 +#ifdef HAS_INTERPOLATEROW_16TO8_AVX2 +void InterpolateRow_16To8_AVX2(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int width, + int source_y_fraction) { + // Row buffer for intermediate 16 bit pixels. + SIMD_ALIGNED(uint16_t row[MAXTWIDTH]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + InterpolateRow_16_C(row, src_ptr, src_stride, twidth, source_y_fraction); + Convert16To8Row_AVX2(row, dst_ptr, scale, twidth); + src_ptr += twidth; + dst_ptr += twidth; + width -= twidth; + } +} +#endif // HAS_INTERPOLATEROW_16TO8_AVX2 + float ScaleSumSamples_C(const float* src, float* dst, float scale, int width) { float fsum = 0.f; int i; diff --git a/third-party/libyuv/third_party/libyuv/source/row_gcc.cc b/third-party/libyuv/third_party/libyuv/source/row_gcc.cc index 001c353dbe..f36d0cf01b 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_gcc.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_gcc.cc @@ -9,7 +9,6 @@ */ #include "libyuv/row.h" - #ifdef __cplusplus namespace libyuv { extern "C" { @@ -28,6 +27,9 @@ static const uvec8 kARGBToY = {25u, 129u, 66u, 0u, 25u, 129u, 66u, 0u, static const uvec8 kARGBToYJ = {29u, 150u, 77u, 0u, 29u, 150u, 77u, 0u, 29u, 150u, 77u, 0u, 29u, 150u, 77u, 0u}; +static const uvec8 kABGRToYJ = {77u, 150u, 29u, 0u, 77u, 150u, 29u, 0u, + 77u, 150u, 29u, 0u, 77u, 150u, 29u, 0u}; + static const uvec8 kRGBAToYJ = {0u, 29u, 150u, 77u, 0u, 29u, 150u, 77u, 0u, 29u, 150u, 77u, 0u, 29u, 150u, 77u}; #endif // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3) @@ -40,12 +42,18 @@ static const vec8 kARGBToU = {112, -74, -38, 0, 112, -74, -38, 0, static const vec8 kARGBToUJ = {127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0}; +static const vec8 kABGRToUJ = {-43, -84, 127, 0, -43, -84, 127, 0, + -43, -84, 127, 0, -43, -84, 127, 0}; + static const vec8 kARGBToV = {-18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0}; static const vec8 kARGBToVJ = {-20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0}; +static const vec8 kABGRToVJ = {127, -107, -20, 0, 127, -107, -20, 0, + 127, -107, -20, 0, 127, -107, -20, 0}; + // Constants for BGRA static const uvec8 kBGRAToY = {0u, 66u, 129u, 25u, 0u, 66u, 129u, 25u, 0u, 66u, 129u, 25u, 0u, 66u, 129u, 25u}; @@ -1202,6 +1210,7 @@ void ARGBToAR64Row_AVX2(const uint8_t* src_argb, "lea 0x40(%1),%1 \n" "sub $0x8,%2 \n" "jg 1b \n" + "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_ar64), // %1 "+r"(width) // %2 @@ -1229,6 +1238,7 @@ void ARGBToAB64Row_AVX2(const uint8_t* src_argb, "lea 0x40(%1),%1 \n" "sub $0x8,%2 \n" "jg 1b \n" + "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_ab64), // %1 "+r"(width) // %2 @@ -1257,6 +1267,7 @@ void AR64ToARGBRow_AVX2(const uint16_t* src_ar64, "lea 0x20(%1),%1 \n" "sub $0x8,%2 \n" "jg 1b \n" + "vzeroupper \n" : "+r"(src_ar64), // %0 "+r"(dst_argb), // %1 "+r"(width) // %2 
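The I444ToRGB24Row_AVX2 and InterpolateRow_16To8_AVX2 wrappers added in row_common.cc above both use the same row-buffer composition pattern as the rest of that file: process at most MAXTWIDTH pixels at a time through an aligned temporary row, calling two existing kernels back to back (produce an intermediate row, then convert or narrow it into the destination). A rough scalar sketch of the pattern, with a stand-in for Convert16To8Row (scale 16384 maps a 10-bit 1023 to 255, matching the scale table in row_common.cc); the chunk size and helper names below are illustrative, not the patch's own:

#include <stdint.h>
#include <string.h>

enum { kChunk = 2048 };  // stands in for MAXTWIDTH in this sketch

// Scalar stand-in for Convert16To8Row_C: dst = clamp255((src * scale) >> 16).
// For 10-bit input, scale 16384 maps 1023 to 255.
static void Narrow16To8(const uint16_t* src, uint8_t* dst, int scale, int n) {
  for (int i = 0; i < n; ++i) {
    uint32_t v = ((uint32_t)src[i] * (uint32_t)scale) >> 16;
    dst[i] = (uint8_t)(v > 255 ? 255 : v);
  }
}

// Placeholder first stage producing 16-bit intermediates (interpolation,
// YUV-to-ARGB, etc. in the real wrappers).
static void Stage16(const uint16_t* src, uint16_t* dst, int n) {
  memcpy(dst, src, (size_t)n * sizeof(uint16_t));
}

// Composed row: run the two stages a chunk at a time through a small buffer,
// the way InterpolateRow_16To8_AVX2 runs InterpolateRow_16_C followed by
// Convert16To8Row_AVX2.
void ComposedRow16To8(const uint16_t* src, uint8_t* dst, int scale, int width) {
  uint16_t row[kChunk];
  while (width > 0) {
    int twidth = width > kChunk ? kChunk : width;
    Stage16(src, row, twidth);
    Narrow16To8(row, dst, scale, twidth);
    src += twidth;
    dst += twidth;
    width -= twidth;
  }
}

Keeping the intermediate in a fixed-size stack row bounds memory use and lets each stage remain a simple, separately tested row kernel.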
@@ -1285,6 +1296,7 @@ void AB64ToARGBRow_AVX2(const uint16_t* src_ab64, "lea 0x20(%1),%1 \n" "sub $0x8,%2 \n" "jg 1b \n" + "vzeroupper \n" : "+r"(src_ab64), // %0 "+r"(dst_argb), // %1 "+r"(width) // %2 @@ -1399,6 +1411,24 @@ void ARGBToYJRow_SSSE3(const uint8_t* src_argb, uint8_t* dst_y, int width) { } #endif // HAS_ARGBTOYJROW_SSSE3 +#ifdef HAS_ABGRTOYJROW_SSSE3 +// Convert 16 ABGR pixels (64 bytes) to 16 YJ values. +// Same as ABGRToYRow but different coefficients, no add 16. +void ABGRToYJRow_SSSE3(const uint8_t* src_abgr, uint8_t* dst_y, int width) { + asm volatile( + "movdqa %3,%%xmm4 \n" + "movdqa %4,%%xmm5 \n" + + LABELALIGN RGBTOY(xmm5) + : "+r"(src_abgr), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "m"(kABGRToYJ), // %3 + "m"(kSub128) // %4 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); +} +#endif // HAS_ABGRTOYJROW_SSSE3 + #ifdef HAS_RGBATOYJROW_SSSE3 // Convert 16 ARGB pixels (64 bytes) to 16 YJ values. // Same as ARGBToYRow but different coefficients, no add 16. @@ -1417,9 +1447,13 @@ void RGBAToYJRow_SSSE3(const uint8_t* src_rgba, uint8_t* dst_y, int width) { } #endif // HAS_RGBATOYJROW_SSSE3 -#ifdef HAS_ARGBTOYROW_AVX2 +#if defined(HAS_ARGBTOYROW_AVX2) || defined(HAS_ABGRTOYROW_AVX2) || \ + defined(HAS_ARGBEXTRACTALPHAROW_AVX2) // vpermd for vphaddw + vpackuswb vpermd. static const lvec32 kPermdARGBToY_AVX = {0, 4, 1, 5, 2, 6, 3, 7}; +#endif + +#ifdef HAS_ARGBTOYROW_AVX2 // Convert 32 ARGB pixels (128 bytes) to 32 Y values. void ARGBToYRow_AVX2(const uint8_t* src_argb, uint8_t* dst_y, int width) { @@ -1427,9 +1461,8 @@ void ARGBToYRow_AVX2(const uint8_t* src_argb, uint8_t* dst_y, int width) { "vbroadcastf128 %3,%%ymm4 \n" "vbroadcastf128 %4,%%ymm5 \n" "vbroadcastf128 %5,%%ymm7 \n" - "vmovdqu %6,%%ymm6 \n" - - LABELALIGN RGBTOY_AVX2(ymm7) + "vmovdqu %6,%%ymm6 \n" LABELALIGN RGBTOY_AVX2( + ymm7) "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 "+r"(width) // %2 @@ -1449,9 +1482,8 @@ void ABGRToYRow_AVX2(const uint8_t* src_abgr, uint8_t* dst_y, int width) { "vbroadcastf128 %3,%%ymm4 \n" "vbroadcastf128 %4,%%ymm5 \n" "vbroadcastf128 %5,%%ymm7 \n" - "vmovdqu %6,%%ymm6 \n" - - LABELALIGN RGBTOY_AVX2(ymm7) + "vmovdqu %6,%%ymm6 \n" LABELALIGN RGBTOY_AVX2( + ymm7) "vzeroupper \n" : "+r"(src_abgr), // %0 "+r"(dst_y), // %1 "+r"(width) // %2 @@ -1470,9 +1502,8 @@ void ARGBToYJRow_AVX2(const uint8_t* src_argb, uint8_t* dst_y, int width) { asm volatile( "vbroadcastf128 %3,%%ymm4 \n" "vbroadcastf128 %4,%%ymm5 \n" - "vmovdqu %5,%%ymm6 \n" - - LABELALIGN RGBTOY_AVX2(ymm5) + "vmovdqu %5,%%ymm6 \n" LABELALIGN RGBTOY_AVX2( + ymm5) "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 "+r"(width) // %2 @@ -1484,15 +1515,32 @@ void ARGBToYJRow_AVX2(const uint8_t* src_argb, uint8_t* dst_y, int width) { } #endif // HAS_ARGBTOYJROW_AVX2 +#ifdef HAS_ABGRTOYJROW_AVX2 +// Convert 32 ABGR pixels (128 bytes) to 32 Y values. +void ABGRToYJRow_AVX2(const uint8_t* src_abgr, uint8_t* dst_y, int width) { + asm volatile( + "vbroadcastf128 %3,%%ymm4 \n" + "vbroadcastf128 %4,%%ymm5 \n" + "vmovdqu %5,%%ymm6 \n" LABELALIGN RGBTOY_AVX2( + ymm5) "vzeroupper \n" + : "+r"(src_abgr), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "m"(kABGRToYJ), // %3 + "m"(kSub128), // %4 + "m"(kPermdARGBToY_AVX) // %5 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", + "xmm7"); +} +#endif // HAS_ABGRTOYJROW_AVX2 + #ifdef HAS_RGBATOYJROW_AVX2 // Convert 32 ARGB pixels (128 bytes) to 32 Y values. 
void RGBAToYJRow_AVX2(const uint8_t* src_rgba, uint8_t* dst_y, int width) { asm volatile( "vbroadcastf128 %3,%%ymm4 \n" "vbroadcastf128 %4,%%ymm5 \n" - "vmovdqu %5,%%ymm6 \n" - - LABELALIGN RGBTOY_AVX2( + "vmovdqu %5,%%ymm6 \n" LABELALIGN RGBTOY_AVX2( ymm5) "vzeroupper \n" : "+r"(src_rgba), // %0 "+r"(dst_y), // %1 @@ -1569,11 +1617,15 @@ void ARGBToUVRow_SSSE3(const uint8_t* src_argb, } #endif // HAS_ARGBTOUVROW_SSSE3 -#ifdef HAS_ARGBTOUVROW_AVX2 +#if defined(HAS_ARGBTOUVROW_AVX2) || defined(HAS_ABGRTOUVROW_AVX2) || \ + defined(HAS_ARGBTOUVJROW_AVX2) || defined(HAS_ABGRTOUVJROW_AVX2) // vpshufb for vphaddw + vpackuswb packed to shorts. static const lvec8 kShufARGBToUV_AVX = { 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15}; +#endif + +#if defined(HAS_ARGBTOUVROW_AVX2) void ARGBToUVRow_AVX2(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_u, @@ -1763,6 +1815,71 @@ void ARGBToUVJRow_AVX2(const uint8_t* src_argb, } #endif // HAS_ARGBTOUVJROW_AVX2 +// TODO(fbarchard): Pass kABGRToVJ / kABGRToUJ as matrix +#ifdef HAS_ABGRTOUVJROW_AVX2 +void ABGRToUVJRow_AVX2(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile( + "vbroadcastf128 %5,%%ymm5 \n" + "vbroadcastf128 %6,%%ymm6 \n" + "vbroadcastf128 %7,%%ymm7 \n" + "sub %1,%2 \n" + + LABELALIGN + "1: \n" + "vmovdqu (%0),%%ymm0 \n" + "vmovdqu 0x20(%0),%%ymm1 \n" + "vmovdqu 0x40(%0),%%ymm2 \n" + "vmovdqu 0x60(%0),%%ymm3 \n" + "vpavgb 0x00(%0,%4,1),%%ymm0,%%ymm0 \n" + "vpavgb 0x20(%0,%4,1),%%ymm1,%%ymm1 \n" + "vpavgb 0x40(%0,%4,1),%%ymm2,%%ymm2 \n" + "vpavgb 0x60(%0,%4,1),%%ymm3,%%ymm3 \n" + "lea 0x80(%0),%0 \n" + "vshufps $0x88,%%ymm1,%%ymm0,%%ymm4 \n" + "vshufps $0xdd,%%ymm1,%%ymm0,%%ymm0 \n" + "vpavgb %%ymm4,%%ymm0,%%ymm0 \n" + "vshufps $0x88,%%ymm3,%%ymm2,%%ymm4 \n" + "vshufps $0xdd,%%ymm3,%%ymm2,%%ymm2 \n" + "vpavgb %%ymm4,%%ymm2,%%ymm2 \n" + + "vpmaddubsw %%ymm7,%%ymm0,%%ymm1 \n" + "vpmaddubsw %%ymm7,%%ymm2,%%ymm3 \n" + "vpmaddubsw %%ymm6,%%ymm0,%%ymm0 \n" + "vpmaddubsw %%ymm6,%%ymm2,%%ymm2 \n" + "vphaddw %%ymm3,%%ymm1,%%ymm1 \n" + "vphaddw %%ymm2,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm5,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm5,%%ymm1,%%ymm1 \n" + "vpsraw $0x8,%%ymm1,%%ymm1 \n" + "vpsraw $0x8,%%ymm0,%%ymm0 \n" + "vpacksswb %%ymm0,%%ymm1,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vpshufb %8,%%ymm0,%%ymm0 \n" + + "vextractf128 $0x0,%%ymm0,(%1) \n" + "vextractf128 $0x1,%%ymm0,0x0(%1,%2,1) \n" + "lea 0x10(%1),%1 \n" + "sub $0x20,%3 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_abgr), // %0 + "+r"(dst_u), // %1 + "+r"(dst_v), // %2 + "+rm"(width) // %3 + : "r"((intptr_t)(src_stride_abgr)), // %4 + "m"(kSub128), // %5 + "m"(kABGRToVJ), // %6 + "m"(kABGRToUJ), // %7 + "m"(kShufARGBToUV_AVX) // %8 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", + "xmm7"); +} +#endif // HAS_ABGRTOUVJROW_AVX2 + #ifdef HAS_ARGBTOUVJROW_SSSE3 void ARGBToUVJRow_SSSE3(const uint8_t* src_argb, int src_stride_argb, @@ -1829,6 +1946,72 @@ void ARGBToUVJRow_SSSE3(const uint8_t* src_argb, } #endif // HAS_ARGBTOUVJROW_SSSE3 +#ifdef HAS_ABGRTOUVJROW_SSSE3 +void ABGRToUVJRow_SSSE3(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile( + "movdqa %5,%%xmm3 \n" + "movdqa %6,%%xmm4 \n" + "movdqa %7,%%xmm5 \n" + "sub %1,%2 \n" + + LABELALIGN + "1: \n" + "movdqu (%0),%%xmm0 \n" + "movdqu 0x00(%0,%4,1),%%xmm7 \n" + "pavgb %%xmm7,%%xmm0 \n" + "movdqu 0x10(%0),%%xmm1 \n" + "movdqu 
0x10(%0,%4,1),%%xmm7 \n" + "pavgb %%xmm7,%%xmm1 \n" + "movdqu 0x20(%0),%%xmm2 \n" + "movdqu 0x20(%0,%4,1),%%xmm7 \n" + "pavgb %%xmm7,%%xmm2 \n" + "movdqu 0x30(%0),%%xmm6 \n" + "movdqu 0x30(%0,%4,1),%%xmm7 \n" + "pavgb %%xmm7,%%xmm6 \n" + + "lea 0x40(%0),%0 \n" + "movdqa %%xmm0,%%xmm7 \n" + "shufps $0x88,%%xmm1,%%xmm0 \n" + "shufps $0xdd,%%xmm1,%%xmm7 \n" + "pavgb %%xmm7,%%xmm0 \n" + "movdqa %%xmm2,%%xmm7 \n" + "shufps $0x88,%%xmm6,%%xmm2 \n" + "shufps $0xdd,%%xmm6,%%xmm7 \n" + "pavgb %%xmm7,%%xmm2 \n" + "movdqa %%xmm0,%%xmm1 \n" + "movdqa %%xmm2,%%xmm6 \n" + "pmaddubsw %%xmm4,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm2 \n" + "pmaddubsw %%xmm3,%%xmm1 \n" + "pmaddubsw %%xmm3,%%xmm6 \n" + "phaddw %%xmm2,%%xmm0 \n" + "phaddw %%xmm6,%%xmm1 \n" + "paddw %%xmm5,%%xmm0 \n" + "paddw %%xmm5,%%xmm1 \n" + "psraw $0x8,%%xmm0 \n" + "psraw $0x8,%%xmm1 \n" + "packsswb %%xmm1,%%xmm0 \n" + "movlps %%xmm0,(%1) \n" + "movhps %%xmm0,0x00(%1,%2,1) \n" + "lea 0x8(%1),%1 \n" + "sub $0x10,%3 \n" + "jg 1b \n" + : "+r"(src_abgr), // %0 + "+r"(dst_u), // %1 + "+r"(dst_v), // %2 + "+rm"(width) // %3 + : "r"((intptr_t)(src_stride_abgr)), // %4 + "m"(kABGRToVJ), // %5 + "m"(kABGRToUJ), // %6 + "m"(kSub128) // %7 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"); +} +#endif // HAS_ABGRTOUVJROW_SSSE3 + #ifdef HAS_ARGBTOUV444ROW_SSSE3 void ARGBToUV444Row_SSSE3(const uint8_t* src_argb, uint8_t* dst_u, @@ -2151,9 +2334,6 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "lea 0x8(%[y_buf]),%[y_buf] \n" // Read 4 UV from 422 10 bit, upsample to 8 UV -// TODO(fbarchard): Consider shufb to replace pack/unpack -// TODO(fbarchard): Consider pmulhuw to replace psraw -// TODO(fbarchard): Consider pmullw to replace psllw and allow different bits. #define READYUV210 \ "movq (%[u_buf]),%%xmm3 \n" \ "movq 0x00(%[u_buf],%[v_buf],1),%%xmm1 \n" \ @@ -2163,7 +2343,10 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "packuswb %%xmm3,%%xmm3 \n" \ "punpcklwd %%xmm3,%%xmm3 \n" \ "movdqu (%[y_buf]),%%xmm4 \n" \ + "movdqa %%xmm4,%%xmm2 \n" \ "psllw $6,%%xmm4 \n" \ + "psrlw $4,%%xmm2 \n" \ + "paddw %%xmm2,%%xmm4 \n" \ "lea 0x10(%[y_buf]),%[y_buf] \n" #define READYUVA210 \ @@ -2175,7 +2358,10 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "packuswb %%xmm3,%%xmm3 \n" \ "punpcklwd %%xmm3,%%xmm3 \n" \ "movdqu (%[y_buf]),%%xmm4 \n" \ + "movdqa %%xmm4,%%xmm2 \n" \ "psllw $6,%%xmm4 \n" \ + "psrlw $4,%%xmm2 \n" \ + "paddw %%xmm2,%%xmm4 \n" \ "lea 0x10(%[y_buf]),%[y_buf] \n" \ "movdqu (%[a_buf]),%%xmm5 \n" \ "psraw $2,%%xmm5 \n" \ @@ -2194,7 +2380,10 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "punpckhwd %%xmm2,%%xmm1 \n" \ "packuswb %%xmm1,%%xmm3 \n" \ "movdqu (%[y_buf]),%%xmm4 \n" \ + "movdqa %%xmm4,%%xmm2 \n" \ "psllw $6,%%xmm4 \n" \ + "psrlw $4,%%xmm2 \n" \ + "paddw %%xmm2,%%xmm4 \n" \ "lea 0x10(%[y_buf]),%[y_buf] \n" // Read 8 UV from 444 10 bit. With 8 Alpha. 
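The ABGRToYJ and ABGRToUVJ kernels added earlier in this file reuse the full-range (JPEG) coefficients already used for ARGB; the new kABGRToYJ/kABGRToUJ/kABGRToVJ constants are the ARGB constants reordered, because ABGR stores R,G,B,A in memory where ARGB stores B,G,R,A. For reference, a scalar sketch of the luma step using the 77/150/29 weights visible in kABGRToYJ; it mirrors what the C fallback generated by MAKEROWYJ(ABGR, 0, 1, 2, 4) computes, but the function below is an illustration, not code from the patch:

#include <stdint.h>

// Full-range (JPEG) luma from ABGR pixels; memory byte order is R, G, B, A.
// The weights 77 + 150 + 29 sum to 256; +128 rounds before the final shift.
static void AbgrToYJRowScalar(const uint8_t* src_abgr, uint8_t* dst_yj,
                              int width) {
  for (int x = 0; x < width; ++x) {
    uint8_t r = src_abgr[0];
    uint8_t g = src_abgr[1];
    uint8_t b = src_abgr[2];
    dst_yj[x] = (uint8_t)((77 * r + 150 * g + 29 * b + 128) >> 8);
    src_abgr += 4;  // skip alpha
  }
}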
@@ -2209,7 +2398,10 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "punpckhwd %%xmm2,%%xmm1 \n" \ "packuswb %%xmm1,%%xmm3 \n" \ "movdqu (%[y_buf]),%%xmm4 \n" \ - "psllw $0x6,%%xmm4 \n" \ + "movdqa %%xmm4,%%xmm2 \n" \ + "psllw $6,%%xmm4 \n" \ + "psrlw $4,%%xmm2 \n" \ + "paddw %%xmm2,%%xmm4 \n" \ "lea 0x10(%[y_buf]),%[y_buf] \n" \ "movdqu (%[a_buf]),%%xmm5 \n" \ "psraw $2,%%xmm5 \n" \ @@ -2226,7 +2418,10 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "packuswb %%xmm3,%%xmm3 \n" \ "punpcklwd %%xmm3,%%xmm3 \n" \ "movdqu (%[y_buf]),%%xmm4 \n" \ - "psllw $0x4,%%xmm4 \n" \ + "movdqa %%xmm4,%%xmm2 \n" \ + "psllw $4,%%xmm4 \n" \ + "psrlw $8,%%xmm2 \n" \ + "paddw %%xmm2,%%xmm4 \n" \ "lea 0x10(%[y_buf]),%[y_buf] \n" // Read 4 UV from 422, upsample to 8 UV. With 8 Alpha. @@ -2397,6 +2592,20 @@ void RGBAToUVRow_SSSE3(const uint8_t* src_rgba, "movdqu %%xmm0,0x10(%[dst_rgba]) \n" \ "lea 0x20(%[dst_rgba]),%[dst_rgba] \n" +// Store 8 RGB24 values. +#define STORERGB24 \ + "punpcklbw %%xmm1,%%xmm0 \n" \ + "punpcklbw %%xmm2,%%xmm2 \n" \ + "movdqa %%xmm0,%%xmm1 \n" \ + "punpcklwd %%xmm2,%%xmm0 \n" \ + "punpckhwd %%xmm2,%%xmm1 \n" \ + "pshufb %%xmm5,%%xmm0 \n" \ + "pshufb %%xmm6,%%xmm1 \n" \ + "palignr $0xc,%%xmm0,%%xmm1 \n" \ + "movq %%xmm0,(%[dst_rgb24]) \n" \ + "movdqu %%xmm1,0x8(%[dst_rgb24]) \n" \ + "lea 0x18(%[dst_rgb24]),%[dst_rgb24] \n" + // Store 8 AR30 values. #define STOREAR30 \ "psraw $0x4,%%xmm0 \n" \ @@ -2506,17 +2715,43 @@ void OMITFP I422ToRGB24Row_SSSE3(const uint8_t* y_buf, "1: \n" READYUV422 YUVTORGB(yuvconstants) - "punpcklbw %%xmm1,%%xmm0 \n" - "punpcklbw %%xmm2,%%xmm2 \n" - "movdqa %%xmm0,%%xmm1 \n" - "punpcklwd %%xmm2,%%xmm0 \n" - "punpckhwd %%xmm2,%%xmm1 \n" - "pshufb %%xmm5,%%xmm0 \n" - "pshufb %%xmm6,%%xmm1 \n" - "palignr $0xc,%%xmm0,%%xmm1 \n" - "movq %%xmm0,(%[dst_rgb24]) \n" - "movdqu %%xmm1,0x8(%[dst_rgb24]) \n" - "lea 0x18(%[dst_rgb24]),%[dst_rgb24] \n" + STORERGB24 + "subl $0x8,%[width] \n" + "jg 1b \n" + : [y_buf]"+r"(y_buf), // %[y_buf] + [u_buf]"+r"(u_buf), // %[u_buf] + [v_buf]"+r"(v_buf), // %[v_buf] + [dst_rgb24]"+r"(dst_rgb24), // %[dst_rgb24] +#if defined(__i386__) + [width]"+m"(width) // %[width] +#else + [width]"+rm"(width) // %[width] +#endif + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleMaskARGBToRGB24_0]"m"(kShuffleMaskARGBToRGB24_0), + [kShuffleMaskARGBToRGB24]"m"(kShuffleMaskARGBToRGB24) + : "memory", "cc", YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6" + ); +} + +void OMITFP I444ToRGB24Row_SSSE3(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP(yuvconstants) + "movdqa %[kShuffleMaskARGBToRGB24_0],%%xmm5 \n" + "movdqa %[kShuffleMaskARGBToRGB24],%%xmm6 \n" + "sub %[u_buf],%[v_buf] \n" + + LABELALIGN + "1: \n" + READYUV444 + YUVTORGB(yuvconstants) + STORERGB24 "subl $0x8,%[width] \n" "jg 1b \n" : [y_buf]"+r"(y_buf), // %[y_buf] @@ -3178,6 +3413,21 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ "lea 0x10(%[y_buf]),%[y_buf] \n" +#define READYUV422_AVX512BW \ + "vmovdqu (%[u_buf]),%%xmm3 \n" \ + "vmovdqu 0x00(%[u_buf],%[v_buf],1),%%xmm1 \n" \ + "vpermq %%zmm3,%%zmm16,%%zmm3 \n" \ + "vpermq %%zmm1,%%zmm16,%%zmm1 \n" \ + "lea 0x10(%[u_buf]),%[u_buf] \n" \ + "vpunpcklbw %%zmm1,%%zmm3,%%zmm3 \n" \ + "vpermq $0xd8,%%zmm3,%%zmm3 \n" \ + "vpunpcklwd %%zmm3,%%zmm3,%%zmm3 \n" \ + "vmovdqu8 (%[y_buf]),%%ymm4 \n" \ + "vpermq %%zmm4,%%zmm17,%%zmm4 
\n" \ + "vpermq $0xd8,%%zmm4,%%zmm4 \n" \ + "vpunpcklbw %%zmm4,%%zmm4,%%zmm4 \n" \ + "lea 0x20(%[y_buf]),%[y_buf] \n" + // Read 8 UV from 210, upsample to 16 UV // TODO(fbarchard): Consider vshufb to replace pack/unpack // TODO(fbarchard): Consider vunpcklpd to combine the 2 registers into 1. @@ -3192,7 +3442,9 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpackuswb %%ymm3,%%ymm3,%%ymm3 \n" \ "vpunpcklwd %%ymm3,%%ymm3,%%ymm3 \n" \ "vmovdqu (%[y_buf]),%%ymm4 \n" \ - "vpsllw $6,%%ymm4,%%ymm4 \n" \ + "vpsllw $6,%%ymm4,%%ymm2 \n" \ + "vpsrlw $4,%%ymm4,%%ymm4 \n" \ + "vpaddw %%ymm2,%%ymm4,%%ymm4 \n" \ "lea 0x20(%[y_buf]),%[y_buf] \n" // Read 8 UV from 210, upsample to 16 UV. With 16 Alpha. @@ -3207,7 +3459,9 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpackuswb %%ymm3,%%ymm3,%%ymm3 \n" \ "vpunpcklwd %%ymm3,%%ymm3,%%ymm3 \n" \ "vmovdqu (%[y_buf]),%%ymm4 \n" \ - "vpsllw $6,%%ymm4,%%ymm4 \n" \ + "vpsllw $6,%%ymm4,%%ymm2 \n" \ + "vpsrlw $4,%%ymm4,%%ymm4 \n" \ + "vpaddw %%ymm2,%%ymm4,%%ymm4 \n" \ "lea 0x20(%[y_buf]),%[y_buf] \n" \ "vmovdqu (%[a_buf]),%%ymm5 \n" \ "vpsraw $2,%%ymm5,%%ymm5 \n" \ @@ -3225,7 +3479,9 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpunpcklwd %%ymm2,%%ymm3,%%ymm3 \n" \ "vpackuswb %%ymm1,%%ymm3,%%ymm3 \n" \ "vmovdqu (%[y_buf]),%%ymm4 \n" \ - "vpsllw $6,%%ymm4,%%ymm4 \n" \ + "vpsllw $6,%%ymm4,%%ymm2 \n" \ + "vpsrlw $4,%%ymm4,%%ymm4 \n" \ + "vpaddw %%ymm2,%%ymm4,%%ymm4 \n" \ "lea 0x20(%[y_buf]),%[y_buf] \n" // Read 8 UV from 212 12 bit, upsample to 16 UV @@ -3240,7 +3496,9 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpackuswb %%ymm3,%%ymm3,%%ymm3 \n" \ "vpunpcklwd %%ymm3,%%ymm3,%%ymm3 \n" \ "vmovdqu (%[y_buf]),%%ymm4 \n" \ - "vpsllw $0x4,%%ymm4,%%ymm4 \n" \ + "vpsllw $4,%%ymm4,%%ymm2 \n" \ + "vpsrlw $8,%%ymm4,%%ymm4 \n" \ + "vpaddw %%ymm2,%%ymm4,%%ymm4 \n" \ "lea 0x20(%[y_buf]),%[y_buf] \n" // Read 16 UV from 410. With 16 Alpha. 
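The READYUV210/READYUVA210/READYUV410/READYUV212 changes above (like the matching SSE and C hunks earlier in the patch) replace a plain left shift of the Y samples with shift-plus-replicate: the top bits of the sample are fed back into the low bits so that full scale widens to full scale. A small worked check of the two cases, purely as an illustration:

#include <assert.h>
#include <stdint.h>

// 10-bit Y widened to 16 bits: (y << 6) | (y >> 4); 1023 -> 65535, not 65472.
static uint16_t Widen10To16(uint16_t y) {
  return (uint16_t)((y << 6) | (y >> 4));
}

// 12-bit Y widened to 16 bits: (y << 4) | (y >> 8); 4095 -> 65535, not 65520.
static uint16_t Widen12To16(uint16_t y) {
  return (uint16_t)((y << 4) | (y >> 8));
}

int main(void) {
  assert(Widen10To16(0) == 0 && Widen10To16(1023) == 65535);
  assert(Widen12To16(0) == 0 && Widen12To16(4095) == 65535);
  // The vector form adds the two shifted copies (vpsllw/vpsrlw/vpaddw); the
  // shifted fields do not overlap, so add and or give the same result.
  return 0;
}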
@@ -3254,7 +3512,9 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpunpcklwd %%ymm2,%%ymm3,%%ymm3 \n" \ "vpackuswb %%ymm1,%%ymm3,%%ymm3 \n" \ "vmovdqu (%[y_buf]),%%ymm4 \n" \ - "vpsllw $6,%%ymm4,%%ymm4 \n" \ + "vpsllw $6,%%ymm4,%%ymm2 \n" \ + "vpsrlw $4,%%ymm4,%%ymm4 \n" \ + "vpaddw %%ymm2,%%ymm4,%%ymm4 \n" \ "lea 0x20(%[y_buf]),%[y_buf] \n" \ "vmovdqu (%[a_buf]),%%ymm5 \n" \ "vpsraw $2,%%ymm5,%%ymm5 \n" \ @@ -3353,6 +3613,7 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpshufb %[kShuffleUYVYUV], %%ymm3, %%ymm3 \n" \ "lea 0x20(%[uyvy_buf]),%[uyvy_buf] \n" +// TODO(fbarchard): Remove broadcastb #if defined(__x86_64__) #define YUVTORGB_SETUP_AVX2(yuvconstants) \ "vpcmpeqb %%xmm13,%%xmm13,%%xmm13 \n" \ @@ -3364,6 +3625,24 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vmovdqa 96(%[yuvconstants]),%%ymm11 \n" \ "vmovdqa 128(%[yuvconstants]),%%ymm12 \n" +#define YUVTORGB_SETUP_AVX512BW(yuvconstants) \ + "vpcmpeqb %%xmm13,%%xmm13,%%xmm13 \n" \ + "movdqa (%[yuvconstants]),%%xmm8 \n" \ + "vpbroadcastq %%xmm8, %%zmm8 \n" \ + "vpsllw $7,%%xmm13,%%xmm13 \n" \ + "vpbroadcastb %%xmm13,%%zmm13 \n" \ + "movq 32(%[yuvconstants]),%%xmm9 \n" \ + "vpbroadcastq %%xmm9,%%zmm9 \n" \ + "movq 64(%[yuvconstants]),%%xmm10 \n" \ + "vpbroadcastq %%xmm10,%%zmm10 \n" \ + "movq 96(%[yuvconstants]),%%xmm11 \n" \ + "vpbroadcastq %%xmm11,%%zmm11 \n" \ + "movq 128(%[yuvconstants]),%%xmm12 \n" \ + "vpbroadcastq %%xmm12,%%zmm12 \n" \ + "vmovdqu8 (%[quadsplitperm]),%%zmm16 \n" \ + "vmovdqu8 (%[dquadsplitperm]),%%zmm17 \n" \ + "vmovdqu8 (%[unperm]),%%zmm18 \n" + #define YUVTORGB16_AVX2(yuvconstants) \ "vpsubb %%ymm13,%%ymm3,%%ymm3 \n" \ "vpmulhuw %%ymm11,%%ymm4,%%ymm4 \n" \ @@ -3375,7 +3654,20 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpsubsw %%ymm1,%%ymm4,%%ymm1 \n" \ "vpaddsw %%ymm4,%%ymm2,%%ymm2 \n" +#define YUVTORGB16_AVX512BW(yuvconstants) \ + "vpsubb %%zmm13,%%zmm3,%%zmm3 \n" \ + "vpmulhuw %%zmm11,%%zmm4,%%zmm4 \n" \ + "vpmaddubsw %%zmm3,%%zmm8,%%zmm0 \n" \ + "vpmaddubsw %%zmm3,%%zmm9,%%zmm1 \n" \ + "vpmaddubsw %%zmm3,%%zmm10,%%zmm2 \n" \ + "vpaddw %%zmm4,%%zmm12,%%zmm4 \n" \ + "vpaddsw %%zmm4,%%zmm0,%%zmm0 \n" \ + "vpsubsw %%zmm1,%%zmm4,%%zmm1 \n" \ + "vpaddsw %%zmm4,%%zmm2,%%zmm2 \n" + #define YUVTORGB_REGS_AVX2 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", +#define YUVTORGB_REGS_AVX512BW \ + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm16", "xmm17", "xmm18", #else // Convert 16 pixels: 16 UV and 16 Y. @@ -3410,6 +3702,15 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \ "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n" +#define YUVTORGB_AVX512BW(yuvconstants) \ + YUVTORGB16_AVX512BW(yuvconstants) \ + "vpsraw $0x6,%%zmm0,%%zmm0 \n" \ + "vpsraw $0x6,%%zmm1,%%zmm1 \n" \ + "vpsraw $0x6,%%zmm2,%%zmm2 \n" \ + "vpackuswb %%zmm0,%%zmm0,%%zmm0 \n" \ + "vpackuswb %%zmm1,%%zmm1,%%zmm1 \n" \ + "vpackuswb %%zmm2,%%zmm2,%%zmm2 \n" + // Store 16 ARGB values. #define STOREARGB_AVX2 \ "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \ @@ -3422,6 +3723,18 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8_t* y_buf, "vmovdqu %%ymm0,0x20(%[dst_argb]) \n" \ "lea 0x40(%[dst_argb]), %[dst_argb] \n" +// Store 32 ARGB values. 
+#define STOREARGB_AVX512BW \ + "vpunpcklbw %%zmm1,%%zmm0,%%zmm0 \n" \ + "vpermq %%zmm0,%%zmm18,%%zmm0 \n" \ + "vpunpcklbw %%zmm5,%%zmm2,%%zmm2 \n" \ + "vpermq %%zmm2,%%zmm18,%%zmm2 \n" \ + "vpunpcklwd %%zmm2,%%zmm0,%%zmm1 \n" \ + "vpunpckhwd %%zmm2,%%zmm0,%%zmm0 \n" \ + "vmovdqu8 %%zmm1,(%[dst_argb]) \n" \ + "vmovdqu8 %%zmm0,0x40(%[dst_argb]) \n" \ + "lea 0x80(%[dst_argb]), %[dst_argb] \n" + // Store 16 AR30 values. #define STOREAR30_AVX2 \ "vpsraw $0x4,%%ymm0,%%ymm0 \n" \ @@ -3518,6 +3831,50 @@ void OMITFP I422ToARGBRow_AVX2(const uint8_t* y_buf, } #endif // HAS_I422TOARGBROW_AVX2 +#if defined(HAS_I422TOARGBROW_AVX512BW) +static const uint64_t kSplitQuadWords[8] = {0, 2, 2, 2, 1, 2, 2, 2}; +static const uint64_t kSplitDoubleQuadWords[8] = {0, 1, 4, 4, 2, 3, 4, 4}; +static const uint64_t kUnpermuteAVX512[8] = {0, 4, 1, 5, 2, 6, 3, 7}; + +// 32 pixels +// 16 UV values upsampled to 32 UV, mixed with 32 Y producing 32 ARGB (128 +// bytes). +void OMITFP I422ToARGBRow_AVX512BW(const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP_AVX512BW(yuvconstants) + "sub %[u_buf],%[v_buf] \n" + "vpcmpeqb %%xmm5,%%xmm5,%%xmm5 \n" + "vpbroadcastq %%xmm5,%%zmm5 \n" + + LABELALIGN + "1: \n" + READYUV422_AVX512BW + YUVTORGB_AVX512BW(yuvconstants) + STOREARGB_AVX512BW + "sub $0x20,%[width] \n" + "jg 1b \n" + + "vzeroupper \n" + : [y_buf]"+r"(y_buf), // %[y_buf] + [u_buf]"+r"(u_buf), // %[u_buf] + [v_buf]"+r"(v_buf), // %[v_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] + [width]"+rm"(width) // %[width] + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [quadsplitperm]"r"(kSplitQuadWords), // %[quadsplitperm] + [dquadsplitperm]"r"(kSplitDoubleQuadWords), // %[dquadsplitperm] + [unperm]"r"(kUnpermuteAVX512) // %[unperm] + : "memory", "cc", YUVTORGB_REGS_AVX512BW + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_I422TOARGBROW_AVX512BW + #if defined(HAS_I422TOAR30ROW_AVX2) // 16 pixels // 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 AR30 (64 bytes). 
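The hunk that follows adds SSE2/AVX kernels for detiling (DetileRow_SSE2, DetileRow_16_SSE2/AVX, DetileToYUY2_SSE2, DetileSplitUVRow_SSSE3). The operation they vectorize is the one DetileRow_C performs earlier in this patch: walk a row of 16-byte-wide tiles, where horizontally adjacent tiles sit src_tile_stride bytes apart, and copy them out as one linear row. A scalar restatement under that same 16-pixel tile-width assumption; the function name is illustrative:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Copy one row out of a 16-byte-wide tiled layout into a linear buffer.
// Each step reads 16 bytes, then jumps src_tile_stride bytes to the next tile.
static void DetileRowScalar(const uint8_t* src, ptrdiff_t src_tile_stride,
                            uint8_t* dst, int width) {
  int x;
  for (x = 0; x < width - 15; x += 16) {
    memcpy(dst, src, 16);
    dst += 16;
    src += src_tile_stride;
  }
  if (width & 15) {
    memcpy(dst, src, (size_t)(width & 15));  // ragged tail; the SIMD versions
  }                                          // leave this to the ANYDETILE wrappers
}

The ANYDETILE wrappers added in row_any.cc route that tail through a small aligned temporary, so the SIMD kernels only ever see multiples of 16 pixels.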
@@ -4650,6 +5007,141 @@ void SplitUVRow_SSE2(const uint8_t* src_uv, } #endif // HAS_SPLITUVROW_SSE2 +#ifdef HAS_DETILEROW_SSE2 +void DetileRow_SSE2(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width) { + asm volatile( + "1: \n" + "movdqu (%0),%%xmm0 \n" + "sub $0x10,%2 \n" + "lea (%0,%3),%0 \n" + "movdqu %%xmm0,(%1) \n" + "lea 0x10(%1),%1 \n" + "jg 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride) // %3 + : "cc", "memory", "xmm0"); +} +#endif // HAS_DETILEROW_SSE2 + +#ifdef HAS_DETILEROW_16_SSE2 +void DetileRow_16_SSE2(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width) { + asm volatile( + "1: \n" + "movdqu (%0),%%xmm0 \n" + "movdqu 0x10(%0),%%xmm1 \n" + "lea (%0,%3,2),%0 \n" + "movdqu %%xmm0,(%1) \n" + "movdqu %%xmm1,0x10(%1) \n" + "lea 0x20(%1),%1 \n" + "sub $0x10,%2 \n" + "jg 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride) // %3 + : "cc", "memory", "xmm0", "xmm1"); +} +#endif // HAS_DETILEROW_SSE2 + +#ifdef HAS_DETILEROW_16_AVX +void DetileRow_16_AVX(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width) { + asm volatile( + "1: \n" + "vmovdqu (%0),%%ymm0 \n" + "lea (%0,%3,2),%0 \n" + "vmovdqu %%ymm0,(%1) \n" + "lea 0x20(%1),%1 \n" + "sub $0x10,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride) // %3 + : "cc", "memory", "xmm0"); +} +#endif // HAS_DETILEROW_AVX + +#ifdef HAS_DETILETOYUY2_SSE2 +// Read 16 Y, 8 UV, and write 8 YUYV. +void DetileToYUY2_SSE2(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width) { + asm volatile( + "1: \n" + "movdqu (%0),%%xmm0 \n" // Load 16 Y + "sub $0x10,%3 \n" + "lea (%0,%4),%0 \n" + "movdqu (%1),%%xmm1 \n" // Load 8 UV + "lea (%1,%5),%1 \n" + "movdqu %%xmm0,%%xmm2 \n" + "punpcklbw %%xmm1,%%xmm0 \n" + "punpckhbw %%xmm1,%%xmm2 \n" + "movdqu %%xmm0,(%2) \n" + "movdqu %%xmm2,0x10(%2) \n" + "lea 0x20(%2),%2 \n" + "jg 1b \n" + : "+r"(src_y), // %0 + "+r"(src_uv), // %1 + "+r"(dst_yuy2), // %2 + "+r"(width) // %3 + : "r"(src_y_tile_stride), // %4 + "r"(src_uv_tile_stride) // %5 + : "cc", "memory", "xmm0", "xmm1", "xmm2" // Clobber list + ); +} +#endif + +#ifdef HAS_DETILESPLITUVROW_SSSE3 +// TODO(greenjustin): Look into generating these constants instead of loading +// them since this can cause branch mispredicts for fPIC code on 32-bit +// machines. +static const uvec8 kDeinterlaceUV = {0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15}; + +// TODO(greenjustin): Research alternatives to pshufb, since pshufb can be very +// slow on older SSE2 processors. 
+void DetileSplitUVRow_SSSE3(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile( + "movdqu %4,%%xmm1 \n" + "1: \n" + "movdqu (%0),%%xmm0 \n" + "lea (%0, %5),%0 \n" + "pshufb %%xmm1,%%xmm0 \n" + "movq %%xmm0,(%1) \n" + "lea 0x8(%1),%1 \n" + "movhps %%xmm0,(%2) \n" + "lea 0x8(%2),%2 \n" + "sub $0x10,%3 \n" + "jg 1b \n" + : "+r"(src_uv), // %0 + "+r"(dst_u), // %1 + "+r"(dst_v), // %2 + "+r"(width) // %3 + : "m"(kDeinterlaceUV), // %4 + "r"(src_tile_stride) // %5 + : "cc", "memory", "xmm0", "xmm1"); +} +#endif // HAS_DETILESPLITUVROW_SSSE3 + #ifdef HAS_MERGEUVROW_AVX2 void MergeUVRow_AVX2(const uint8_t* src_u, const uint8_t* src_v, @@ -5027,37 +5519,26 @@ void Convert8To16Row_AVX2(const uint8_t* src_y, #endif // HAS_CONVERT8TO16ROW_AVX2 #ifdef HAS_SPLITRGBROW_SSSE3 - // Shuffle table for converting RGB to Planar. -static const uvec8 kShuffleMaskRGBToR0 = {0u, 3u, 6u, 9u, 12u, 15u, - 128u, 128u, 128u, 128u, 128u, 128u, - 128u, 128u, 128u, 128u}; -static const uvec8 kShuffleMaskRGBToR1 = {128u, 128u, 128u, 128u, 128u, 128u, - 2u, 5u, 8u, 11u, 14u, 128u, - 128u, 128u, 128u, 128u}; -static const uvec8 kShuffleMaskRGBToR2 = {128u, 128u, 128u, 128u, 128u, 128u, - 128u, 128u, 128u, 128u, 128u, 1u, - 4u, 7u, 10u, 13u}; - -static const uvec8 kShuffleMaskRGBToG0 = {1u, 4u, 7u, 10u, 13u, 128u, - 128u, 128u, 128u, 128u, 128u, 128u, - 128u, 128u, 128u, 128u}; -static const uvec8 kShuffleMaskRGBToG1 = {128u, 128u, 128u, 128u, 128u, 0u, - 3u, 6u, 9u, 12u, 15u, 128u, - 128u, 128u, 128u, 128u}; -static const uvec8 kShuffleMaskRGBToG2 = {128u, 128u, 128u, 128u, 128u, 128u, - 128u, 128u, 128u, 128u, 128u, 2u, - 5u, 8u, 11u, 14u}; - -static const uvec8 kShuffleMaskRGBToB0 = {2u, 5u, 8u, 11u, 14u, 128u, - 128u, 128u, 128u, 128u, 128u, 128u, - 128u, 128u, 128u, 128u}; -static const uvec8 kShuffleMaskRGBToB1 = {128u, 128u, 128u, 128u, 128u, 1u, - 4u, 7u, 10u, 13u, 128u, 128u, - 128u, 128u, 128u, 128u}; -static const uvec8 kShuffleMaskRGBToB2 = {128u, 128u, 128u, 128u, 128u, 128u, - 128u, 128u, 128u, 128u, 0u, 3u, - 6u, 9u, 12u, 15u}; +static const uvec8 kSplitRGBShuffle[9] = { + {0u, 3u, 6u, 9u, 12u, 15u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, + 128u, 128u}, + {128u, 128u, 128u, 128u, 128u, 128u, 2u, 5u, 8u, 11u, 14u, 128u, 128u, 128u, + 128u, 128u}, + {128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 1u, 4u, + 7u, 10u, 13u}, + {1u, 4u, 7u, 10u, 13u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, + 128u, 128u}, + {128u, 128u, 128u, 128u, 128u, 0u, 3u, 6u, 9u, 12u, 15u, 128u, 128u, 128u, + 128u, 128u}, + {128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 2u, 5u, + 8u, 11u, 14u}, + {2u, 5u, 8u, 11u, 14u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, + 128u, 128u}, + {128u, 128u, 128u, 128u, 128u, 1u, 4u, 7u, 10u, 13u, 128u, 128u, 128u, 128u, + 128u, 128u}, + {128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u, 0u, 3u, 6u, 9u, + 12u, 15u}}; void SplitRGBRow_SSSE3(const uint8_t* src_rgb, uint8_t* dst_r, @@ -5071,9 +5552,9 @@ void SplitRGBRow_SSSE3(const uint8_t* src_rgb, "movdqu (%0),%%xmm0 \n" "movdqu 0x10(%0),%%xmm1 \n" "movdqu 0x20(%0),%%xmm2 \n" - "pshufb %5, %%xmm0 \n" - "pshufb %6, %%xmm1 \n" - "pshufb %7, %%xmm2 \n" + "pshufb 0(%5), %%xmm0 \n" + "pshufb 16(%5), %%xmm1 \n" + "pshufb 32(%5), %%xmm2 \n" "por %%xmm1,%%xmm0 \n" "por %%xmm2,%%xmm0 \n" "movdqu %%xmm0,(%1) \n" @@ -5082,9 +5563,9 @@ void SplitRGBRow_SSSE3(const uint8_t* src_rgb, "movdqu (%0),%%xmm0 \n" "movdqu 
0x10(%0),%%xmm1 \n" "movdqu 0x20(%0),%%xmm2 \n" - "pshufb %8, %%xmm0 \n" - "pshufb %9, %%xmm1 \n" - "pshufb %10, %%xmm2 \n" + "pshufb 48(%5),%%xmm0 \n" + "pshufb 64(%5),%%xmm1 \n" + "pshufb 80(%5), %%xmm2 \n" "por %%xmm1,%%xmm0 \n" "por %%xmm2,%%xmm0 \n" "movdqu %%xmm0,(%2) \n" @@ -5093,9 +5574,9 @@ void SplitRGBRow_SSSE3(const uint8_t* src_rgb, "movdqu (%0),%%xmm0 \n" "movdqu 0x10(%0),%%xmm1 \n" "movdqu 0x20(%0),%%xmm2 \n" - "pshufb %11, %%xmm0 \n" - "pshufb %12, %%xmm1 \n" - "pshufb %13, %%xmm2 \n" + "pshufb 96(%5), %%xmm0 \n" + "pshufb 112(%5), %%xmm1 \n" + "pshufb 128(%5), %%xmm2 \n" "por %%xmm1,%%xmm0 \n" "por %%xmm2,%%xmm0 \n" "movdqu %%xmm0,(%3) \n" @@ -5108,51 +5589,32 @@ void SplitRGBRow_SSSE3(const uint8_t* src_rgb, "+r"(dst_g), // %2 "+r"(dst_b), // %3 "+r"(width) // %4 - : "m"(kShuffleMaskRGBToR0), // %5 - "m"(kShuffleMaskRGBToR1), // %6 - "m"(kShuffleMaskRGBToR2), // %7 - "m"(kShuffleMaskRGBToG0), // %8 - "m"(kShuffleMaskRGBToG1), // %9 - "m"(kShuffleMaskRGBToG2), // %10 - "m"(kShuffleMaskRGBToB0), // %11 - "m"(kShuffleMaskRGBToB1), // %12 - "m"(kShuffleMaskRGBToB2) // %13 + : "r"(&kSplitRGBShuffle[0]) // %5 : "memory", "cc", "xmm0", "xmm1", "xmm2"); } #endif // HAS_SPLITRGBROW_SSSE3 #ifdef HAS_MERGERGBROW_SSSE3 - -// Shuffle table for converting RGB to Planar. -static const uvec8 kShuffleMaskRToRGB0 = {0u, 128u, 128u, 1u, 128u, 128u, - 2u, 128u, 128u, 3u, 128u, 128u, - 4u, 128u, 128u, 5u}; -static const uvec8 kShuffleMaskGToRGB0 = {128u, 0u, 128u, 128u, 1u, 128u, - 128u, 2u, 128u, 128u, 3u, 128u, - 128u, 4u, 128u, 128u}; -static const uvec8 kShuffleMaskBToRGB0 = {128u, 128u, 0u, 128u, 128u, 1u, - 128u, 128u, 2u, 128u, 128u, 3u, - 128u, 128u, 4u, 128u}; - -static const uvec8 kShuffleMaskGToRGB1 = {5u, 128u, 128u, 6u, 128u, 128u, - 7u, 128u, 128u, 8u, 128u, 128u, - 9u, 128u, 128u, 10u}; -static const uvec8 kShuffleMaskBToRGB1 = {128u, 5u, 128u, 128u, 6u, 128u, - 128u, 7u, 128u, 128u, 8u, 128u, - 128u, 9u, 128u, 128u}; -static const uvec8 kShuffleMaskRToRGB1 = {128u, 128u, 6u, 128u, 128u, 7u, - 128u, 128u, 8u, 128u, 128u, 9u, - 128u, 128u, 10u, 128u}; - -static const uvec8 kShuffleMaskBToRGB2 = {10u, 128u, 128u, 11u, 128u, 128u, - 12u, 128u, 128u, 13u, 128u, 128u, - 14u, 128u, 128u, 15u}; -static const uvec8 kShuffleMaskRToRGB2 = {128u, 11u, 128u, 128u, 12u, 128u, - 128u, 13u, 128u, 128u, 14u, 128u, - 128u, 15u, 128u, 128u}; -static const uvec8 kShuffleMaskGToRGB2 = {128u, 128u, 11u, 128u, 128u, 12u, - 128u, 128u, 13u, 128u, 128u, 14u, - 128u, 128u, 15u, 128u}; +// Shuffle table for converting Planar to RGB. 
+static const uvec8 kMergeRGBShuffle[9] = { + {0u, 128u, 128u, 1u, 128u, 128u, 2u, 128u, 128u, 3u, 128u, 128u, 4u, 128u, + 128u, 5u}, + {128u, 0u, 128u, 128u, 1u, 128u, 128u, 2u, 128u, 128u, 3u, 128u, 128u, 4u, + 128u, 128u}, + {128u, 128u, 0u, 128u, 128u, 1u, 128u, 128u, 2u, 128u, 128u, 3u, 128u, 128u, + 4u, 128u}, + {128u, 128u, 6u, 128u, 128u, 7u, 128u, 128u, 8u, 128u, 128u, 9u, 128u, 128u, + 10u, 128u}, + {5u, 128u, 128u, 6u, 128u, 128u, 7u, 128u, 128u, 8u, 128u, 128u, 9u, 128u, + 128u, 10u}, + {128u, 5u, 128u, 128u, 6u, 128u, 128u, 7u, 128u, 128u, 8u, 128u, 128u, 9u, + 128u, 128u}, + {128u, 11u, 128u, 128u, 12u, 128u, 128u, 13u, 128u, 128u, 14u, 128u, 128u, + 15u, 128u, 128u}, + {128u, 128u, 11u, 128u, 128u, 12u, 128u, 128u, 13u, 128u, 128u, 14u, 128u, + 128u, 15u, 128u}, + {10u, 128u, 128u, 11u, 128u, 128u, 12u, 128u, 128u, 13u, 128u, 128u, 14u, + 128u, 128u, 15u}}; void MergeRGBRow_SSSE3(const uint8_t* src_r, const uint8_t* src_g, @@ -5166,9 +5628,9 @@ void MergeRGBRow_SSSE3(const uint8_t* src_r, "movdqu (%0),%%xmm0 \n" "movdqu (%1),%%xmm1 \n" "movdqu (%2),%%xmm2 \n" - "pshufb %5, %%xmm0 \n" - "pshufb %6, %%xmm1 \n" - "pshufb %7, %%xmm2 \n" + "pshufb (%5), %%xmm0 \n" + "pshufb 16(%5), %%xmm1 \n" + "pshufb 32(%5), %%xmm2 \n" "por %%xmm1,%%xmm0 \n" "por %%xmm2,%%xmm0 \n" "movdqu %%xmm0,(%3) \n" @@ -5176,9 +5638,9 @@ void MergeRGBRow_SSSE3(const uint8_t* src_r, "movdqu (%0),%%xmm0 \n" "movdqu (%1),%%xmm1 \n" "movdqu (%2),%%xmm2 \n" - "pshufb %8, %%xmm0 \n" - "pshufb %9, %%xmm1 \n" - "pshufb %10, %%xmm2 \n" + "pshufb 48(%5), %%xmm0 \n" + "pshufb 64(%5), %%xmm1 \n" + "pshufb 80(%5), %%xmm2 \n" "por %%xmm1,%%xmm0 \n" "por %%xmm2,%%xmm0 \n" "movdqu %%xmm0,16(%3) \n" @@ -5186,9 +5648,9 @@ void MergeRGBRow_SSSE3(const uint8_t* src_r, "movdqu (%0),%%xmm0 \n" "movdqu (%1),%%xmm1 \n" "movdqu (%2),%%xmm2 \n" - "pshufb %11, %%xmm0 \n" - "pshufb %12, %%xmm1 \n" - "pshufb %13, %%xmm2 \n" + "pshufb 96(%5), %%xmm0 \n" + "pshufb 112(%5), %%xmm1 \n" + "pshufb 128(%5), %%xmm2 \n" "por %%xmm1,%%xmm0 \n" "por %%xmm2,%%xmm0 \n" "movdqu %%xmm0,32(%3) \n" @@ -5204,15 +5666,7 @@ void MergeRGBRow_SSSE3(const uint8_t* src_r, "+r"(src_b), // %2 "+r"(dst_rgb), // %3 "+r"(width) // %4 - : "m"(kShuffleMaskRToRGB0), // %5 - "m"(kShuffleMaskGToRGB0), // %6 - "m"(kShuffleMaskBToRGB0), // %7 - "m"(kShuffleMaskRToRGB1), // %8 - "m"(kShuffleMaskGToRGB1), // %9 - "m"(kShuffleMaskBToRGB1), // %10 - "m"(kShuffleMaskRToRGB2), // %11 - "m"(kShuffleMaskGToRGB2), // %12 - "m"(kShuffleMaskBToRGB2) // %13 + : "r"(&kMergeRGBShuffle[0]) // %5 : "memory", "cc", "xmm0", "xmm1", "xmm2"); } #endif // HAS_MERGERGBROW_SSSE3 @@ -6045,6 +6499,7 @@ void CopyRow_AVX(const uint8_t* src, uint8_t* dst, int width) { "lea 0x40(%1),%1 \n" "sub $0x40,%2 \n" "jg 1b \n" + "vzeroupper \n" : "+r"(src), // %0 "+r"(dst), // %1 "+r"(width) // %2 @@ -6328,6 +6783,33 @@ void YUY2ToYRow_SSE2(const uint8_t* src_yuy2, uint8_t* dst_y, int width) { : "memory", "cc", "xmm0", "xmm1", "xmm5"); } +void YUY2ToNVUVRow_SSE2(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width) { + asm volatile(LABELALIGN + "1: \n" + "movdqu (%0),%%xmm0 \n" + "movdqu 0x10(%0),%%xmm1 \n" + "movdqu 0x00(%0,%3,1),%%xmm2 \n" + "movdqu 0x10(%0,%3,1),%%xmm3 \n" + "lea 0x20(%0),%0 \n" + "pavgb %%xmm2,%%xmm0 \n" + "pavgb %%xmm3,%%xmm1 \n" + "psrlw $0x8,%%xmm0 \n" + "psrlw $0x8,%%xmm1 \n" + "packuswb %%xmm1,%%xmm0 \n" + "movdqu %%xmm0,(%1) \n" + "lea 0x10(%1),%1 \n" + "sub $0x10,%2 \n" + "jg 1b \n" + : "+r"(src_yuy2), // %0 + "+r"(dst_uv), // %1 + "+r"(width) // %2 + : 
"r"((intptr_t)(stride_yuy2)) // %3 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3"); +} + void YUY2ToUVRow_SSE2(const uint8_t* src_yuy2, int stride_yuy2, uint8_t* dst_u, @@ -6528,6 +7010,35 @@ void YUY2ToYRow_AVX2(const uint8_t* src_yuy2, uint8_t* dst_y, int width) { : "memory", "cc", "xmm0", "xmm1", "xmm5"); } +void YUY2ToNVUVRow_AVX2(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width) { + asm volatile( + + LABELALIGN + "1: \n" + "vmovdqu (%0),%%ymm0 \n" + "vmovdqu 0x20(%0),%%ymm1 \n" + "vpavgb 0x00(%0,%3,1),%%ymm0,%%ymm0 \n" + "vpavgb 0x20(%0,%3,1),%%ymm1,%%ymm1 \n" + "lea 0x40(%0),%0 \n" + "vpsrlw $0x8,%%ymm0,%%ymm0 \n" + "vpsrlw $0x8,%%ymm1,%%ymm1 \n" + "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vmovdqu %%ymm0,(%1) \n" + "lea 0x20(%1),%1 \n" + "sub $0x20,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_yuy2), // %0 + "+r"(dst_uv), // %1 + "+r"(width) // %2 + : "r"((intptr_t)(stride_yuy2)) // %3 + : "memory", "cc", "xmm0", "xmm1"); +} + void YUY2ToUVRow_AVX2(const uint8_t* src_yuy2, int stride_yuy2, uint8_t* dst_u, @@ -8121,7 +8632,7 @@ void ARGBAffineRow_SSE2(const uint8_t* src_argb, void InterpolateRow_SSSE3(uint8_t* dst_ptr, const uint8_t* src_ptr, ptrdiff_t src_stride, - int dst_width, + int width, int source_y_fraction) { asm volatile( "sub %1,%0 \n" @@ -8190,7 +8701,7 @@ void InterpolateRow_SSSE3(uint8_t* dst_ptr, "99: \n" : "+r"(dst_ptr), // %0 "+r"(src_ptr), // %1 - "+rm"(dst_width), // %2 + "+rm"(width), // %2 "+r"(source_y_fraction) // %3 : "r"((intptr_t)(src_stride)) // %4 : "memory", "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"); @@ -8202,12 +8713,12 @@ void InterpolateRow_SSSE3(uint8_t* dst_ptr, void InterpolateRow_AVX2(uint8_t* dst_ptr, const uint8_t* src_ptr, ptrdiff_t src_stride, - int dst_width, + int width, int source_y_fraction) { asm volatile( + "sub %1,%0 \n" "cmp $0x0,%3 \n" "je 100f \n" - "sub %1,%0 \n" "cmp $0x80,%3 \n" "je 50f \n" @@ -8258,15 +8769,17 @@ void InterpolateRow_AVX2(uint8_t* dst_ptr, // Blend 100 / 0 - Copy row unchanged. LABELALIGN "100: \n" - "rep movsb \n" - "jmp 999f \n" + "vmovdqu (%1),%%ymm0 \n" + "vmovdqu %%ymm0,0x00(%1,%0,1) \n" + "lea 0x20(%1),%1 \n" + "sub $0x20,%2 \n" + "jg 100b \n" "99: \n" "vzeroupper \n" - "999: \n" - : "+D"(dst_ptr), // %0 - "+S"(src_ptr), // %1 - "+cm"(dst_width), // %2 + : "+r"(dst_ptr), // %0 + "+r"(src_ptr), // %1 + "+r"(width), // %2 "+r"(source_y_fraction) // %3 : "r"((intptr_t)(src_stride)) // %4 : "memory", "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm4", "xmm5"); @@ -8891,127 +9404,134 @@ void ARGBLumaColorTableRow_SSSE3(const uint8_t* src_argb, } #endif // HAS_ARGBLUMACOLORTABLEROW_SSSE3 -#ifdef HAS_NV21TOYUV24ROW_AVX2 +static const uvec8 kYUV24Shuffle[3] = { + {8, 9, 0, 8, 9, 1, 10, 11, 2, 10, 11, 3, 12, 13, 4, 12}, + {9, 1, 10, 11, 2, 10, 11, 3, 12, 13, 4, 12, 13, 5, 14, 15}, + {2, 10, 11, 3, 12, 13, 4, 12, 13, 5, 14, 15, 6, 14, 15, 7}}; -// begin NV21ToYUV24Row_C avx2 constants -static const ulvec8 kBLEND0 = {0x80, 0x00, 0x80, 0x80, 0x00, 0x80, 0x80, 0x00, - 0x80, 0x80, 0x00, 0x80, 0x80, 0x00, 0x80, 0x80, - 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, - 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, 0x00}; +// Convert biplanar NV21 to packed YUV24 +// NV21 has VU in memory for chroma. 
+// YUV24 is VUY in memory +void NV21ToYUV24Row_SSSE3(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_yuv24, + int width) { + asm volatile( + "sub %0,%1 \n" + "movdqa (%4),%%xmm4 \n" // 3 shuffler constants + "movdqa 16(%4),%%xmm5 \n" + "movdqa 32(%4),%%xmm6 \n" + "1: \n" + "movdqu (%0),%%xmm2 \n" // load 16 Y values + "movdqu (%0,%1),%%xmm3 \n" // load 8 VU values + "lea 16(%0),%0 \n" + "movdqa %%xmm2,%%xmm0 \n" + "movdqa %%xmm2,%%xmm1 \n" + "shufps $0x44,%%xmm3,%%xmm0 \n" // Y 0..7, UV 0..3 + "shufps $0x99,%%xmm3,%%xmm1 \n" // Y 4..11, UV 2..5 + "shufps $0xee,%%xmm3,%%xmm2 \n" // Y 8..15, UV 4..7 + "pshufb %%xmm4, %%xmm0 \n" // weave into YUV24 + "pshufb %%xmm5, %%xmm1 \n" + "pshufb %%xmm6, %%xmm2 \n" + "movdqu %%xmm0,(%2) \n" + "movdqu %%xmm1,16(%2) \n" + "movdqu %%xmm2,32(%2) \n" + "lea 48(%2),%2 \n" + "sub $16,%3 \n" // 16 pixels per loop + "jg 1b \n" + : "+r"(src_y), // %0 + "+r"(src_vu), // %1 + "+r"(dst_yuv24), // %2 + "+r"(width) // %3 + : "r"(&kYUV24Shuffle[0]) // %4 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); +} -static const ulvec8 kBLEND1 = {0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, - 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, - 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80}; - -static const ulvec8 kBLEND2 = {0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, - 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, - 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0x00}; - -static const ulvec8 kSHUF0 = {0x00, 0x0b, 0x80, 0x01, 0x0c, 0x80, 0x02, 0x0d, - 0x80, 0x03, 0x0e, 0x80, 0x04, 0x0f, 0x80, 0x05, - 0x00, 0x0b, 0x80, 0x01, 0x0c, 0x80, 0x02, 0x0d, - 0x80, 0x03, 0x0e, 0x80, 0x04, 0x0f, 0x80, 0x05}; - -static const ulvec8 kSHUF1 = {0x80, 0x00, 0x0b, 0x80, 0x01, 0x0c, 0x80, 0x02, - 0x0d, 0x80, 0x03, 0x0e, 0x80, 0x04, 0x0f, 0x80, - 0x80, 0x00, 0x0b, 0x80, 0x01, 0x0c, 0x80, 0x02, - 0x0d, 0x80, 0x03, 0x0e, 0x80, 0x04, 0x0f, 0x80}; - -static const ulvec8 kSHUF2 = {0x0a, 0x80, 0x00, 0x0b, 0x80, 0x01, 0x0c, 0x80, - 0x02, 0x0d, 0x80, 0x03, 0x0e, 0x80, 0x04, 0x0f, - 0x0a, 0x80, 0x00, 0x0b, 0x80, 0x01, 0x0c, 0x80, - 0x02, 0x0d, 0x80, 0x03, 0x0e, 0x80, 0x04, 0x0f}; - -static const ulvec8 kSHUF3 = {0x80, 0x80, 0x06, 0x80, 0x80, 0x07, 0x80, 0x80, - 0x08, 0x80, 0x80, 0x09, 0x80, 0x80, 0x0a, 0x80, - 0x80, 0x80, 0x06, 0x80, 0x80, 0x07, 0x80, 0x80, - 0x08, 0x80, 0x80, 0x09, 0x80, 0x80, 0x0a, 0x80}; - -static const ulvec8 kSHUF4 = {0x05, 0x80, 0x80, 0x06, 0x80, 0x80, 0x07, 0x80, - 0x80, 0x08, 0x80, 0x80, 0x09, 0x80, 0x80, 0x0a, - 0x05, 0x80, 0x80, 0x06, 0x80, 0x80, 0x07, 0x80, - 0x80, 0x08, 0x80, 0x80, 0x09, 0x80, 0x80, 0x0a}; - -static const ulvec8 kSHUF5 = {0x80, 0x05, 0x80, 0x80, 0x06, 0x80, 0x80, 0x07, - 0x80, 0x80, 0x08, 0x80, 0x80, 0x09, 0x80, 0x80, - 0x80, 0x05, 0x80, 0x80, 0x06, 0x80, 0x80, 0x07, - 0x80, 0x80, 0x08, 0x80, 0x80, 0x09, 0x80, 0x80}; - -// NV21ToYUV24Row_AVX2 +// Convert biplanar NV21 to packed YUV24 +// NV21 has VU in memory for chroma. 
+// YUV24 is VUY in memory void NV21ToYUV24Row_AVX2(const uint8_t* src_y, const uint8_t* src_vu, uint8_t* dst_yuv24, int width) { - uint8_t* src_y_ptr; - uint64_t src_offset = 0; - uint64_t width64; - - width64 = width; - src_y_ptr = (uint8_t*)src_y; - asm volatile( - "vmovdqu %5, %%ymm0 \n" // init blend value - "vmovdqu %6, %%ymm1 \n" // init blend value - "vmovdqu %7, %%ymm2 \n" // init blend value - // "sub $0x20, %3 \n" //sub 32 from - // width for final loop + "sub %0,%1 \n" + "vbroadcastf128 (%4),%%ymm4 \n" // 3 shuffler constants + "vbroadcastf128 16(%4),%%ymm5 \n" + "vbroadcastf128 32(%4),%%ymm6 \n" - LABELALIGN - "1: \n" // label 1 - "vmovdqu (%0,%4), %%ymm3 \n" // src_y - "vmovdqu 1(%1,%4), %%ymm4 \n" // src_uv+1 - "vmovdqu (%1), %%ymm5 \n" // src_uv - "vpshufb %8, %%ymm3, %%ymm13 \n" // y, kSHUF0 for shuf - "vpshufb %9, %%ymm4, %%ymm14 \n" // uv+1, kSHUF1 for - // shuf - "vpshufb %10, %%ymm5, %%ymm15 \n" // uv, kSHUF2 for - // shuf - "vpshufb %11, %%ymm3, %%ymm3 \n" // y kSHUF3 for shuf - "vpshufb %12, %%ymm4, %%ymm4 \n" // uv+1 kSHUF4 for - // shuf - "vpblendvb %%ymm0, %%ymm14, %%ymm13, %%ymm12 \n" // blend 0 - "vpblendvb %%ymm0, %%ymm13, %%ymm14, %%ymm14 \n" // blend 0 - "vpblendvb %%ymm2, %%ymm15, %%ymm12, %%ymm12 \n" // blend 2 - "vpblendvb %%ymm1, %%ymm15, %%ymm14, %%ymm13 \n" // blend 1 - "vpshufb %13, %%ymm5, %%ymm15 \n" // shuffle const - "vpor %%ymm4, %%ymm3, %%ymm5 \n" // get results - "vmovdqu %%ymm12, 0x20(%2) \n" // store dst_yuv+20h - "vpor %%ymm15, %%ymm5, %%ymm3 \n" // get results - "add $0x20, %4 \n" // add to src buffer - // ptr - "vinserti128 $0x1, %%xmm3, %%ymm13, %%ymm4 \n" // insert - "vperm2i128 $0x31, %%ymm13, %%ymm3, %%ymm5 \n" // insert - "vmovdqu %%ymm4, (%2) \n" // store dst_yuv - "vmovdqu %%ymm5, 0x40(%2) \n" // store dst_yuv+40h - "add $0x60,%2 \n" // add to dst buffer - // ptr - // "cmp %3, %4 \n" //(width64 - - // 32 bytes) and src_offset - "sub $0x20,%3 \n" // 32 pixels per loop + "1: \n" + "vmovdqu (%0),%%ymm2 \n" // load 32 Y values + "vmovdqu (%0,%1),%%ymm3 \n" // load 16 VU values + "lea 32(%0),%0 \n" + "vshufps $0x44,%%ymm3,%%ymm2,%%ymm0 \n" // Y 0..7, UV 0..3 + "vshufps $0x99,%%ymm3,%%ymm2,%%ymm1 \n" // Y 4..11, UV 2..5 + "vshufps $0xee,%%ymm3,%%ymm2,%%ymm2 \n" // Y 8..15, UV 4..7 + "vpshufb %%ymm4,%%ymm0,%%ymm0 \n" // weave into YUV24 + "vpshufb %%ymm5,%%ymm1,%%ymm1 \n" + "vpshufb %%ymm6,%%ymm2,%%ymm2 \n" + "vperm2i128 $0x20,%%ymm1,%%ymm0,%%ymm3 \n" + "vperm2i128 $0x30,%%ymm0,%%ymm2,%%ymm0 \n" + "vperm2i128 $0x31,%%ymm2,%%ymm1,%%ymm1 \n" + "vmovdqu %%ymm3,(%2) \n" + "vmovdqu %%ymm0,32(%2) \n" + "vmovdqu %%ymm1,64(%2) \n" + "lea 96(%2),%2 \n" + "sub $32,%3 \n" // 32 pixels per loop "jg 1b \n" - "vzeroupper \n" // sse-avx2 - // transistions - - : "+r"(src_y), //%0 - "+r"(src_vu), //%1 - "+r"(dst_yuv24), //%2 - "+r"(width64), //%3 - "+r"(src_offset) //%4 - : "m"(kBLEND0), //%5 - "m"(kBLEND1), //%6 - "m"(kBLEND2), //%7 - "m"(kSHUF0), //%8 - "m"(kSHUF1), //%9 - "m"(kSHUF2), //%10 - "m"(kSHUF3), //%11 - "m"(kSHUF4), //%12 - "m"(kSHUF5) //%13 - : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm12", - "xmm13", "xmm14", "xmm15"); + "vzeroupper \n" + : "+r"(src_y), // %0 + "+r"(src_vu), // %1 + "+r"(dst_yuv24), // %2 + "+r"(width) // %3 + : "r"(&kYUV24Shuffle[0]) // %4 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); } -#endif // HAS_NV21TOYUV24ROW_AVX2 + +#ifdef HAS_NV21ToYUV24ROW_AVX512 +// The following VMBI VEX256 code tests okay with the intelsde emulator. 
+static const lvec8 kYUV24Perm[3] = { + {32, 33, 0, 32, 33, 1, 34, 35, 2, 34, 35, 3, 36, 37, 4, 36, + 37, 5, 38, 39, 6, 38, 39, 7, 40, 41, 8, 40, 41, 9, 42, 43}, + {10, 42, 43, 11, 44, 45, 12, 44, 45, 13, 46, 47, 14, 46, 47, 15, + 48, 49, 16, 48, 49, 17, 50, 51, 18, 50, 51, 19, 52, 53, 20, 52}, + {53, 21, 54, 55, 22, 54, 55, 23, 56, 57, 24, 56, 57, 25, 58, 59, + 26, 58, 59, 27, 60, 61, 28, 60, 61, 29, 62, 63, 30, 62, 63, 31}}; + +void NV21ToYUV24Row_AVX512(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_yuv24, + int width) { + asm volatile( + "sub %0,%1 \n" + "vmovdqa (%4),%%ymm4 \n" // 3 shuffler constants + "vmovdqa 32(%4),%%ymm5 \n" + "vmovdqa 64(%4),%%ymm6 \n" LABELALIGN + "1: \n" + "vmovdqu (%0),%%ymm2 \n" // load 32 Y values + "vmovdqu (%0,%1),%%ymm3 \n" // load 16 VU values + "lea 32(%0),%0 \n" + "vmovdqa %%ymm2, %%ymm0 \n" + "vmovdqa %%ymm2, %%ymm1 \n" + "vpermt2b %%ymm3,%%ymm4,%%ymm0 \n" + "vpermt2b %%ymm3,%%ymm5,%%ymm1 \n" + "vpermt2b %%ymm3,%%ymm6,%%ymm2 \n" + "vmovdqu %%ymm0,(%2) \n" + "vmovdqu %%ymm1,32(%2) \n" + "vmovdqu %%ymm2,64(%2) \n" + "lea 96(%2),%2 \n" + "sub $32,%3 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_y), // %0 + "+r"(src_vu), // %1 + "+r"(dst_yuv24), // %2 + "+r"(width) // %3 + : "r"(&kYUV24Perm[0]) // %4 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); +} + +#endif // HAS_NV21ToYUV24ROW_AVX512 #ifdef HAS_SWAPUVROW_SSSE3 diff --git a/third-party/libyuv/third_party/libyuv/source/row_lasx.cc b/third-party/libyuv/third_party/libyuv/source/row_lasx.cc new file mode 100644 index 0000000000..7dd18f40e0 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/source/row_lasx.cc @@ -0,0 +1,2230 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Copyright (c) 2022 Loongson Technology Corporation Limited + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "libyuv/row.h" + +#if !defined(LIBYUV_DISABLE_LASX) && defined(__loongarch_asx) +#include "libyuv/loongson_intrinsics.h" + +#ifdef __cplusplus +namespace libyuv { +extern "C" { +#endif + +#define ALPHA_VAL (-1) + +// Fill YUV -> RGB conversion constants into vectors +#define YUVTORGB_SETUP(yuvconst, ub, vr, ug, vg, yg, yb) \ + { \ + ub = __lasx_xvreplgr2vr_h(yuvconst->kUVToB[0]); \ + vr = __lasx_xvreplgr2vr_h(yuvconst->kUVToR[1]); \ + ug = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[0]); \ + vg = __lasx_xvreplgr2vr_h(yuvconst->kUVToG[1]); \ + yg = __lasx_xvreplgr2vr_h(yuvconst->kYToRgb[0]); \ + yb = __lasx_xvreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \ + } + +// Load 32 YUV422 pixel data +#define READYUV422_D(psrc_y, psrc_u, psrc_v, out_y, uv_l, uv_h) \ + { \ + __m256i temp0, temp1; \ + \ + DUP2_ARG2(__lasx_xvld, psrc_y, 0, psrc_u, 0, out_y, temp0); \ + temp1 = __lasx_xvld(psrc_v, 0); \ + temp0 = __lasx_xvsub_b(temp0, const_0x80); \ + temp1 = __lasx_xvsub_b(temp1, const_0x80); \ + temp0 = __lasx_vext2xv_h_b(temp0); \ + temp1 = __lasx_vext2xv_h_b(temp1); \ + uv_l = __lasx_xvilvl_h(temp0, temp1); \ + uv_h = __lasx_xvilvh_h(temp0, temp1); \ + } + +// Load 16 YUV422 pixel data +#define READYUV422(psrc_y, psrc_u, psrc_v, out_y, uv) \ + { \ + __m256i temp0, temp1; \ + \ + out_y = __lasx_xvld(psrc_y, 0); \ + temp0 = __lasx_xvldrepl_d(psrc_u, 0); \ + temp1 = __lasx_xvldrepl_d(psrc_v, 0); \ + uv = __lasx_xvilvl_b(temp0, temp1); \ + uv = __lasx_xvsub_b(uv, const_0x80); \ + uv = __lasx_vext2xv_h_b(uv); \ + } + +// Convert 16 pixels of YUV420 to RGB. +#define YUVTORGB_D(in_y, in_uvl, in_uvh, ubvr, ugvg, yg, yb, b_l, b_h, g_l, \ + g_h, r_l, r_h) \ + { \ + __m256i u_l, u_h, v_l, v_h; \ + __m256i yl_ev, yl_od, yh_ev, yh_od; \ + __m256i temp0, temp1, temp2, temp3; \ + \ + temp0 = __lasx_xvilvl_b(in_y, in_y); \ + temp1 = __lasx_xvilvh_b(in_y, in_y); \ + yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \ + yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \ + yh_ev = __lasx_xvmulwev_w_hu_h(temp1, yg); \ + yh_od = __lasx_xvmulwod_w_hu_h(temp1, yg); \ + DUP4_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yh_ev, 16, yh_od, 16, \ + yl_ev, yl_od, yh_ev, yh_od); \ + yl_ev = __lasx_xvadd_w(yl_ev, yb); \ + yl_od = __lasx_xvadd_w(yl_od, yb); \ + yh_ev = __lasx_xvadd_w(yh_ev, yb); \ + yh_od = __lasx_xvadd_w(yh_od, yb); \ + v_l = __lasx_xvmulwev_w_h(in_uvl, ubvr); \ + u_l = __lasx_xvmulwod_w_h(in_uvl, ubvr); \ + v_h = __lasx_xvmulwev_w_h(in_uvh, ubvr); \ + u_h = __lasx_xvmulwod_w_h(in_uvh, ubvr); \ + temp0 = __lasx_xvadd_w(yl_ev, u_l); \ + temp1 = __lasx_xvadd_w(yl_od, u_l); \ + temp2 = __lasx_xvadd_w(yh_ev, u_h); \ + temp3 = __lasx_xvadd_w(yh_od, u_h); \ + DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \ + temp1, temp2, temp3); \ + DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \ + temp2, temp3); \ + b_l = __lasx_xvpackev_h(temp1, temp0); \ + b_h = __lasx_xvpackev_h(temp3, temp2); \ + temp0 = __lasx_xvadd_w(yl_ev, v_l); \ + temp1 = __lasx_xvadd_w(yl_od, v_l); \ + temp2 = __lasx_xvadd_w(yh_ev, v_h); \ + temp3 = __lasx_xvadd_w(yh_od, v_h); \ + DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \ + temp1, temp2, temp3); \ + DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \ + temp2, temp3); \ + r_l = __lasx_xvpackev_h(temp1, temp0); \ + r_h = __lasx_xvpackev_h(temp3, temp2); \ + DUP2_ARG2(__lasx_xvdp2_w_h, in_uvl, ugvg, in_uvh, ugvg, u_l, u_h); \ + temp0 = __lasx_xvsub_w(yl_ev, u_l); \ + temp1 = __lasx_xvsub_w(yl_od, u_l); \ + temp2 
= __lasx_xvsub_w(yh_ev, u_h); \ + temp3 = __lasx_xvsub_w(yh_od, u_h); \ + DUP4_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp2, 6, temp3, 6, temp0, \ + temp1, temp2, temp3); \ + DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3, temp0, temp1, \ + temp2, temp3); \ + g_l = __lasx_xvpackev_h(temp1, temp0); \ + g_h = __lasx_xvpackev_h(temp3, temp2); \ + } + +// Convert 8 pixels of YUV420 to RGB. +#define YUVTORGB(in_y, in_uv, ubvr, ugvg, yg, yb, out_b, out_g, out_r) \ + { \ + __m256i u_l, v_l, yl_ev, yl_od; \ + __m256i temp0, temp1; \ + \ + in_y = __lasx_xvpermi_d(in_y, 0xD8); \ + temp0 = __lasx_xvilvl_b(in_y, in_y); \ + yl_ev = __lasx_xvmulwev_w_hu_h(temp0, yg); \ + yl_od = __lasx_xvmulwod_w_hu_h(temp0, yg); \ + DUP2_ARG2(__lasx_xvsrai_w, yl_ev, 16, yl_od, 16, yl_ev, yl_od); \ + yl_ev = __lasx_xvadd_w(yl_ev, yb); \ + yl_od = __lasx_xvadd_w(yl_od, yb); \ + v_l = __lasx_xvmulwev_w_h(in_uv, ubvr); \ + u_l = __lasx_xvmulwod_w_h(in_uv, ubvr); \ + temp0 = __lasx_xvadd_w(yl_ev, u_l); \ + temp1 = __lasx_xvadd_w(yl_od, u_l); \ + DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \ + DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \ + out_b = __lasx_xvpackev_h(temp1, temp0); \ + temp0 = __lasx_xvadd_w(yl_ev, v_l); \ + temp1 = __lasx_xvadd_w(yl_od, v_l); \ + DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \ + DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \ + out_r = __lasx_xvpackev_h(temp1, temp0); \ + u_l = __lasx_xvdp2_w_h(in_uv, ugvg); \ + temp0 = __lasx_xvsub_w(yl_ev, u_l); \ + temp1 = __lasx_xvsub_w(yl_od, u_l); \ + DUP2_ARG2(__lasx_xvsrai_w, temp0, 6, temp1, 6, temp0, temp1); \ + DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1); \ + out_g = __lasx_xvpackev_h(temp1, temp0); \ + } + +// Pack and Store 16 ARGB values. +#define STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, pdst_argb) \ + { \ + __m256i temp0, temp1, temp2, temp3; \ + \ + temp0 = __lasx_xvpackev_b(g_l, b_l); \ + temp1 = __lasx_xvpackev_b(a_l, r_l); \ + temp2 = __lasx_xvpackev_b(g_h, b_h); \ + temp3 = __lasx_xvpackev_b(a_h, r_h); \ + r_l = __lasx_xvilvl_h(temp1, temp0); \ + r_h = __lasx_xvilvh_h(temp1, temp0); \ + g_l = __lasx_xvilvl_h(temp3, temp2); \ + g_h = __lasx_xvilvh_h(temp3, temp2); \ + temp0 = __lasx_xvpermi_q(r_h, r_l, 0x20); \ + temp1 = __lasx_xvpermi_q(g_h, g_l, 0x20); \ + temp2 = __lasx_xvpermi_q(r_h, r_l, 0x31); \ + temp3 = __lasx_xvpermi_q(g_h, g_l, 0x31); \ + __lasx_xvst(temp0, pdst_argb, 0); \ + __lasx_xvst(temp1, pdst_argb, 32); \ + __lasx_xvst(temp2, pdst_argb, 64); \ + __lasx_xvst(temp3, pdst_argb, 96); \ + pdst_argb += 128; \ + } + +// Pack and Store 8 ARGB values. 
+#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \ + { \ + __m256i temp0, temp1, temp2, temp3; \ + \ + temp0 = __lasx_xvpackev_b(in_g, in_b); \ + temp1 = __lasx_xvpackev_b(in_a, in_r); \ + temp2 = __lasx_xvilvl_h(temp1, temp0); \ + temp3 = __lasx_xvilvh_h(temp1, temp0); \ + temp0 = __lasx_xvpermi_q(temp3, temp2, 0x20); \ + temp1 = __lasx_xvpermi_q(temp3, temp2, 0x31); \ + __lasx_xvst(temp0, pdst_argb, 0); \ + __lasx_xvst(temp1, pdst_argb, 32); \ + pdst_argb += 64; \ + } + +#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _reg0, _reg1) \ + { \ + __m256i _tmp0, _tmp1, _tmp2, _tmp3; \ + _tmp0 = __lasx_xvaddwev_h_bu(_tmpb, _nexb); \ + _tmp1 = __lasx_xvaddwod_h_bu(_tmpb, _nexb); \ + _tmp2 = __lasx_xvaddwev_h_bu(_tmpg, _nexg); \ + _tmp3 = __lasx_xvaddwod_h_bu(_tmpg, _nexg); \ + _reg0 = __lasx_xvaddwev_h_bu(_tmpr, _nexr); \ + _reg1 = __lasx_xvaddwod_h_bu(_tmpr, _nexr); \ + _tmpb = __lasx_xvavgr_hu(_tmp0, _tmp1); \ + _tmpg = __lasx_xvavgr_hu(_tmp2, _tmp3); \ + _tmpr = __lasx_xvavgr_hu(_reg0, _reg1); \ + _reg0 = __lasx_xvmadd_h(const_8080, const_112, _tmpb); \ + _reg1 = __lasx_xvmadd_h(const_8080, const_112, _tmpr); \ + _reg0 = __lasx_xvmsub_h(_reg0, const_74, _tmpg); \ + _reg1 = __lasx_xvmsub_h(_reg1, const_94, _tmpg); \ + _reg0 = __lasx_xvmsub_h(_reg0, const_38, _tmpr); \ + _reg1 = __lasx_xvmsub_h(_reg1, const_18, _tmpb); \ + } + +void MirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) { + int x; + int len = width / 64; + __m256i src0, src1; + __m256i shuffler = {0x08090A0B0C0D0E0F, 0x0001020304050607, + 0x08090A0B0C0D0E0F, 0x0001020304050607}; + src += width - 64; + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1); + DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0, + src1); + src0 = __lasx_xvpermi_q(src0, src0, 0x01); + src1 = __lasx_xvpermi_q(src1, src1, 0x01); + __lasx_xvst(src1, dst, 0); + __lasx_xvst(src0, dst, 32); + dst += 64; + src -= 64; + } +} + +void MirrorUVRow_LASX(const uint8_t* src_uv, uint8_t* dst_uv, int width) { + int x; + int len = width / 16; + __m256i src, dst; + __m256i shuffler = {0x0004000500060007, 0x0000000100020003, + 0x0004000500060007, 0x0000000100020003}; + + src_uv += (width - 16) << 1; + for (x = 0; x < len; x++) { + src = __lasx_xvld(src_uv, 0); + dst = __lasx_xvshuf_h(shuffler, src, src); + dst = __lasx_xvpermi_q(dst, dst, 0x01); + __lasx_xvst(dst, dst_uv, 0); + src_uv -= 32; + dst_uv += 32; + } +} + +void ARGBMirrorRow_LASX(const uint8_t* src, uint8_t* dst, int width) { + int x; + int len = width / 16; + __m256i src0, src1; + __m256i dst0, dst1; + __m256i shuffler = {0x0B0A09080F0E0D0C, 0x0302010007060504, + 0x0B0A09080F0E0D0C, 0x0302010007060504}; + src += (width * 4) - 64; + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src, 0, src, 32, src0, src1); + DUP2_ARG3(__lasx_xvshuf_b, src0, src0, shuffler, src1, src1, shuffler, src0, + src1); + dst1 = __lasx_xvpermi_q(src0, src0, 0x01); + dst0 = __lasx_xvpermi_q(src1, src1, 0x01); + __lasx_xvst(dst0, dst, 0); + __lasx_xvst(dst1, dst, 32); + dst += 64; + src -= 64; + } +} + +void I422ToYUY2Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_yuy2, + int width) { + int x; + int len = width / 32; + __m256i src_u0, src_v0, src_y0, vec_uv0; + __m256i vec_yuy2_0, vec_yuy2_1; + __m256i dst_yuy2_0, dst_yuy2_1; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0); + src_y0 = __lasx_xvld(src_y, 0); + src_u0 = __lasx_xvpermi_d(src_u0, 0xD8); + src_v0 = 
__lasx_xvpermi_d(src_v0, 0xD8); + vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0); + vec_yuy2_0 = __lasx_xvilvl_b(vec_uv0, src_y0); + vec_yuy2_1 = __lasx_xvilvh_b(vec_uv0, src_y0); + dst_yuy2_0 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x20); + dst_yuy2_1 = __lasx_xvpermi_q(vec_yuy2_1, vec_yuy2_0, 0x31); + __lasx_xvst(dst_yuy2_0, dst_yuy2, 0); + __lasx_xvst(dst_yuy2_1, dst_yuy2, 32); + src_u += 16; + src_v += 16; + src_y += 32; + dst_yuy2 += 64; + } +} + +void I422ToUYVYRow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_uyvy, + int width) { + int x; + int len = width / 32; + __m256i src_u0, src_v0, src_y0, vec_uv0; + __m256i vec_uyvy0, vec_uyvy1; + __m256i dst_uyvy0, dst_uyvy1; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_u, 0, src_v, 0, src_u0, src_v0); + src_y0 = __lasx_xvld(src_y, 0); + src_u0 = __lasx_xvpermi_d(src_u0, 0xD8); + src_v0 = __lasx_xvpermi_d(src_v0, 0xD8); + vec_uv0 = __lasx_xvilvl_b(src_v0, src_u0); + vec_uyvy0 = __lasx_xvilvl_b(src_y0, vec_uv0); + vec_uyvy1 = __lasx_xvilvh_b(src_y0, vec_uv0); + dst_uyvy0 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x20); + dst_uyvy1 = __lasx_xvpermi_q(vec_uyvy1, vec_uyvy0, 0x31); + __lasx_xvst(dst_uyvy0, dst_uyvy, 0); + __lasx_xvst(dst_uyvy1, dst_uyvy, 32); + src_u += 16; + src_v += 16; + src_y += 32; + dst_uyvy += 64; + } +} + +void I422ToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 32; + __m256i vec_yb, vec_yg, vec_ub, vec_ug, vec_vr, vec_vg; + __m256i vec_ubvr, vec_ugvg; + __m256i alpha = __lasx_xvldi(0xFF); + __m256i const_0x80 = __lasx_xvldi(0x80); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h; + + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + STOREARGB_D(alpha, alpha, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb); + src_y += 32; + src_u += 16; + src_v += 16; + } +} + +void I422ToRGBARow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 32; + __m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, vec_ugvg; + __m256i alpha = __lasx_xvldi(0xFF); + __m256i const_0x80 = __lasx_xvldi(0x80); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h; + + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + STOREARGB_D(r_l, r_h, g_l, g_h, b_l, b_h, alpha, alpha, dst_argb); + src_y += 32; + src_u += 16; + src_v += 16; + } +} + +void I422AlphaToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + const uint8_t* src_a, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 32; + int res = width & 31; + __m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, vec_ugvg; + __m256i zero = __lasx_xvldi(0); 
+ __m256i const_0x80 = __lasx_xvldi(0x80); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h, a_l, a_h; + + y = __lasx_xvld(src_a, 0); + a_l = __lasx_xvilvl_b(zero, y); + a_h = __lasx_xvilvh_b(zero, y); + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + STOREARGB_D(a_l, a_h, r_l, r_h, g_l, g_h, b_l, b_h, dst_argb); + src_y += 32; + src_u += 16; + src_v += 16; + src_a += 32; + } + if (res) { + __m256i y, uv, r, g, b, a; + a = __lasx_xvld(src_a, 0); + a = __lasx_vext2xv_hu_bu(a); + READYUV422(src_y, src_u, src_v, y, uv); + YUVTORGB(y, uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b, g, r); + STOREARGB(a, r, g, b, dst_argb); + } +} + +void I422ToRGB24Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int32_t width) { + int x; + int len = width / 32; + __m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, vec_ugvg; + __m256i const_0x80 = __lasx_xvldi(0x80); + __m256i shuffler0 = {0x0504120302100100, 0x0A18090816070614, + 0x0504120302100100, 0x0A18090816070614}; + __m256i shuffler1 = {0x1E0F0E1C0D0C1A0B, 0x1E0F0E1C0D0C1A0B, + 0x1E0F0E1C0D0C1A0B, 0x1E0F0E1C0D0C1A0B}; + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h; + __m256i temp0, temp1, temp2, temp3; + + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + temp0 = __lasx_xvpackev_b(g_l, b_l); + temp1 = __lasx_xvpackev_b(g_h, b_h); + DUP4_ARG3(__lasx_xvshuf_b, r_l, temp0, shuffler1, r_h, temp1, shuffler1, + r_l, temp0, shuffler0, r_h, temp1, shuffler0, temp2, temp3, temp0, + temp1); + + b_l = __lasx_xvilvl_d(temp1, temp2); + b_h = __lasx_xvilvh_d(temp3, temp1); + temp1 = __lasx_xvpermi_q(b_l, temp0, 0x20); + temp2 = __lasx_xvpermi_q(temp0, b_h, 0x30); + temp3 = __lasx_xvpermi_q(b_h, b_l, 0x31); + __lasx_xvst(temp1, dst_argb, 0); + __lasx_xvst(temp2, dst_argb, 32); + __lasx_xvst(temp3, dst_argb, 64); + dst_argb += 96; + src_y += 32; + src_u += 16; + src_v += 16; + } +} + +// TODO(fbarchard): Consider AND instead of shift to isolate 5 upper bits of R. 
+void I422ToRGB565Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 32; + __m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, vec_ugvg; + __m256i const_0x80 = __lasx_xvldi(0x80); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h; + __m256i dst_l, dst_h; + + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + b_l = __lasx_xvsrli_h(b_l, 3); + b_h = __lasx_xvsrli_h(b_h, 3); + g_l = __lasx_xvsrli_h(g_l, 2); + g_h = __lasx_xvsrli_h(g_h, 2); + r_l = __lasx_xvsrli_h(r_l, 3); + r_h = __lasx_xvsrli_h(r_h, 3); + r_l = __lasx_xvslli_h(r_l, 11); + r_h = __lasx_xvslli_h(r_h, 11); + g_l = __lasx_xvslli_h(g_l, 5); + g_h = __lasx_xvslli_h(g_h, 5); + r_l = __lasx_xvor_v(r_l, g_l); + r_l = __lasx_xvor_v(r_l, b_l); + r_h = __lasx_xvor_v(r_h, g_h); + r_h = __lasx_xvor_v(r_h, b_h); + dst_l = __lasx_xvpermi_q(r_h, r_l, 0x20); + dst_h = __lasx_xvpermi_q(r_h, r_l, 0x31); + __lasx_xvst(dst_l, dst_rgb565, 0); + __lasx_xvst(dst_h, dst_rgb565, 32); + dst_rgb565 += 64; + src_y += 32; + src_u += 16; + src_v += 16; + } +} + +// TODO(fbarchard): Consider AND instead of shift to isolate 4 upper bits of G. +void I422ToARGB4444Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb4444, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 32; + __m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, vec_ugvg; + __m256i const_0x80 = __lasx_xvldi(0x80); + __m256i alpha = {0xF000F000F000F000, 0xF000F000F000F000, 0xF000F000F000F000, + 0xF000F000F000F000}; + __m256i mask = {0x00F000F000F000F0, 0x00F000F000F000F0, 0x00F000F000F000F0, + 0x00F000F000F000F0}; + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h; + __m256i dst_l, dst_h; + + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + b_l = __lasx_xvsrli_h(b_l, 4); + b_h = __lasx_xvsrli_h(b_h, 4); + r_l = __lasx_xvsrli_h(r_l, 4); + r_h = __lasx_xvsrli_h(r_h, 4); + g_l = __lasx_xvand_v(g_l, mask); + g_h = __lasx_xvand_v(g_h, mask); + r_l = __lasx_xvslli_h(r_l, 8); + r_h = __lasx_xvslli_h(r_h, 8); + r_l = __lasx_xvor_v(r_l, alpha); + r_h = __lasx_xvor_v(r_h, alpha); + r_l = __lasx_xvor_v(r_l, g_l); + r_h = __lasx_xvor_v(r_h, g_h); + r_l = __lasx_xvor_v(r_l, b_l); + r_h = __lasx_xvor_v(r_h, b_h); + dst_l = __lasx_xvpermi_q(r_h, r_l, 0x20); + dst_h = __lasx_xvpermi_q(r_h, r_l, 0x31); + __lasx_xvst(dst_l, dst_argb4444, 0); + __lasx_xvst(dst_h, dst_argb4444, 32); + dst_argb4444 += 64; + src_y += 32; + src_u += 16; + src_v += 16; + } +} + +void I422ToARGB1555Row_LASX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb1555, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 32; + __m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, 
vec_ugvg; + __m256i const_0x80 = __lasx_xvldi(0x80); + __m256i alpha = {0x8000800080008000, 0x8000800080008000, 0x8000800080008000, + 0x8000800080008000}; + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + __m256i y, uv_l, uv_h, b_l, b_h, g_l, g_h, r_l, r_h; + __m256i dst_l, dst_h; + + READYUV422_D(src_y, src_u, src_v, y, uv_l, uv_h); + YUVTORGB_D(y, uv_l, uv_h, vec_ubvr, vec_ugvg, vec_yg, vec_yb, b_l, b_h, g_l, + g_h, r_l, r_h); + b_l = __lasx_xvsrli_h(b_l, 3); + b_h = __lasx_xvsrli_h(b_h, 3); + g_l = __lasx_xvsrli_h(g_l, 3); + g_h = __lasx_xvsrli_h(g_h, 3); + g_l = __lasx_xvslli_h(g_l, 5); + g_h = __lasx_xvslli_h(g_h, 5); + r_l = __lasx_xvsrli_h(r_l, 3); + r_h = __lasx_xvsrli_h(r_h, 3); + r_l = __lasx_xvslli_h(r_l, 10); + r_h = __lasx_xvslli_h(r_h, 10); + r_l = __lasx_xvor_v(r_l, alpha); + r_h = __lasx_xvor_v(r_h, alpha); + r_l = __lasx_xvor_v(r_l, g_l); + r_h = __lasx_xvor_v(r_h, g_h); + r_l = __lasx_xvor_v(r_l, b_l); + r_h = __lasx_xvor_v(r_h, b_h); + dst_l = __lasx_xvpermi_q(r_h, r_l, 0x20); + dst_h = __lasx_xvpermi_q(r_h, r_l, 0x31); + __lasx_xvst(dst_l, dst_argb1555, 0); + __lasx_xvst(dst_h, dst_argb1555, 32); + dst_argb1555 += 64; + src_y += 32; + src_u += 16; + src_v += 16; + } +} + +void YUY2ToYRow_LASX(const uint8_t* src_yuy2, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_yuy2, 0, src_yuy2, 32, src0, src1); + dst0 = __lasx_xvpickev_b(src1, src0); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_y, 0); + src_yuy2 += 64; + dst_y += 32; + } +} + +void YUY2ToUVRow_LASX(const uint8_t* src_yuy2, + int src_stride_yuy2, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + const uint8_t* src_yuy2_next = src_yuy2 + src_stride_yuy2; + int x; + int len = width / 32; + __m256i src0, src1, src2, src3; + __m256i tmp0, dst0, dst1; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_yuy2, 0, src_yuy2, 32, src_yuy2_next, 0, + src_yuy2_next, 32, src0, src1, src2, src3); + src0 = __lasx_xvpickod_b(src1, src0); + src1 = __lasx_xvpickod_b(src3, src2); + tmp0 = __lasx_xvavgr_bu(src1, src0); + tmp0 = __lasx_xvpermi_d(tmp0, 0xD8); + dst0 = __lasx_xvpickev_b(tmp0, tmp0); + dst1 = __lasx_xvpickod_b(tmp0, tmp0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst1, dst_v, 0, 0); + __lasx_xvstelm_d(dst1, dst_v, 8, 2); + src_yuy2 += 64; + src_yuy2_next += 64; + dst_u += 16; + dst_v += 16; + } +} + +void YUY2ToUV422Row_LASX(const uint8_t* src_yuy2, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + __m256i src0, src1, tmp0, dst0, dst1; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_yuy2, 0, src_yuy2, 32, src0, src1); + tmp0 = __lasx_xvpickod_b(src1, src0); + tmp0 = __lasx_xvpermi_d(tmp0, 0xD8); + dst0 = __lasx_xvpickev_b(tmp0, tmp0); + dst1 = __lasx_xvpickod_b(tmp0, tmp0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst1, dst_v, 0, 0); + __lasx_xvstelm_d(dst1, dst_v, 8, 2); + src_yuy2 += 64; + dst_u += 16; + dst_v += 16; + } +} + +void UYVYToYRow_LASX(const uint8_t* src_uyvy, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_uyvy, 0, src_uyvy, 32, src0, src1); + dst0 = 
__lasx_xvpickod_b(src1, src0); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_y, 0); + src_uyvy += 64; + dst_y += 32; + } +} + +void UYVYToUVRow_LASX(const uint8_t* src_uyvy, + int src_stride_uyvy, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + const uint8_t* src_uyvy_next = src_uyvy + src_stride_uyvy; + int x; + int len = width / 32; + __m256i src0, src1, src2, src3, tmp0, dst0, dst1; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_uyvy, 0, src_uyvy, 32, src_uyvy_next, 0, + src_uyvy_next, 32, src0, src1, src2, src3); + src0 = __lasx_xvpickev_b(src1, src0); + src1 = __lasx_xvpickev_b(src3, src2); + tmp0 = __lasx_xvavgr_bu(src1, src0); + tmp0 = __lasx_xvpermi_d(tmp0, 0xD8); + dst0 = __lasx_xvpickev_b(tmp0, tmp0); + dst1 = __lasx_xvpickod_b(tmp0, tmp0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst1, dst_v, 0, 0); + __lasx_xvstelm_d(dst1, dst_v, 8, 2); + src_uyvy += 64; + src_uyvy_next += 64; + dst_u += 16; + dst_v += 16; + } +} + +void UYVYToUV422Row_LASX(const uint8_t* src_uyvy, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + __m256i src0, src1, tmp0, dst0, dst1; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_uyvy, 0, src_uyvy, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp0 = __lasx_xvpermi_d(tmp0, 0xD8); + dst0 = __lasx_xvpickev_b(tmp0, tmp0); + dst1 = __lasx_xvpickod_b(tmp0, tmp0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst1, dst_v, 0, 0); + __lasx_xvstelm_d(dst1, dst_v, 8, 2); + src_uyvy += 64; + dst_u += 16; + dst_v += 16; + } +} + +void ARGBToYRow_LASX(const uint8_t* src_argb0, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1, src2, src3, vec0, vec1, vec2, vec3; + __m256i tmp0, tmp1, dst0; + __m256i const_19 = __lasx_xvldi(0x19); + __m256i const_42 = __lasx_xvldi(0x42); + __m256i const_81 = __lasx_xvldi(0x81); + __m256i const_1080 = {0x1080108010801080, 0x1080108010801080, + 0x1080108010801080, 0x1080108010801080}; + __m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002, + 0x0000000700000003}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb0, 0, src_argb0, 32, src_argb0, 64, + src_argb0, 96, src0, src1, src2, src3); + vec0 = __lasx_xvpickev_b(src1, src0); + vec1 = __lasx_xvpickev_b(src3, src2); + vec2 = __lasx_xvpickod_b(src1, src0); + vec3 = __lasx_xvpickod_b(src3, src2); + tmp0 = __lasx_xvmaddwev_h_bu(const_1080, vec0, const_19); + tmp1 = __lasx_xvmaddwev_h_bu(const_1080, vec1, const_19); + tmp0 = __lasx_xvmaddwev_h_bu(tmp0, vec2, const_81); + tmp1 = __lasx_xvmaddwev_h_bu(tmp1, vec3, const_81); + tmp0 = __lasx_xvmaddwod_h_bu(tmp0, vec0, const_42); + tmp1 = __lasx_xvmaddwod_h_bu(tmp1, vec1, const_42); + dst0 = __lasx_xvssrani_b_h(tmp1, tmp0, 8); + dst0 = __lasx_xvperm_w(dst0, control); + __lasx_xvst(dst0, dst_y, 0); + src_argb0 += 128; + dst_y += 32; + } +} + +void ARGBToUVRow_LASX(const uint8_t* src_argb0, + int src_stride_argb, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + const uint8_t* src_argb1 = src_argb0 + src_stride_argb; + + __m256i src0, src1, src2, src3, src4, src5, src6, src7; + __m256i vec0, vec1, vec2, vec3; + __m256i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, dst0, dst1; + __m256i const_0x70 = {0x0038003800380038, 0x0038003800380038, + 0x0038003800380038, 0x0038003800380038}; + __m256i const_0x4A = {0x0025002500250025, 0x0025002500250025, + 
0x0025002500250025, 0x0025002500250025}; + __m256i const_0x26 = {0x0013001300130013, 0x0013001300130013, + 0x0013001300130013, 0x0013001300130013}; + __m256i const_0x5E = {0x002f002f002f002f, 0x002f002f002f002f, + 0x002f002f002f002f, 0x002f002f002f002f}; + __m256i const_0x12 = {0x0009000900090009, 0x0009000900090009, + 0x0009000900090009, 0x0009000900090009}; + __m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002, + 0x0000000700000003}; + __m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb0, 0, src_argb0, 32, src_argb0, 64, + src_argb0, 96, src0, src1, src2, src3); + DUP4_ARG2(__lasx_xvld, src_argb1, 0, src_argb1, 32, src_argb1, 64, + src_argb1, 96, src4, src5, src6, src7); + vec0 = __lasx_xvaddwev_h_bu(src0, src4); + vec1 = __lasx_xvaddwev_h_bu(src1, src5); + vec2 = __lasx_xvaddwev_h_bu(src2, src6); + vec3 = __lasx_xvaddwev_h_bu(src3, src7); + tmp0 = __lasx_xvpickev_h(vec1, vec0); + tmp1 = __lasx_xvpickev_h(vec3, vec2); + tmp2 = __lasx_xvpickod_h(vec1, vec0); + tmp3 = __lasx_xvpickod_h(vec3, vec2); + vec0 = __lasx_xvaddwod_h_bu(src0, src4); + vec1 = __lasx_xvaddwod_h_bu(src1, src5); + vec2 = __lasx_xvaddwod_h_bu(src2, src6); + vec3 = __lasx_xvaddwod_h_bu(src3, src7); + tmp4 = __lasx_xvpickev_h(vec1, vec0); + tmp5 = __lasx_xvpickev_h(vec3, vec2); + vec0 = __lasx_xvpickev_h(tmp1, tmp0); + vec1 = __lasx_xvpickod_h(tmp1, tmp0); + src0 = __lasx_xvavgr_h(vec0, vec1); + vec0 = __lasx_xvpickev_h(tmp3, tmp2); + vec1 = __lasx_xvpickod_h(tmp3, tmp2); + src1 = __lasx_xvavgr_h(vec0, vec1); + vec0 = __lasx_xvpickev_h(tmp5, tmp4); + vec1 = __lasx_xvpickod_h(tmp5, tmp4); + src2 = __lasx_xvavgr_h(vec0, vec1); + dst0 = __lasx_xvmadd_h(const_0x8080, src0, const_0x70); + dst0 = __lasx_xvmsub_h(dst0, src2, const_0x4A); + dst0 = __lasx_xvmsub_h(dst0, src1, const_0x26); + dst1 = __lasx_xvmadd_h(const_0x8080, src1, const_0x70); + dst1 = __lasx_xvmsub_h(dst1, src2, const_0x5E); + dst1 = __lasx_xvmsub_h(dst1, src0, const_0x12); + dst0 = __lasx_xvperm_w(dst0, control); + dst1 = __lasx_xvperm_w(dst1, control); + dst0 = __lasx_xvssrani_b_h(dst0, dst0, 8); + dst1 = __lasx_xvssrani_b_h(dst1, dst1, 8); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst1, dst_v, 0, 0); + __lasx_xvstelm_d(dst1, dst_v, 8, 2); + src_argb0 += 128; + src_argb1 += 128; + dst_u += 16; + dst_v += 16; + } +} + +void ARGBToRGB24Row_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) { + int x; + int len = (width / 32) - 1; + __m256i src0, src1, src2, src3; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i shuf = {0x0908060504020100, 0x000000000E0D0C0A, 0x0908060504020100, + 0x000000000E0D0C0A}; + __m256i control = {0x0000000100000000, 0x0000000400000002, 0x0000000600000005, + 0x0000000700000003}; + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, + 96, src0, src1, src2, src3); + tmp0 = __lasx_xvshuf_b(src0, src0, shuf); + tmp1 = __lasx_xvshuf_b(src1, src1, shuf); + tmp2 = __lasx_xvshuf_b(src2, src2, shuf); + tmp3 = __lasx_xvshuf_b(src3, src3, shuf); + tmp0 = __lasx_xvperm_w(tmp0, control); + tmp1 = __lasx_xvperm_w(tmp1, control); + tmp2 = __lasx_xvperm_w(tmp2, control); + tmp3 = __lasx_xvperm_w(tmp3, control); + __lasx_xvst(tmp0, dst_rgb, 0); + __lasx_xvst(tmp1, dst_rgb, 24); + __lasx_xvst(tmp2, dst_rgb, 48); + __lasx_xvst(tmp3, dst_rgb, 72); + dst_rgb += 96; + src_argb += 128; + } + 
DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, 96, + src0, src1, src2, src3); + tmp0 = __lasx_xvshuf_b(src0, src0, shuf); + tmp1 = __lasx_xvshuf_b(src1, src1, shuf); + tmp2 = __lasx_xvshuf_b(src2, src2, shuf); + tmp3 = __lasx_xvshuf_b(src3, src3, shuf); + tmp0 = __lasx_xvperm_w(tmp0, control); + tmp1 = __lasx_xvperm_w(tmp1, control); + tmp2 = __lasx_xvperm_w(tmp2, control); + tmp3 = __lasx_xvperm_w(tmp3, control); + __lasx_xvst(tmp0, dst_rgb, 0); + __lasx_xvst(tmp1, dst_rgb, 24); + __lasx_xvst(tmp2, dst_rgb, 48); + dst_rgb += 72; + __lasx_xvstelm_d(tmp3, dst_rgb, 0, 0); + __lasx_xvstelm_d(tmp3, dst_rgb, 8, 1); + __lasx_xvstelm_d(tmp3, dst_rgb, 16, 2); +} + +void ARGBToRAWRow_LASX(const uint8_t* src_argb, uint8_t* dst_rgb, int width) { + int x; + int len = (width / 32) - 1; + __m256i src0, src1, src2, src3; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i shuf = {0x090A040506000102, 0x000000000C0D0E08, 0x090A040506000102, + 0x000000000C0D0E08}; + __m256i control = {0x0000000100000000, 0x0000000400000002, 0x0000000600000005, + 0x0000000700000003}; + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, + 96, src0, src1, src2, src3); + tmp0 = __lasx_xvshuf_b(src0, src0, shuf); + tmp1 = __lasx_xvshuf_b(src1, src1, shuf); + tmp2 = __lasx_xvshuf_b(src2, src2, shuf); + tmp3 = __lasx_xvshuf_b(src3, src3, shuf); + tmp0 = __lasx_xvperm_w(tmp0, control); + tmp1 = __lasx_xvperm_w(tmp1, control); + tmp2 = __lasx_xvperm_w(tmp2, control); + tmp3 = __lasx_xvperm_w(tmp3, control); + __lasx_xvst(tmp0, dst_rgb, 0); + __lasx_xvst(tmp1, dst_rgb, 24); + __lasx_xvst(tmp2, dst_rgb, 48); + __lasx_xvst(tmp3, dst_rgb, 72); + dst_rgb += 96; + src_argb += 128; + } + DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, 96, + src0, src1, src2, src3); + tmp0 = __lasx_xvshuf_b(src0, src0, shuf); + tmp1 = __lasx_xvshuf_b(src1, src1, shuf); + tmp2 = __lasx_xvshuf_b(src2, src2, shuf); + tmp3 = __lasx_xvshuf_b(src3, src3, shuf); + tmp0 = __lasx_xvperm_w(tmp0, control); + tmp1 = __lasx_xvperm_w(tmp1, control); + tmp2 = __lasx_xvperm_w(tmp2, control); + tmp3 = __lasx_xvperm_w(tmp3, control); + __lasx_xvst(tmp0, dst_rgb, 0); + __lasx_xvst(tmp1, dst_rgb, 24); + __lasx_xvst(tmp2, dst_rgb, 48); + dst_rgb += 72; + __lasx_xvstelm_d(tmp3, dst_rgb, 0, 0); + __lasx_xvstelm_d(tmp3, dst_rgb, 8, 1); + __lasx_xvstelm_d(tmp3, dst_rgb, 16, 2); +} + +void ARGBToRGB565Row_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + int width) { + int x; + int len = width / 16; + __m256i zero = __lasx_xvldi(0); + __m256i src0, src1, tmp0, tmp1, dst0; + __m256i shift = {0x0300030003000300, 0x0300030003000300, 0x0300030003000300, + 0x0300030003000300}; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmp0 = __lasx_xvsrli_b(tmp0, 3); + tmp1 = __lasx_xvpackev_b(zero, tmp1); + tmp1 = __lasx_xvsrli_h(tmp1, 2); + tmp0 = __lasx_xvsll_b(tmp0, shift); + tmp1 = __lasx_xvslli_h(tmp1, 5); + dst0 = __lasx_xvor_v(tmp0, tmp1); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_rgb, 0); + dst_rgb += 32; + src_argb += 64; + } +} + +void ARGBToARGB1555Row_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + int width) { + int x; + int len = width / 16; + __m256i zero = __lasx_xvldi(0); + __m256i src0, src1, tmp0, tmp1, tmp2, tmp3, dst0; + __m256i shift1 = {0x0703070307030703, 0x0703070307030703, 0x0703070307030703, + 0x0703070307030703}; + 
__m256i shift2 = {0x0200020002000200, 0x0200020002000200, 0x0200020002000200, + 0x0200020002000200}; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmp0 = __lasx_xvsrli_b(tmp0, 3); + tmp1 = __lasx_xvsrl_b(tmp1, shift1); + tmp0 = __lasx_xvsll_b(tmp0, shift2); + tmp2 = __lasx_xvpackev_b(zero, tmp1); + tmp3 = __lasx_xvpackod_b(zero, tmp1); + tmp2 = __lasx_xvslli_h(tmp2, 5); + tmp3 = __lasx_xvslli_h(tmp3, 15); + dst0 = __lasx_xvor_v(tmp0, tmp2); + dst0 = __lasx_xvor_v(dst0, tmp3); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_rgb, 0); + dst_rgb += 32; + src_argb += 64; + } +} + +void ARGBToARGB4444Row_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + int width) { + int x; + int len = width / 16; + __m256i src0, src1, tmp0, tmp1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmp1 = __lasx_xvandi_b(tmp1, 0xF0); + tmp0 = __lasx_xvsrli_b(tmp0, 4); + dst0 = __lasx_xvor_v(tmp1, tmp0); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_rgb, 0); + dst_rgb += 32; + src_argb += 64; + } +} + +void ARGBToUV444Row_LASX(const uint8_t* src_argb, + uint8_t* dst_u, + uint8_t* dst_v, + int32_t width) { + int x; + int len = width / 32; + __m256i src0, src1, src2, src3; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i reg0, reg1, reg2, reg3, dst0, dst1; + __m256i const_112 = __lasx_xvldi(112); + __m256i const_74 = __lasx_xvldi(74); + __m256i const_38 = __lasx_xvldi(38); + __m256i const_94 = __lasx_xvldi(94); + __m256i const_18 = __lasx_xvldi(18); + __m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + __m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002, + 0x0000000700000003}; + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, + 96, src0, src1, src2, src3); + tmp0 = __lasx_xvpickev_h(src1, src0); + tmp1 = __lasx_xvpickod_h(src1, src0); + tmp2 = __lasx_xvpickev_h(src3, src2); + tmp3 = __lasx_xvpickod_h(src3, src2); + reg0 = __lasx_xvmaddwev_h_bu(const_0x8080, tmp0, const_112); + reg1 = __lasx_xvmaddwev_h_bu(const_0x8080, tmp2, const_112); + reg2 = __lasx_xvmulwod_h_bu(tmp0, const_74); + reg3 = __lasx_xvmulwod_h_bu(tmp2, const_74); + reg2 = __lasx_xvmaddwev_h_bu(reg2, tmp1, const_38); + reg3 = __lasx_xvmaddwev_h_bu(reg3, tmp3, const_38); + reg0 = __lasx_xvsub_h(reg0, reg2); + reg1 = __lasx_xvsub_h(reg1, reg3); + dst0 = __lasx_xvssrani_b_h(reg1, reg0, 8); + dst0 = __lasx_xvperm_w(dst0, control); + reg0 = __lasx_xvmaddwev_h_bu(const_0x8080, tmp1, const_112); + reg1 = __lasx_xvmaddwev_h_bu(const_0x8080, tmp3, const_112); + reg2 = __lasx_xvmulwev_h_bu(tmp0, const_18); + reg3 = __lasx_xvmulwev_h_bu(tmp2, const_18); + reg2 = __lasx_xvmaddwod_h_bu(reg2, tmp0, const_94); + reg3 = __lasx_xvmaddwod_h_bu(reg3, tmp2, const_94); + reg0 = __lasx_xvsub_h(reg0, reg2); + reg1 = __lasx_xvsub_h(reg1, reg3); + dst1 = __lasx_xvssrani_b_h(reg1, reg0, 8); + dst1 = __lasx_xvperm_w(dst1, control); + __lasx_xvst(dst0, dst_u, 0); + __lasx_xvst(dst1, dst_v, 0); + dst_u += 32; + dst_v += 32; + src_argb += 128; + } +} + +void ARGBMultiplyRow_LASX(const uint8_t* src_argb0, + const uint8_t* src_argb1, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 8; + __m256i zero = __lasx_xvldi(0); + __m256i src0, src1, 
dst0, dst1; + __m256i tmp0, tmp1, tmp2, tmp3; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb0, 0, src_argb1, 0, src0, src1); + tmp0 = __lasx_xvilvl_b(src0, src0); + tmp1 = __lasx_xvilvh_b(src0, src0); + tmp2 = __lasx_xvilvl_b(zero, src1); + tmp3 = __lasx_xvilvh_b(zero, src1); + dst0 = __lasx_xvmuh_hu(tmp0, tmp2); + dst1 = __lasx_xvmuh_hu(tmp1, tmp3); + dst0 = __lasx_xvpickev_b(dst1, dst0); + __lasx_xvst(dst0, dst_argb, 0); + src_argb0 += 32; + src_argb1 += 32; + dst_argb += 32; + } +} + +void ARGBAddRow_LASX(const uint8_t* src_argb0, + const uint8_t* src_argb1, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 8; + __m256i src0, src1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb0, 0, src_argb1, 0, src0, src1); + dst0 = __lasx_xvsadd_bu(src0, src1); + __lasx_xvst(dst0, dst_argb, 0); + src_argb0 += 32; + src_argb1 += 32; + dst_argb += 32; + } +} + +void ARGBSubtractRow_LASX(const uint8_t* src_argb0, + const uint8_t* src_argb1, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 8; + __m256i src0, src1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb0, 0, src_argb1, 0, src0, src1); + dst0 = __lasx_xvssub_bu(src0, src1); + __lasx_xvst(dst0, dst_argb, 0); + src_argb0 += 32; + src_argb1 += 32; + dst_argb += 32; + } +} + +void ARGBAttenuateRow_LASX(const uint8_t* src_argb, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m256i src0, src1, tmp0, tmp1; + __m256i reg0, reg1, reg2, reg3, reg4, reg5; + __m256i b, g, r, a, dst0, dst1; + __m256i control = {0x0005000100040000, 0x0007000300060002, 0x0005000100040000, + 0x0007000300060002}; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + b = __lasx_xvpackev_b(tmp0, tmp0); + r = __lasx_xvpackod_b(tmp0, tmp0); + g = __lasx_xvpackev_b(tmp1, tmp1); + a = __lasx_xvpackod_b(tmp1, tmp1); + reg0 = __lasx_xvmulwev_w_hu(b, a); + reg1 = __lasx_xvmulwod_w_hu(b, a); + reg2 = __lasx_xvmulwev_w_hu(r, a); + reg3 = __lasx_xvmulwod_w_hu(r, a); + reg4 = __lasx_xvmulwev_w_hu(g, a); + reg5 = __lasx_xvmulwod_w_hu(g, a); + reg0 = __lasx_xvssrani_h_w(reg1, reg0, 24); + reg2 = __lasx_xvssrani_h_w(reg3, reg2, 24); + reg4 = __lasx_xvssrani_h_w(reg5, reg4, 24); + reg0 = __lasx_xvshuf_h(control, reg0, reg0); + reg2 = __lasx_xvshuf_h(control, reg2, reg2); + reg4 = __lasx_xvshuf_h(control, reg4, reg4); + tmp0 = __lasx_xvpackev_b(reg4, reg0); + tmp1 = __lasx_xvpackev_b(a, reg2); + dst0 = __lasx_xvilvl_h(tmp1, tmp0); + dst1 = __lasx_xvilvh_h(tmp1, tmp0); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + dst_argb += 64; + src_argb += 64; + } +} + +void ARGBToRGB565DitherRow_LASX(const uint8_t* src_argb, + uint8_t* dst_rgb, + const uint32_t dither4, + int width) { + int x; + int len = width / 16; + __m256i src0, src1, tmp0, tmp1, dst0; + __m256i b, g, r; + __m256i zero = __lasx_xvldi(0); + __m256i vec_dither = __lasx_xvldrepl_w(&dither4, 0); + + vec_dither = __lasx_xvilvl_b(zero, vec_dither); + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + b = __lasx_xvpackev_b(zero, tmp0); + r = __lasx_xvpackod_b(zero, tmp0); + g = __lasx_xvpackev_b(zero, tmp1); + b = __lasx_xvadd_h(b, vec_dither); + g = __lasx_xvadd_h(g, vec_dither); + r = __lasx_xvadd_h(r, vec_dither); + DUP2_ARG1(__lasx_xvclip255_h, b, g, b, 
g); + r = __lasx_xvclip255_h(r); + b = __lasx_xvsrai_h(b, 3); + g = __lasx_xvsrai_h(g, 2); + r = __lasx_xvsrai_h(r, 3); + g = __lasx_xvslli_h(g, 5); + r = __lasx_xvslli_h(r, 11); + dst0 = __lasx_xvor_v(b, g); + dst0 = __lasx_xvor_v(dst0, r); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_rgb, 0); + src_argb += 64; + dst_rgb += 32; + } +} + +void ARGBShuffleRow_LASX(const uint8_t* src_argb, + uint8_t* dst_argb, + const uint8_t* shuffler, + int width) { + int x; + int len = width / 16; + __m256i src0, src1, dst0, dst1; + __m256i shuf = {0x0404040400000000, 0x0C0C0C0C08080808, 0x0404040400000000, + 0x0C0C0C0C08080808}; + __m256i temp = __lasx_xvldrepl_w(shuffler, 0); + + shuf = __lasx_xvadd_b(shuf, temp); + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + dst0 = __lasx_xvshuf_b(src0, src0, shuf); + dst1 = __lasx_xvshuf_b(src1, src1, shuf); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + src_argb += 64; + dst_argb += 64; + } +} + +void ARGBShadeRow_LASX(const uint8_t* src_argb, + uint8_t* dst_argb, + int width, + uint32_t value) { + int x; + int len = width / 8; + __m256i src0, dst0, tmp0, tmp1; + __m256i vec_value = __lasx_xvreplgr2vr_w(value); + + vec_value = __lasx_xvilvl_b(vec_value, vec_value); + for (x = 0; x < len; x++) { + src0 = __lasx_xvld(src_argb, 0); + tmp0 = __lasx_xvilvl_b(src0, src0); + tmp1 = __lasx_xvilvh_b(src0, src0); + tmp0 = __lasx_xvmuh_hu(tmp0, vec_value); + tmp1 = __lasx_xvmuh_hu(tmp1, vec_value); + dst0 = __lasx_xvpickod_b(tmp1, tmp0); + __lasx_xvst(dst0, dst_argb, 0); + src_argb += 32; + dst_argb += 32; + } +} + +void ARGBGrayRow_LASX(const uint8_t* src_argb, uint8_t* dst_argb, int width) { + int x; + int len = width / 16; + __m256i src0, src1, tmp0, tmp1; + __m256i reg0, reg1, reg2, dst0, dst1; + __m256i const_128 = __lasx_xvldi(0x480); + __m256i const_150 = __lasx_xvldi(0x96); + __m256i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D, + 0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D}; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + reg0 = __lasx_xvdp2_h_bu(tmp0, const_br); + reg1 = __lasx_xvmaddwev_h_bu(const_128, tmp1, const_150); + reg2 = __lasx_xvadd_h(reg0, reg1); + tmp0 = __lasx_xvpackod_b(reg2, reg2); + tmp1 = __lasx_xvpackod_b(tmp1, reg2); + dst0 = __lasx_xvilvl_h(tmp1, tmp0); + dst1 = __lasx_xvilvh_h(tmp1, tmp0); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + src_argb += 64; + dst_argb += 64; + } +} + +void ARGBSepiaRow_LASX(uint8_t* dst_argb, int width) { + int x; + int len = width / 16; + __m256i src0, src1, tmp0, tmp1; + __m256i reg0, reg1, spb, spg, spr; + __m256i dst0, dst1; + __m256i spb_g = __lasx_xvldi(68); + __m256i spg_g = __lasx_xvldi(88); + __m256i spr_g = __lasx_xvldi(98); + __m256i spb_br = {0x2311231123112311, 0x2311231123112311, 0x2311231123112311, + 0x2311231123112311}; + __m256i spg_br = {0x2D162D162D162D16, 0x2D162D162D162D16, 0x2D162D162D162D16, + 0x2D162D162D162D16}; + __m256i spr_br = {0x3218321832183218, 0x3218321832183218, 0x3218321832183218, + 0x3218321832183218}; + __m256i shuff = {0x1706150413021100, 0x1F0E1D0C1B0A1908, 0x1706150413021100, + 0x1F0E1D0C1B0A1908}; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lasx_xvld, dst_argb, 0, dst_argb, 32, src0, src1); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + DUP2_ARG2(__lasx_xvdp2_h_bu, tmp0, spb_br, tmp0, spg_br, spb, 
spg); + spr = __lasx_xvdp2_h_bu(tmp0, spr_br); + spb = __lasx_xvmaddwev_h_bu(spb, tmp1, spb_g); + spg = __lasx_xvmaddwev_h_bu(spg, tmp1, spg_g); + spr = __lasx_xvmaddwev_h_bu(spr, tmp1, spr_g); + spb = __lasx_xvsrli_h(spb, 7); + spg = __lasx_xvsrli_h(spg, 7); + spr = __lasx_xvsrli_h(spr, 7); + spg = __lasx_xvsat_hu(spg, 7); + spr = __lasx_xvsat_hu(spr, 7); + reg0 = __lasx_xvpackev_b(spg, spb); + reg1 = __lasx_xvshuf_b(tmp1, spr, shuff); + dst0 = __lasx_xvilvl_h(reg1, reg0); + dst1 = __lasx_xvilvh_h(reg1, reg0); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + dst_argb += 64; + } +} + +void ARGB4444ToARGBRow_LASX(const uint8_t* src_argb4444, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 32; + __m256i src0, src1; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i reg0, reg1, reg2, reg3; + __m256i dst0, dst1, dst2, dst3; + + for (x = 0; x < len; x++) { + src0 = __lasx_xvld(src_argb4444, 0); + src1 = __lasx_xvld(src_argb4444, 32); + DUP4_ARG2(__lasx_xvandi_b, src0, 0x0F, src0, 0xF0, src1, 0x0F, src1, 0xF0, + tmp0, tmp1, tmp2, tmp3); + DUP2_ARG2(__lasx_xvslli_b, tmp0, 4, tmp2, 4, reg0, reg2); + DUP2_ARG2(__lasx_xvsrli_b, tmp1, 4, tmp3, 4, reg1, reg3); + DUP4_ARG2(__lasx_xvor_v, tmp0, reg0, tmp1, reg1, tmp2, reg2, tmp3, reg3, + tmp0, tmp1, tmp2, tmp3); + DUP2_ARG2(__lasx_xvilvl_b, tmp1, tmp0, tmp3, tmp2, reg0, reg2); + DUP2_ARG2(__lasx_xvilvh_b, tmp1, tmp0, tmp3, tmp2, reg1, reg3); + DUP4_ARG3(__lasx_xvpermi_q, reg1, reg0, 0x20, reg1, reg0, 0x31, reg3, reg2, + 0x20, reg3, reg2, 0x31, dst0, dst1, dst2, dst3); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + __lasx_xvst(dst2, dst_argb, 64); + __lasx_xvst(dst3, dst_argb, 96); + src_argb4444 += 64; + dst_argb += 128; + } +} + +void ARGB1555ToARGBRow_LASX(const uint8_t* src_argb1555, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 32; + __m256i src0, src1; + __m256i tmp0, tmp1, tmpb, tmpg, tmpr, tmpa; + __m256i reg0, reg1, reg2, reg3; + __m256i dst0, dst1, dst2, dst3; + + for (x = 0; x < len; x++) { + src0 = __lasx_xvld(src_argb1555, 0); + src1 = __lasx_xvld(src_argb1555, 32); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmpb = __lasx_xvandi_b(tmp0, 0x1F); + tmpg = __lasx_xvsrli_b(tmp0, 5); + reg0 = __lasx_xvandi_b(tmp1, 0x03); + reg0 = __lasx_xvslli_b(reg0, 3); + tmpg = __lasx_xvor_v(tmpg, reg0); + reg1 = __lasx_xvandi_b(tmp1, 0x7C); + tmpr = __lasx_xvsrli_b(reg1, 2); + tmpa = __lasx_xvsrli_b(tmp1, 7); + tmpa = __lasx_xvneg_b(tmpa); + reg0 = __lasx_xvslli_b(tmpb, 3); + reg1 = __lasx_xvslli_b(tmpg, 3); + reg2 = __lasx_xvslli_b(tmpr, 3); + tmpb = __lasx_xvsrli_b(tmpb, 2); + tmpg = __lasx_xvsrli_b(tmpg, 2); + tmpr = __lasx_xvsrli_b(tmpr, 2); + tmpb = __lasx_xvor_v(reg0, tmpb); + tmpg = __lasx_xvor_v(reg1, tmpg); + tmpr = __lasx_xvor_v(reg2, tmpr); + DUP2_ARG2(__lasx_xvilvl_b, tmpg, tmpb, tmpa, tmpr, reg0, reg1); + DUP2_ARG2(__lasx_xvilvh_b, tmpg, tmpb, tmpa, tmpr, reg2, reg3); + dst0 = __lasx_xvilvl_h(reg1, reg0); + dst1 = __lasx_xvilvh_h(reg1, reg0); + dst2 = __lasx_xvilvl_h(reg3, reg2); + dst3 = __lasx_xvilvh_h(reg3, reg2); + DUP4_ARG3(__lasx_xvpermi_q, dst1, dst0, 0x20, dst1, dst0, 0x31, dst3, dst2, + 0x20, dst3, dst2, 0x31, reg0, reg1, reg2, reg3); + __lasx_xvst(reg0, dst_argb, 0); + __lasx_xvst(reg1, dst_argb, 32); + __lasx_xvst(reg2, dst_argb, 64); + __lasx_xvst(reg3, dst_argb, 96); + src_argb1555 += 64; + dst_argb += 128; + } +} + +void RGB565ToARGBRow_LASX(const uint8_t* src_rgb565, + uint8_t* dst_argb, + int width) { + int x; + int len 
= width / 32; + __m256i src0, src1; + __m256i tmp0, tmp1, tmpb, tmpg, tmpr; + __m256i reg0, reg1, reg2, reg3, dst0, dst1, dst2, dst3; + __m256i alpha = __lasx_xvldi(0xFF); + + for (x = 0; x < len; x++) { + src0 = __lasx_xvld(src_rgb565, 0); + src1 = __lasx_xvld(src_rgb565, 32); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmpb = __lasx_xvandi_b(tmp0, 0x1F); + tmpr = __lasx_xvandi_b(tmp1, 0xF8); + reg1 = __lasx_xvandi_b(tmp1, 0x07); + reg0 = __lasx_xvsrli_b(tmp0, 5); + reg1 = __lasx_xvslli_b(reg1, 3); + tmpg = __lasx_xvor_v(reg1, reg0); + reg0 = __lasx_xvslli_b(tmpb, 3); + reg1 = __lasx_xvsrli_b(tmpb, 2); + tmpb = __lasx_xvor_v(reg1, reg0); + reg0 = __lasx_xvslli_b(tmpg, 2); + reg1 = __lasx_xvsrli_b(tmpg, 4); + tmpg = __lasx_xvor_v(reg1, reg0); + reg0 = __lasx_xvsrli_b(tmpr, 5); + tmpr = __lasx_xvor_v(tmpr, reg0); + DUP2_ARG2(__lasx_xvilvl_b, tmpg, tmpb, alpha, tmpr, reg0, reg1); + dst0 = __lasx_xvilvl_h(reg1, reg0); + dst1 = __lasx_xvilvh_h(reg1, reg0); + DUP2_ARG2(__lasx_xvilvh_b, tmpg, tmpb, alpha, tmpr, reg0, reg1); + dst2 = __lasx_xvilvl_h(reg1, reg0); + dst3 = __lasx_xvilvh_h(reg1, reg0); + DUP4_ARG3(__lasx_xvpermi_q, dst1, dst0, 0x20, dst1, dst0, 0x31, dst3, dst2, + 0x20, dst3, dst2, 0x31, reg0, reg1, reg2, reg3); + __lasx_xvst(reg0, dst_argb, 0); + __lasx_xvst(reg1, dst_argb, 32); + __lasx_xvst(reg2, dst_argb, 64); + __lasx_xvst(reg3, dst_argb, 96); + src_rgb565 += 64; + dst_argb += 128; + } +} + +void RGB24ToARGBRow_LASX(const uint8_t* src_rgb24, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 32; + __m256i src0, src1, src2; + __m256i tmp0, tmp1, tmp2; + __m256i dst0, dst1, dst2, dst3; + __m256i reg0, reg1, reg2, reg3; + __m256i alpha = __lasx_xvldi(0xFF); + __m256i shuf0 = {0x131211100F0E0D0C, 0x1B1A191817161514, 0x131211100F0E0D0C, + 0x1B1A191817161514}; + __m256i shuf1 = {0x1F1E1D1C1B1A1918, 0x0706050403020100, 0x1F1E1D1C1B1A1918, + 0x0706050403020100}; + __m256i shuf2 = {0x0B0A090807060504, 0x131211100F0E0D0C, 0x0B0A090807060504, + 0x131211100F0E0D0C}; + __m256i shuf3 = {0x1005040310020100, 0x100B0A0910080706, 0x1005040310020100, + 0x100B0A0910080706}; + + for (x = 0; x < len; x++) { + reg0 = __lasx_xvld(src_rgb24, 0); + reg1 = __lasx_xvld(src_rgb24, 32); + reg2 = __lasx_xvld(src_rgb24, 64); + src0 = __lasx_xvpermi_q(reg1, reg0, 0x30); + src1 = __lasx_xvpermi_q(reg2, reg0, 0x21); + src2 = __lasx_xvpermi_q(reg2, reg1, 0x30); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuf0, src1, src2, shuf1, tmp0, + tmp1); + tmp2 = __lasx_xvshuf_b(src1, src2, shuf2); + DUP4_ARG3(__lasx_xvshuf_b, alpha, src0, shuf3, alpha, tmp0, shuf3, alpha, + tmp1, shuf3, alpha, tmp2, shuf3, reg0, reg1, reg2, reg3); + DUP4_ARG3(__lasx_xvpermi_q, reg1, reg0, 0x20, reg3, reg2, 0x20, reg1, reg0, + 0x31, reg3, reg2, 0x31, dst0, dst1, dst2, dst3); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + __lasx_xvst(dst2, dst_argb, 64); + __lasx_xvst(dst3, dst_argb, 96); + src_rgb24 += 96; + dst_argb += 128; + } +} + +void RAWToARGBRow_LASX(const uint8_t* src_raw, uint8_t* dst_argb, int width) { + int x; + int len = width / 32; + __m256i src0, src1, src2; + __m256i tmp0, tmp1, tmp2, reg0, reg1, reg2, reg3; + __m256i dst0, dst1, dst2, dst3; + __m256i alpha = __lasx_xvldi(0xFF); + __m256i shuf0 = {0x131211100F0E0D0C, 0x1B1A191817161514, 0x131211100F0E0D0C, + 0x1B1A191817161514}; + __m256i shuf1 = {0x1F1E1D1C1B1A1918, 0x0706050403020100, 0x1F1E1D1C1B1A1918, + 0x0706050403020100}; + __m256i shuf2 = {0x0B0A090807060504, 0x131211100F0E0D0C, 
0x0B0A090807060504, + 0x131211100F0E0D0C}; + __m256i shuf3 = {0x1003040510000102, 0x10090A0B10060708, 0x1003040510000102, + 0x10090A0B10060708}; + + for (x = 0; x < len; x++) { + reg0 = __lasx_xvld(src_raw, 0); + reg1 = __lasx_xvld(src_raw, 32); + reg2 = __lasx_xvld(src_raw, 64); + src0 = __lasx_xvpermi_q(reg1, reg0, 0x30); + src1 = __lasx_xvpermi_q(reg2, reg0, 0x21); + src2 = __lasx_xvpermi_q(reg2, reg1, 0x30); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuf0, src1, src2, shuf1, tmp0, + tmp1); + tmp2 = __lasx_xvshuf_b(src1, src2, shuf2); + DUP4_ARG3(__lasx_xvshuf_b, alpha, src0, shuf3, alpha, tmp0, shuf3, alpha, + tmp1, shuf3, alpha, tmp2, shuf3, reg0, reg1, reg2, reg3); + DUP4_ARG3(__lasx_xvpermi_q, reg1, reg0, 0x20, reg3, reg2, 0x20, reg1, reg0, + 0x31, reg3, reg2, 0x31, dst0, dst1, dst2, dst3); + __lasx_xvst(dst0, dst_argb, 0); + __lasx_xvst(dst1, dst_argb, 32); + __lasx_xvst(dst2, dst_argb, 64); + __lasx_xvst(dst3, dst_argb, 96); + src_raw += 96; + dst_argb += 128; + } +} + +void ARGB1555ToYRow_LASX(const uint8_t* src_argb1555, + uint8_t* dst_y, + int width) { + int x; + int len = width / 32; + __m256i src0, src1; + __m256i tmp0, tmp1, tmpb, tmpg, tmpr; + __m256i reg0, reg1, reg2, dst0; + __m256i const_66 = __lasx_xvldi(66); + __m256i const_129 = __lasx_xvldi(129); + __m256i const_25 = __lasx_xvldi(25); + __m256i const_1080 = {0x1080108010801080, 0x1080108010801080, + 0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + src0 = __lasx_xvld(src_argb1555, 0); + src1 = __lasx_xvld(src_argb1555, 32); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmpb = __lasx_xvandi_b(tmp0, 0x1F); + tmpg = __lasx_xvsrli_b(tmp0, 5); + reg0 = __lasx_xvandi_b(tmp1, 0x03); + reg0 = __lasx_xvslli_b(reg0, 3); + tmpg = __lasx_xvor_v(tmpg, reg0); + reg1 = __lasx_xvandi_b(tmp1, 0x7C); + tmpr = __lasx_xvsrli_b(reg1, 2); + reg0 = __lasx_xvslli_b(tmpb, 3); + reg1 = __lasx_xvslli_b(tmpg, 3); + reg2 = __lasx_xvslli_b(tmpr, 3); + tmpb = __lasx_xvsrli_b(tmpb, 2); + tmpg = __lasx_xvsrli_b(tmpg, 2); + tmpr = __lasx_xvsrli_b(tmpr, 2); + tmpb = __lasx_xvor_v(reg0, tmpb); + tmpg = __lasx_xvor_v(reg1, tmpg); + tmpr = __lasx_xvor_v(reg2, tmpr); + reg0 = __lasx_xvmaddwev_h_bu(const_1080, tmpb, const_25); + reg1 = __lasx_xvmaddwod_h_bu(const_1080, tmpb, const_25); + reg0 = __lasx_xvmaddwev_h_bu(reg0, tmpg, const_129); + reg1 = __lasx_xvmaddwod_h_bu(reg1, tmpg, const_129); + reg0 = __lasx_xvmaddwev_h_bu(reg0, tmpr, const_66); + reg1 = __lasx_xvmaddwod_h_bu(reg1, tmpr, const_66); + dst0 = __lasx_xvpackod_b(reg1, reg0); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_y, 0); + src_argb1555 += 64; + dst_y += 32; + } +} + +void ARGB1555ToUVRow_LASX(const uint8_t* src_argb1555, + int src_stride_argb1555, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + const uint8_t* next_argb1555 = src_argb1555 + src_stride_argb1555; + __m256i src0, src1, src2, src3; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m256i reg0, reg1, reg2, reg3, dst0; + __m256i const_112 = __lasx_xvldi(0x438); + __m256i const_74 = __lasx_xvldi(0x425); + __m256i const_38 = __lasx_xvldi(0x413); + __m256i const_94 = __lasx_xvldi(0x42F); + __m256i const_18 = __lasx_xvldi(0x409); + __m256i const_8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb1555, 0, src_argb1555, 32, next_argb1555, 0, + next_argb1555, 32, src0, src1, src2, 
src3); + DUP2_ARG2(__lasx_xvpickev_b, src1, src0, src3, src2, tmp0, tmp2); + DUP2_ARG2(__lasx_xvpickod_b, src1, src0, src3, src2, tmp1, tmp3); + tmpb = __lasx_xvandi_b(tmp0, 0x1F); + nexb = __lasx_xvandi_b(tmp2, 0x1F); + tmpg = __lasx_xvsrli_b(tmp0, 5); + nexg = __lasx_xvsrli_b(tmp2, 5); + reg0 = __lasx_xvandi_b(tmp1, 0x03); + reg2 = __lasx_xvandi_b(tmp3, 0x03); + reg0 = __lasx_xvslli_b(reg0, 3); + reg2 = __lasx_xvslli_b(reg2, 3); + tmpg = __lasx_xvor_v(tmpg, reg0); + nexg = __lasx_xvor_v(nexg, reg2); + reg1 = __lasx_xvandi_b(tmp1, 0x7C); + reg3 = __lasx_xvandi_b(tmp3, 0x7C); + tmpr = __lasx_xvsrli_b(reg1, 2); + nexr = __lasx_xvsrli_b(reg3, 2); + reg0 = __lasx_xvslli_b(tmpb, 3); + reg1 = __lasx_xvslli_b(tmpg, 3); + reg2 = __lasx_xvslli_b(tmpr, 3); + tmpb = __lasx_xvsrli_b(tmpb, 2); + tmpg = __lasx_xvsrli_b(tmpg, 2); + tmpr = __lasx_xvsrli_b(tmpr, 2); + tmpb = __lasx_xvor_v(reg0, tmpb); + tmpg = __lasx_xvor_v(reg1, tmpg); + tmpr = __lasx_xvor_v(reg2, tmpr); + reg0 = __lasx_xvslli_b(nexb, 3); + reg1 = __lasx_xvslli_b(nexg, 3); + reg2 = __lasx_xvslli_b(nexr, 3); + nexb = __lasx_xvsrli_b(nexb, 2); + nexg = __lasx_xvsrli_b(nexg, 2); + nexr = __lasx_xvsrli_b(nexr, 2); + nexb = __lasx_xvor_v(reg0, nexb); + nexg = __lasx_xvor_v(reg1, nexg); + nexr = __lasx_xvor_v(reg2, nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, reg0, reg1); + reg0 = __lasx_xvpermi_d(reg0, 0xD8); + reg1 = __lasx_xvpermi_d(reg1, 0xD8); + dst0 = __lasx_xvpickod_b(reg1, reg0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_v, 0, 1); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst0, dst_v, 8, 3); + src_argb1555 += 64; + next_argb1555 += 64; + dst_u += 16; + dst_v += 16; + } +} + +void RGB565ToYRow_LASX(const uint8_t* src_rgb565, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1; + __m256i tmp0, tmp1, tmpb, tmpg, tmpr; + __m256i reg0, reg1, dst0; + __m256i const_66 = __lasx_xvldi(66); + __m256i const_129 = __lasx_xvldi(129); + __m256i const_25 = __lasx_xvldi(25); + __m256i const_1080 = {0x1080108010801080, 0x1080108010801080, + 0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + src0 = __lasx_xvld(src_rgb565, 0); + src1 = __lasx_xvld(src_rgb565, 32); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmpb = __lasx_xvandi_b(tmp0, 0x1F); + tmpr = __lasx_xvandi_b(tmp1, 0xF8); + reg1 = __lasx_xvandi_b(tmp1, 0x07); + reg0 = __lasx_xvsrli_b(tmp0, 5); + reg1 = __lasx_xvslli_b(reg1, 3); + tmpg = __lasx_xvor_v(reg1, reg0); + reg0 = __lasx_xvslli_b(tmpb, 3); + reg1 = __lasx_xvsrli_b(tmpb, 2); + tmpb = __lasx_xvor_v(reg1, reg0); + reg0 = __lasx_xvslli_b(tmpg, 2); + reg1 = __lasx_xvsrli_b(tmpg, 4); + tmpg = __lasx_xvor_v(reg1, reg0); + reg0 = __lasx_xvsrli_b(tmpr, 5); + tmpr = __lasx_xvor_v(tmpr, reg0); + reg0 = __lasx_xvmaddwev_h_bu(const_1080, tmpb, const_25); + reg1 = __lasx_xvmaddwod_h_bu(const_1080, tmpb, const_25); + reg0 = __lasx_xvmaddwev_h_bu(reg0, tmpg, const_129); + reg1 = __lasx_xvmaddwod_h_bu(reg1, tmpg, const_129); + reg0 = __lasx_xvmaddwev_h_bu(reg0, tmpr, const_66); + reg1 = __lasx_xvmaddwod_h_bu(reg1, tmpr, const_66); + dst0 = __lasx_xvpackod_b(reg1, reg0); + dst0 = __lasx_xvpermi_d(dst0, 0xD8); + __lasx_xvst(dst0, dst_y, 0); + dst_y += 32; + src_rgb565 += 64; + } +} + +void RGB565ToUVRow_LASX(const uint8_t* src_rgb565, + int src_stride_rgb565, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + const uint8_t* next_rgb565 = src_rgb565 + src_stride_rgb565; + __m256i 
src0, src1, src2, src3; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m256i reg0, reg1, reg2, reg3, dst0; + __m256i const_112 = __lasx_xvldi(0x438); + __m256i const_74 = __lasx_xvldi(0x425); + __m256i const_38 = __lasx_xvldi(0x413); + __m256i const_94 = __lasx_xvldi(0x42F); + __m256i const_18 = __lasx_xvldi(0x409); + __m256i const_8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_rgb565, 0, src_rgb565, 32, next_rgb565, 0, + next_rgb565, 32, src0, src1, src2, src3); + DUP2_ARG2(__lasx_xvpickev_b, src1, src0, src3, src2, tmp0, tmp2); + DUP2_ARG2(__lasx_xvpickod_b, src1, src0, src3, src2, tmp1, tmp3); + tmpb = __lasx_xvandi_b(tmp0, 0x1F); + tmpr = __lasx_xvandi_b(tmp1, 0xF8); + nexb = __lasx_xvandi_b(tmp2, 0x1F); + nexr = __lasx_xvandi_b(tmp3, 0xF8); + reg1 = __lasx_xvandi_b(tmp1, 0x07); + reg3 = __lasx_xvandi_b(tmp3, 0x07); + reg0 = __lasx_xvsrli_b(tmp0, 5); + reg1 = __lasx_xvslli_b(reg1, 3); + reg2 = __lasx_xvsrli_b(tmp2, 5); + reg3 = __lasx_xvslli_b(reg3, 3); + tmpg = __lasx_xvor_v(reg1, reg0); + nexg = __lasx_xvor_v(reg2, reg3); + reg0 = __lasx_xvslli_b(tmpb, 3); + reg1 = __lasx_xvsrli_b(tmpb, 2); + reg2 = __lasx_xvslli_b(nexb, 3); + reg3 = __lasx_xvsrli_b(nexb, 2); + tmpb = __lasx_xvor_v(reg1, reg0); + nexb = __lasx_xvor_v(reg2, reg3); + reg0 = __lasx_xvslli_b(tmpg, 2); + reg1 = __lasx_xvsrli_b(tmpg, 4); + reg2 = __lasx_xvslli_b(nexg, 2); + reg3 = __lasx_xvsrli_b(nexg, 4); + tmpg = __lasx_xvor_v(reg1, reg0); + nexg = __lasx_xvor_v(reg2, reg3); + reg0 = __lasx_xvsrli_b(tmpr, 5); + reg2 = __lasx_xvsrli_b(nexr, 5); + tmpr = __lasx_xvor_v(tmpr, reg0); + nexr = __lasx_xvor_v(nexr, reg2); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, reg0, reg1); + reg0 = __lasx_xvpermi_d(reg0, 0xD8); + reg1 = __lasx_xvpermi_d(reg1, 0xD8); + dst0 = __lasx_xvpickod_b(reg1, reg0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_v, 0, 1); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst0, dst_v, 8, 3); + dst_u += 16; + dst_v += 16; + src_rgb565 += 64; + next_rgb565 += 64; + } +} + +void RGB24ToYRow_LASX(const uint8_t* src_rgb24, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1, src2; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i reg0, reg1, reg2, dst0; + __m256i const_129 = __lasx_xvldi(129); + __m256i const_br = {0x4219421942194219, 0x4219421942194219, + 0x4219421942194219, 0x4219421942194219}; + __m256i const_1080 = {0x1080108010801080, 0x1080108010801080, + 0x1080108010801080, 0x1080108010801080}; + __m256i shuff0 = {0x0B09080605030200, 0x17151412110F0E0C, 0x0B09080605030200, + 0x17151412110F0E0C}; + __m256i shuff1 = {0x0301001E1D1B1A18, 0x0F0D0C0A09070604, 0x0301001E1D1B1A18, + 0x0F0D0C0A09070604}; + __m256i shuff2 = {0x000A000700040001, 0x001600130010000D, 0x000A000700040001, + 0x001600130010000D}; + __m256i shuff3 = {0x0002001F001C0019, 0x000E000B00080005, 0x0002001F001C0019, + 0x000E000B00080005}; + + for (x = 0; x < len; x++) { + reg0 = __lasx_xvld(src_rgb24, 0); + reg1 = __lasx_xvld(src_rgb24, 32); + reg2 = __lasx_xvld(src_rgb24, 64); + src0 = __lasx_xvpermi_q(reg1, reg0, 0x30); + src1 = __lasx_xvpermi_q(reg2, reg0, 0x21); + src2 = __lasx_xvpermi_q(reg2, reg1, 0x30); + tmp0 = __lasx_xvshuf_b(src1, src0, shuff0); + tmp1 = __lasx_xvshuf_b(src1, src2, shuff1); + tmp2 = __lasx_xvshuf_b(src1, src0, shuff2); + tmp3 = __lasx_xvshuf_b(src1, src2, shuff3); + reg0 = __lasx_xvmaddwev_h_bu(const_1080, tmp2, 
const_129); + reg1 = __lasx_xvmaddwev_h_bu(const_1080, tmp3, const_129); + reg0 = __lasx_xvdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lasx_xvdp2add_h_bu(reg1, const_br, tmp1); + dst0 = __lasx_xvpickod_b(reg1, reg0); + __lasx_xvst(dst0, dst_y, 0); + dst_y += 32; + src_rgb24 += 96; + } +} + +void RGB24ToUVRow_LASX(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_rgb24 = src_rgb24 + src_stride_rgb24; + int len = width / 32; + __m256i src0, src1, src2, reg0, reg1, reg2; + __m256i nex0, nex1, nex2, dst0, tmp0, tmp1, tmp2; + __m256i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m256i const_112 = __lasx_xvldi(0x438); + __m256i const_74 = __lasx_xvldi(0x425); + __m256i const_38 = __lasx_xvldi(0x413); + __m256i const_94 = __lasx_xvldi(0x42F); + __m256i const_18 = __lasx_xvldi(0x409); + __m256i const_8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + __m256i shuff0_b = {0x15120F0C09060300, 0x00000000001E1B18, + 0x15120F0C09060300, 0x00000000001E1B18}; + __m256i shuff1_b = {0x0706050403020100, 0x1D1A1714110A0908, + 0x0706050403020100, 0x1D1A1714110A0908}; + __m256i shuff0_g = {0x1613100D0A070401, 0x00000000001F1C19, + 0x1613100D0A070401, 0x00000000001F1C19}; + __m256i shuff1_g = {0x0706050403020100, 0x1E1B1815120A0908, + 0x0706050403020100, 0x1E1B1815120A0908}; + __m256i shuff0_r = {0x1714110E0B080502, 0x0000000000001D1A, + 0x1714110E0B080502, 0x0000000000001D1A}; + __m256i shuff1_r = {0x0706050403020100, 0x1F1C191613100908, + 0x0706050403020100, 0x1F1C191613100908}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_rgb24, 0, src_rgb24, 32, src_rgb24, 64, + next_rgb24, 0, reg0, reg1, reg2, tmp0); + DUP2_ARG2(__lasx_xvld, next_rgb24, 32, next_rgb24, 64, tmp1, tmp2); + DUP4_ARG3(__lasx_xvpermi_q, reg1, reg0, 0x30, reg2, reg0, 0x21, reg2, reg1, + 0x30, tmp1, tmp0, 0x30, src0, src1, src2, nex0); + DUP2_ARG3(__lasx_xvpermi_q, tmp2, tmp0, 0x21, tmp2, tmp1, 0x30, nex1, nex2); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, + nexb); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, + nexg); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, + nexr); + DUP2_ARG3(__lasx_xvshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, + nexb); + DUP2_ARG3(__lasx_xvshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, + nexg); + DUP2_ARG3(__lasx_xvshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, + nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, reg0, reg1); + dst0 = __lasx_xvpickod_b(reg1, reg0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_v, 0, 1); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst0, dst_v, 8, 3); + src_rgb24 += 96; + next_rgb24 += 96; + dst_u += 16; + dst_v += 16; + } +} + +void RAWToYRow_LASX(const uint8_t* src_raw, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1, src2; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i reg0, reg1, reg2, dst0; + __m256i const_129 = __lasx_xvldi(129); + __m256i const_br = {0x1942194219421942, 0x1942194219421942, + 0x1942194219421942, 0x1942194219421942}; + __m256i const_1080 = {0x1080108010801080, 0x1080108010801080, + 0x1080108010801080, 0x1080108010801080}; + __m256i shuff0 = {0x0B09080605030200, 0x17151412110F0E0C, 0x0B09080605030200, + 0x17151412110F0E0C}; + __m256i shuff1 = {0x0301001E1D1B1A18, 0x0F0D0C0A09070604, 0x0301001E1D1B1A18, + 0x0F0D0C0A09070604}; + 
__m256i shuff2 = {0x000A000700040001, 0x001600130010000D, 0x000A000700040001, + 0x001600130010000D}; + __m256i shuff3 = {0x0002001F001C0019, 0x000E000B00080005, 0x0002001F001C0019, + 0x000E000B00080005}; + + for (x = 0; x < len; x++) { + reg0 = __lasx_xvld(src_raw, 0); + reg1 = __lasx_xvld(src_raw, 32); + reg2 = __lasx_xvld(src_raw, 64); + src0 = __lasx_xvpermi_q(reg1, reg0, 0x30); + src1 = __lasx_xvpermi_q(reg2, reg0, 0x21); + src2 = __lasx_xvpermi_q(reg2, reg1, 0x30); + tmp0 = __lasx_xvshuf_b(src1, src0, shuff0); + tmp1 = __lasx_xvshuf_b(src1, src2, shuff1); + tmp2 = __lasx_xvshuf_b(src1, src0, shuff2); + tmp3 = __lasx_xvshuf_b(src1, src2, shuff3); + reg0 = __lasx_xvmaddwev_h_bu(const_1080, tmp2, const_129); + reg1 = __lasx_xvmaddwev_h_bu(const_1080, tmp3, const_129); + reg0 = __lasx_xvdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lasx_xvdp2add_h_bu(reg1, const_br, tmp1); + dst0 = __lasx_xvpickod_b(reg1, reg0); + __lasx_xvst(dst0, dst_y, 0); + dst_y += 32; + src_raw += 96; + } +} + +void RAWToUVRow_LASX(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_raw = src_raw + src_stride_raw; + int len = width / 32; + __m256i src0, src1, src2, reg0, reg1, reg2; + __m256i nex0, nex1, nex2, dst0, tmp0, tmp1, tmp2; + __m256i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m256i const_112 = __lasx_xvldi(0x438); + __m256i const_74 = __lasx_xvldi(0x425); + __m256i const_38 = __lasx_xvldi(0x413); + __m256i const_94 = __lasx_xvldi(0x42F); + __m256i const_18 = __lasx_xvldi(0x409); + __m256i const_8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + __m256i shuff0_r = {0x15120F0C09060300, 0x00000000001E1B18, + 0x15120F0C09060300, 0x00000000001E1B18}; + __m256i shuff1_r = {0x0706050403020100, 0x1D1A1714110A0908, + 0x0706050403020100, 0x1D1A1714110A0908}; + __m256i shuff0_g = {0x1613100D0A070401, 0x00000000001F1C19, + 0x1613100D0A070401, 0x00000000001F1C19}; + __m256i shuff1_g = {0x0706050403020100, 0x1E1B1815120A0908, + 0x0706050403020100, 0x1E1B1815120A0908}; + __m256i shuff0_b = {0x1714110E0B080502, 0x0000000000001D1A, + 0x1714110E0B080502, 0x0000000000001D1A}; + __m256i shuff1_b = {0x0706050403020100, 0x1F1C191613100908, + 0x0706050403020100, 0x1F1C191613100908}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_raw, 0, src_raw, 32, src_raw, 64, next_raw, 0, + reg0, reg1, reg2, tmp0); + DUP2_ARG2(__lasx_xvld, next_raw, 32, next_raw, 64, tmp1, tmp2); + DUP4_ARG3(__lasx_xvpermi_q, reg1, reg0, 0x30, reg2, reg0, 0x21, reg2, reg1, + 0x30, tmp1, tmp0, 0x30, src0, src1, src2, nex0); + DUP2_ARG3(__lasx_xvpermi_q, tmp2, tmp0, 0x21, tmp2, tmp1, 0x30, nex1, nex2); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, + nexb); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, + nexg); + DUP2_ARG3(__lasx_xvshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, + nexr); + DUP2_ARG3(__lasx_xvshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, + nexb); + DUP2_ARG3(__lasx_xvshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, + nexg); + DUP2_ARG3(__lasx_xvshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, + nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, reg0, reg1); + dst0 = __lasx_xvpickod_b(reg1, reg0); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_v, 0, 1); + __lasx_xvstelm_d(dst0, dst_u, 8, 2); + __lasx_xvstelm_d(dst0, dst_v, 8, 3); + src_raw += 96; + next_raw += 96; + dst_u += 16; + dst_v += 16; 
+ } +} + +void NV12ToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 16; + __m256i vec_yg, vec_yb, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_vrub, vec_vgug, vec_y, vec_vu; + __m256i out_b, out_g, out_r; + __m256i const_0x80 = __lasx_xvldi(0x80); + __m256i alpha = __lasx_xvldi(0xFF); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_vrub = __lasx_xvilvl_h(vec_vr, vec_ub); + vec_vgug = __lasx_xvilvl_h(vec_vg, vec_ug); + + for (x = 0; x < len; x++) { + vec_y = __lasx_xvld(src_y, 0); + vec_vu = __lasx_xvld(src_uv, 0); + vec_vu = __lasx_xvsub_b(vec_vu, const_0x80); + vec_vu = __lasx_vext2xv_h_b(vec_vu); + YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_r, out_g, + out_b); + STOREARGB(alpha, out_r, out_g, out_b, dst_argb); + src_y += 16; + src_uv += 16; + } +} + +void NV12ToRGB565Row_LASX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 16; + __m256i vec_yg, vec_yb, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_vrub, vec_vgug, vec_y, vec_vu; + __m256i out_b, out_g, out_r; + __m256i const_0x80 = __lasx_xvldi(0x80); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_vrub = __lasx_xvilvl_h(vec_vr, vec_ub); + vec_vgug = __lasx_xvilvl_h(vec_vg, vec_ug); + + for (x = 0; x < len; x++) { + vec_y = __lasx_xvld(src_y, 0); + vec_vu = __lasx_xvld(src_uv, 0); + vec_vu = __lasx_xvsub_b(vec_vu, const_0x80); + vec_vu = __lasx_vext2xv_h_b(vec_vu); + YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_r, out_g, + out_b); + out_b = __lasx_xvsrli_h(out_b, 3); + out_g = __lasx_xvsrli_h(out_g, 2); + out_r = __lasx_xvsrli_h(out_r, 3); + out_g = __lasx_xvslli_h(out_g, 5); + out_r = __lasx_xvslli_h(out_r, 11); + out_r = __lasx_xvor_v(out_r, out_g); + out_r = __lasx_xvor_v(out_r, out_b); + __lasx_xvst(out_r, dst_rgb565, 0); + src_y += 16; + src_uv += 16; + dst_rgb565 += 32; + } +} + +void NV21ToARGBRow_LASX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 16; + __m256i vec_yg, vec_yb, vec_ub, vec_vr, vec_ug, vec_vg; + __m256i vec_ubvr, vec_ugvg, vec_y, vec_uv; + __m256i out_b, out_g, out_r; + __m256i const_0x80 = __lasx_xvldi(0x80); + __m256i alpha = __lasx_xvldi(0xFF); + + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); + vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr); + vec_ugvg = __lasx_xvilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + vec_y = __lasx_xvld(src_y, 0); + vec_uv = __lasx_xvld(src_uv, 0); + vec_uv = __lasx_xvsub_b(vec_uv, const_0x80); + vec_uv = __lasx_vext2xv_h_b(vec_uv); + YUVTORGB(vec_y, vec_uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb, out_b, out_g, + out_r); + STOREARGB(alpha, out_r, out_g, out_b, dst_argb); + src_y += 16; + src_uv += 16; + } +} + +void ARGBToYJRow_LASX(const uint8_t* src_argb, uint8_t* dst_y, int width) { + int x; + int len = width / 32; + __m256i src0, src1, src2, src3, dst0; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i reg0, reg1; + __m256i const_128 = __lasx_xvldi(0x480); + __m256i const_150 = __lasx_xvldi(0x96); + __m256i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D, + 0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D}; + __m256i shuff = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002, + 0x0000000700000003}; + + 
for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, + 96, src0, src1, src2, src3); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmp2 = __lasx_xvpickev_b(src3, src2); + tmp3 = __lasx_xvpickod_b(src3, src2); + reg0 = __lasx_xvmaddwev_h_bu(const_128, tmp1, const_150); + reg1 = __lasx_xvmaddwev_h_bu(const_128, tmp3, const_150); + reg0 = __lasx_xvdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lasx_xvdp2add_h_bu(reg1, const_br, tmp2); + dst0 = __lasx_xvpickod_b(reg1, reg0); + dst0 = __lasx_xvperm_w(dst0, shuff); + __lasx_xvst(dst0, dst_y, 0); + dst_y += 32; + src_argb += 128; + } +} + +void ARGBToUVJRow_LASX(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_argb = src_argb + src_stride_argb; + int len = width / 32; + __m256i src0, src1, src2, src3; + __m256i nex0, nex1, nex2, nex3; + __m256i tmp0, tmp1, tmp2, tmp3; + __m256i reg0, reg1, dst0; + __m256i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m256i const_63 = __lasx_xvldi(0x43F); + __m256i const_42 = __lasx_xvldi(0x42A); + __m256i const_21 = __lasx_xvldi(0x415); + __m256i const_53 = __lasx_xvldi(0x435); + __m256i const_10 = __lasx_xvldi(0x40A); + __m256i const_8080 = {0x8080808080808080, 0x8080808080808080, + 0x8080808080808080, 0x8080808080808080}; + __m256i shuff = {0x1614060412100200, 0x1E1C0E0C1A180A08, 0x1715070513110301, + 0x1F1D0F0D1B190B09}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src_argb, 64, src_argb, + 96, src0, src1, src2, src3); + DUP4_ARG2(__lasx_xvld, next_argb, 0, next_argb, 32, next_argb, 64, + next_argb, 96, nex0, nex1, nex2, nex3); + tmp0 = __lasx_xvpickev_b(src1, src0); + tmp1 = __lasx_xvpickod_b(src1, src0); + tmp2 = __lasx_xvpickev_b(src3, src2); + tmp3 = __lasx_xvpickod_b(src3, src2); + tmpr = __lasx_xvpickod_b(tmp2, tmp0); + tmpb = __lasx_xvpickev_b(tmp2, tmp0); + tmpg = __lasx_xvpickev_b(tmp3, tmp1); + tmp0 = __lasx_xvpickev_b(nex1, nex0); + tmp1 = __lasx_xvpickod_b(nex1, nex0); + tmp2 = __lasx_xvpickev_b(nex3, nex2); + tmp3 = __lasx_xvpickod_b(nex3, nex2); + nexr = __lasx_xvpickod_b(tmp2, tmp0); + nexb = __lasx_xvpickev_b(tmp2, tmp0); + nexg = __lasx_xvpickev_b(tmp3, tmp1); + tmp0 = __lasx_xvaddwev_h_bu(tmpb, nexb); + tmp1 = __lasx_xvaddwod_h_bu(tmpb, nexb); + tmp2 = __lasx_xvaddwev_h_bu(tmpg, nexg); + tmp3 = __lasx_xvaddwod_h_bu(tmpg, nexg); + reg0 = __lasx_xvaddwev_h_bu(tmpr, nexr); + reg1 = __lasx_xvaddwod_h_bu(tmpr, nexr); + tmpb = __lasx_xvavgr_hu(tmp0, tmp1); + tmpg = __lasx_xvavgr_hu(tmp2, tmp3); + tmpr = __lasx_xvavgr_hu(reg0, reg1); + reg0 = __lasx_xvmadd_h(const_8080, const_63, tmpb); + reg1 = __lasx_xvmadd_h(const_8080, const_63, tmpr); + reg0 = __lasx_xvmsub_h(reg0, const_42, tmpg); + reg1 = __lasx_xvmsub_h(reg1, const_53, tmpg); + reg0 = __lasx_xvmsub_h(reg0, const_21, tmpr); + reg1 = __lasx_xvmsub_h(reg1, const_10, tmpb); + dst0 = __lasx_xvpackod_b(reg1, reg0); + tmp0 = __lasx_xvpermi_d(dst0, 0x44); + tmp1 = __lasx_xvpermi_d(dst0, 0xEE); + dst0 = __lasx_xvshuf_b(tmp1, tmp0, shuff); + __lasx_xvstelm_d(dst0, dst_u, 0, 0); + __lasx_xvstelm_d(dst0, dst_v, 0, 2); + __lasx_xvstelm_d(dst0, dst_u, 8, 1); + __lasx_xvstelm_d(dst0, dst_v, 8, 3); + dst_u += 16; + dst_v += 16; + src_argb += 128; + next_argb += 128; + } +} + +#ifdef __cplusplus +} // extern "C" +} // namespace libyuv +#endif + +#endif // !defined(LIBYUV_DISABLE_LASX) && defined(__loongarch_asx) diff --git 
a/third-party/libyuv/third_party/libyuv/source/row_lsx.cc b/third-party/libyuv/third_party/libyuv/source/row_lsx.cc new file mode 100644 index 0000000000..3e8b901a65 --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/source/row_lsx.cc @@ -0,0 +1,1829 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Copyright (c) 2022 Loongson Technology Corporation Limited + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "libyuv/row.h" + +#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) +#include "libyuv/loongson_intrinsics.h" + +#ifdef __cplusplus +namespace libyuv { +extern "C" { +#endif + +// Fill YUV -> RGB conversion constants into vectors +#define YUVTORGB_SETUP(yuvconst, vr, ub, vg, ug, yg, yb) \ + { \ + ub = __lsx_vreplgr2vr_h(yuvconst->kUVToB[0]); \ + vr = __lsx_vreplgr2vr_h(yuvconst->kUVToR[1]); \ + ug = __lsx_vreplgr2vr_h(yuvconst->kUVToG[0]); \ + vg = __lsx_vreplgr2vr_h(yuvconst->kUVToG[1]); \ + yg = __lsx_vreplgr2vr_h(yuvconst->kYToRgb[0]); \ + yb = __lsx_vreplgr2vr_w(yuvconst->kYBiasToRgb[0]); \ + } + +// Convert 8 pixels of YUV420 to RGB. +#define YUVTORGB(in_y, in_vu, vrub, vgug, yg, yb, out_b, out_g, out_r) \ + { \ + __m128i y_ev, y_od, u_l, v_l; \ + __m128i tmp0, tmp1, tmp2, tmp3; \ + \ + tmp0 = __lsx_vilvl_b(in_y, in_y); \ + y_ev = __lsx_vmulwev_w_hu_h(tmp0, yg); \ + y_od = __lsx_vmulwod_w_hu_h(tmp0, yg); \ + y_ev = __lsx_vsrai_w(y_ev, 16); \ + y_od = __lsx_vsrai_w(y_od, 16); \ + y_ev = __lsx_vadd_w(y_ev, yb); \ + y_od = __lsx_vadd_w(y_od, yb); \ + in_vu = __lsx_vilvl_b(zero, in_vu); \ + in_vu = __lsx_vsub_h(in_vu, const_80); \ + u_l = __lsx_vmulwev_w_h(in_vu, vrub); \ + v_l = __lsx_vmulwod_w_h(in_vu, vrub); \ + tmp0 = __lsx_vadd_w(y_ev, u_l); \ + tmp1 = __lsx_vadd_w(y_od, u_l); \ + tmp2 = __lsx_vadd_w(y_ev, v_l); \ + tmp3 = __lsx_vadd_w(y_od, v_l); \ + tmp0 = __lsx_vsrai_w(tmp0, 6); \ + tmp1 = __lsx_vsrai_w(tmp1, 6); \ + tmp2 = __lsx_vsrai_w(tmp2, 6); \ + tmp3 = __lsx_vsrai_w(tmp3, 6); \ + tmp0 = __lsx_vclip255_w(tmp0); \ + tmp1 = __lsx_vclip255_w(tmp1); \ + tmp2 = __lsx_vclip255_w(tmp2); \ + tmp3 = __lsx_vclip255_w(tmp3); \ + out_b = __lsx_vpackev_h(tmp1, tmp0); \ + out_r = __lsx_vpackev_h(tmp3, tmp2); \ + tmp0 = __lsx_vdp2_w_h(in_vu, vgug); \ + tmp1 = __lsx_vsub_w(y_ev, tmp0); \ + tmp2 = __lsx_vsub_w(y_od, tmp0); \ + tmp1 = __lsx_vsrai_w(tmp1, 6); \ + tmp2 = __lsx_vsrai_w(tmp2, 6); \ + tmp1 = __lsx_vclip255_w(tmp1); \ + tmp2 = __lsx_vclip255_w(tmp2); \ + out_g = __lsx_vpackev_h(tmp2, tmp1); \ + } + +// Convert I444 pixels of YUV420 to RGB. 
+#define I444TORGB(in_yy, in_u, in_v, ub, vr, ugvg, yg, yb, out_b, out_g, \ + out_r) \ + { \ + __m128i y_ev, y_od, u_ev, v_ev, u_od, v_od; \ + __m128i tmp0, tmp1, tmp2, tmp3; \ + \ + y_ev = __lsx_vmulwev_w_hu_h(in_yy, yg); \ + y_od = __lsx_vmulwod_w_hu_h(in_yy, yg); \ + y_ev = __lsx_vsrai_w(y_ev, 16); \ + y_od = __lsx_vsrai_w(y_od, 16); \ + y_ev = __lsx_vadd_w(y_ev, yb); \ + y_od = __lsx_vadd_w(y_od, yb); \ + in_u = __lsx_vsub_h(in_u, const_80); \ + in_v = __lsx_vsub_h(in_v, const_80); \ + u_ev = __lsx_vmulwev_w_h(in_u, ub); \ + u_od = __lsx_vmulwod_w_h(in_u, ub); \ + v_ev = __lsx_vmulwev_w_h(in_v, vr); \ + v_od = __lsx_vmulwod_w_h(in_v, vr); \ + tmp0 = __lsx_vadd_w(y_ev, u_ev); \ + tmp1 = __lsx_vadd_w(y_od, u_od); \ + tmp2 = __lsx_vadd_w(y_ev, v_ev); \ + tmp3 = __lsx_vadd_w(y_od, v_od); \ + tmp0 = __lsx_vsrai_w(tmp0, 6); \ + tmp1 = __lsx_vsrai_w(tmp1, 6); \ + tmp2 = __lsx_vsrai_w(tmp2, 6); \ + tmp3 = __lsx_vsrai_w(tmp3, 6); \ + tmp0 = __lsx_vclip255_w(tmp0); \ + tmp1 = __lsx_vclip255_w(tmp1); \ + tmp2 = __lsx_vclip255_w(tmp2); \ + tmp3 = __lsx_vclip255_w(tmp3); \ + out_b = __lsx_vpackev_h(tmp1, tmp0); \ + out_r = __lsx_vpackev_h(tmp3, tmp2); \ + u_ev = __lsx_vpackev_h(in_u, in_v); \ + u_od = __lsx_vpackod_h(in_u, in_v); \ + v_ev = __lsx_vdp2_w_h(u_ev, ugvg); \ + v_od = __lsx_vdp2_w_h(u_od, ugvg); \ + tmp0 = __lsx_vsub_w(y_ev, v_ev); \ + tmp1 = __lsx_vsub_w(y_od, v_od); \ + tmp0 = __lsx_vsrai_w(tmp0, 6); \ + tmp1 = __lsx_vsrai_w(tmp1, 6); \ + tmp0 = __lsx_vclip255_w(tmp0); \ + tmp1 = __lsx_vclip255_w(tmp1); \ + out_g = __lsx_vpackev_h(tmp1, tmp0); \ + } + +// Pack and Store 8 ARGB values. +#define STOREARGB(in_a, in_r, in_g, in_b, pdst_argb) \ + { \ + __m128i temp0, temp1; \ + __m128i dst0, dst1; \ + \ + temp0 = __lsx_vpackev_b(in_g, in_b); \ + temp1 = __lsx_vpackev_b(in_a, in_r); \ + dst0 = __lsx_vilvl_h(temp1, temp0); \ + dst1 = __lsx_vilvh_h(temp1, temp0); \ + __lsx_vst(dst0, pdst_argb, 0); \ + __lsx_vst(dst1, pdst_argb, 16); \ + pdst_argb += 32; \ + } + +#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _dst0) \ + { \ + __m128i _tmp0, _tmp1, _tmp2, _tmp3; \ + __m128i _reg0, _reg1; \ + _tmp0 = __lsx_vaddwev_h_bu(_tmpb, _nexb); \ + _tmp1 = __lsx_vaddwod_h_bu(_tmpb, _nexb); \ + _tmp2 = __lsx_vaddwev_h_bu(_tmpg, _nexg); \ + _tmp3 = __lsx_vaddwod_h_bu(_tmpg, _nexg); \ + _reg0 = __lsx_vaddwev_h_bu(_tmpr, _nexr); \ + _reg1 = __lsx_vaddwod_h_bu(_tmpr, _nexr); \ + _tmpb = __lsx_vavgr_hu(_tmp0, _tmp1); \ + _tmpg = __lsx_vavgr_hu(_tmp2, _tmp3); \ + _tmpr = __lsx_vavgr_hu(_reg0, _reg1); \ + _reg0 = __lsx_vmadd_h(const_8080, const_112, _tmpb); \ + _reg1 = __lsx_vmadd_h(const_8080, const_112, _tmpr); \ + _reg0 = __lsx_vmsub_h(_reg0, const_74, _tmpg); \ + _reg1 = __lsx_vmsub_h(_reg1, const_94, _tmpg); \ + _reg0 = __lsx_vmsub_h(_reg0, const_38, _tmpr); \ + _reg1 = __lsx_vmsub_h(_reg1, const_18, _tmpb); \ + _dst0 = __lsx_vpickod_b(_reg1, _reg0); \ + } + +void ARGB4444ToARGBRow_LSX(const uint8_t* src_argb4444, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m128i src0, src1; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1, reg2, reg3; + __m128i dst0, dst1, dst2, dst3; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_argb4444, 0); + src1 = __lsx_vld(src_argb4444, 16); + tmp0 = __lsx_vandi_b(src0, 0x0F); + tmp1 = __lsx_vandi_b(src0, 0xF0); + tmp2 = __lsx_vandi_b(src1, 0x0F); + tmp3 = __lsx_vandi_b(src1, 0xF0); + reg0 = __lsx_vslli_b(tmp0, 4); + reg2 = __lsx_vslli_b(tmp2, 4); + reg1 = __lsx_vsrli_b(tmp1, 4); + reg3 = __lsx_vsrli_b(tmp3, 4); + 
DUP4_ARG2(__lsx_vor_v, tmp0, reg0, tmp1, reg1, tmp2, reg2, tmp3, reg3, tmp0, + tmp1, tmp2, tmp3); + dst0 = __lsx_vilvl_b(tmp1, tmp0); + dst2 = __lsx_vilvl_b(tmp3, tmp2); + dst1 = __lsx_vilvh_b(tmp1, tmp0); + dst3 = __lsx_vilvh_b(tmp3, tmp2); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + src_argb4444 += 32; + } +} + +void ARGB1555ToARGBRow_LSX(const uint8_t* src_argb1555, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m128i src0, src1; + __m128i tmp0, tmp1, tmpb, tmpg, tmpr, tmpa; + __m128i reg0, reg1, reg2; + __m128i dst0, dst1, dst2, dst3; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_argb1555, 0); + src1 = __lsx_vld(src_argb1555, 16); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmpb = __lsx_vandi_b(tmp0, 0x1F); + tmpg = __lsx_vsrli_b(tmp0, 5); + reg0 = __lsx_vandi_b(tmp1, 0x03); + reg0 = __lsx_vslli_b(reg0, 3); + tmpg = __lsx_vor_v(tmpg, reg0); + reg1 = __lsx_vandi_b(tmp1, 0x7C); + tmpr = __lsx_vsrli_b(reg1, 2); + tmpa = __lsx_vsrli_b(tmp1, 7); + tmpa = __lsx_vneg_b(tmpa); + reg0 = __lsx_vslli_b(tmpb, 3); + reg1 = __lsx_vslli_b(tmpg, 3); + reg2 = __lsx_vslli_b(tmpr, 3); + tmpb = __lsx_vsrli_b(tmpb, 2); + tmpg = __lsx_vsrli_b(tmpg, 2); + tmpr = __lsx_vsrli_b(tmpr, 2); + tmpb = __lsx_vor_v(reg0, tmpb); + tmpg = __lsx_vor_v(reg1, tmpg); + tmpr = __lsx_vor_v(reg2, tmpr); + DUP2_ARG2(__lsx_vilvl_b, tmpg, tmpb, tmpa, tmpr, reg0, reg1); + dst0 = __lsx_vilvl_h(reg1, reg0); + dst1 = __lsx_vilvh_h(reg1, reg0); + DUP2_ARG2(__lsx_vilvh_b, tmpg, tmpb, tmpa, tmpr, reg0, reg1); + dst2 = __lsx_vilvl_h(reg1, reg0); + dst3 = __lsx_vilvh_h(reg1, reg0); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + src_argb1555 += 32; + } +} + +void RGB565ToARGBRow_LSX(const uint8_t* src_rgb565, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m128i src0, src1; + __m128i tmp0, tmp1, tmpb, tmpg, tmpr; + __m128i reg0, reg1, dst0, dst1, dst2, dst3; + __m128i alpha = __lsx_vldi(0xFF); + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_rgb565, 0); + src1 = __lsx_vld(src_rgb565, 16); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmpb = __lsx_vandi_b(tmp0, 0x1F); + tmpr = __lsx_vandi_b(tmp1, 0xF8); + reg1 = __lsx_vandi_b(tmp1, 0x07); + reg0 = __lsx_vsrli_b(tmp0, 5); + reg1 = __lsx_vslli_b(reg1, 3); + tmpg = __lsx_vor_v(reg1, reg0); + reg0 = __lsx_vslli_b(tmpb, 3); + reg1 = __lsx_vsrli_b(tmpb, 2); + tmpb = __lsx_vor_v(reg1, reg0); + reg0 = __lsx_vslli_b(tmpg, 2); + reg1 = __lsx_vsrli_b(tmpg, 4); + tmpg = __lsx_vor_v(reg1, reg0); + reg0 = __lsx_vsrli_b(tmpr, 5); + tmpr = __lsx_vor_v(tmpr, reg0); + DUP2_ARG2(__lsx_vilvl_b, tmpg, tmpb, alpha, tmpr, reg0, reg1); + dst0 = __lsx_vilvl_h(reg1, reg0); + dst1 = __lsx_vilvh_h(reg1, reg0); + DUP2_ARG2(__lsx_vilvh_b, tmpg, tmpb, alpha, tmpr, reg0, reg1); + dst2 = __lsx_vilvl_h(reg1, reg0); + dst3 = __lsx_vilvh_h(reg1, reg0); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + src_rgb565 += 32; + } +} + +void RGB24ToARGBRow_LSX(const uint8_t* src_rgb24, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2; + __m128i tmp0, tmp1, tmp2; + __m128i dst0, dst1, dst2, dst3; + __m128i alpha = __lsx_vldi(0xFF); + __m128i shuf0 = 
{0x131211100F0E0D0C, 0x1B1A191817161514}; + __m128i shuf1 = {0x1F1E1D1C1B1A1918, 0x0706050403020100}; + __m128i shuf2 = {0x0B0A090807060504, 0x131211100F0E0D0C}; + __m128i shuf3 = {0x1005040310020100, 0x100B0A0910080706}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_rgb24, 0); + src1 = __lsx_vld(src_rgb24, 16); + src2 = __lsx_vld(src_rgb24, 32); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuf0, src1, src2, shuf1, tmp0, tmp1); + tmp2 = __lsx_vshuf_b(src1, src2, shuf2); + DUP4_ARG3(__lsx_vshuf_b, alpha, src0, shuf3, alpha, tmp0, shuf3, alpha, + tmp1, shuf3, alpha, tmp2, shuf3, dst0, dst1, dst2, dst3); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + src_rgb24 += 48; + } +} + +void RAWToARGBRow_LSX(const uint8_t* src_raw, uint8_t* dst_argb, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2; + __m128i tmp0, tmp1, tmp2; + __m128i dst0, dst1, dst2, dst3; + __m128i alpha = __lsx_vldi(0xFF); + __m128i shuf0 = {0x131211100F0E0D0C, 0x1B1A191817161514}; + __m128i shuf1 = {0x1F1E1D1C1B1A1918, 0x0706050403020100}; + __m128i shuf2 = {0x0B0A090807060504, 0x131211100F0E0D0C}; + __m128i shuf3 = {0x1003040510000102, 0x10090A0B10060708}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_raw, 0); + src1 = __lsx_vld(src_raw, 16); + src2 = __lsx_vld(src_raw, 32); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuf0, src1, src2, shuf1, tmp0, tmp1); + tmp2 = __lsx_vshuf_b(src1, src2, shuf2); + DUP4_ARG3(__lsx_vshuf_b, alpha, src0, shuf3, alpha, tmp0, shuf3, alpha, + tmp1, shuf3, alpha, tmp2, shuf3, dst0, dst1, dst2, dst3); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + src_raw += 48; + } +} + +void ARGB1555ToYRow_LSX(const uint8_t* src_argb1555, + uint8_t* dst_y, + int width) { + int x; + int len = width / 16; + __m128i src0, src1; + __m128i tmp0, tmp1, tmpb, tmpg, tmpr; + __m128i reg0, reg1, reg2, dst0; + __m128i const_66 = __lsx_vldi(66); + __m128i const_129 = __lsx_vldi(129); + __m128i const_25 = __lsx_vldi(25); + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_argb1555, 0); + src1 = __lsx_vld(src_argb1555, 16); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmpb = __lsx_vandi_b(tmp0, 0x1F); + tmpg = __lsx_vsrli_b(tmp0, 5); + reg0 = __lsx_vandi_b(tmp1, 0x03); + reg0 = __lsx_vslli_b(reg0, 3); + tmpg = __lsx_vor_v(tmpg, reg0); + reg1 = __lsx_vandi_b(tmp1, 0x7C); + tmpr = __lsx_vsrli_b(reg1, 2); + reg0 = __lsx_vslli_b(tmpb, 3); + reg1 = __lsx_vslli_b(tmpg, 3); + reg2 = __lsx_vslli_b(tmpr, 3); + tmpb = __lsx_vsrli_b(tmpb, 2); + tmpg = __lsx_vsrli_b(tmpg, 2); + tmpr = __lsx_vsrli_b(tmpr, 2); + tmpb = __lsx_vor_v(reg0, tmpb); + tmpg = __lsx_vor_v(reg1, tmpg); + tmpr = __lsx_vor_v(reg2, tmpr); + reg0 = __lsx_vmaddwev_h_bu(const_1080, tmpb, const_25); + reg1 = __lsx_vmaddwod_h_bu(const_1080, tmpb, const_25); + reg0 = __lsx_vmaddwev_h_bu(reg0, tmpg, const_129); + reg1 = __lsx_vmaddwod_h_bu(reg1, tmpg, const_129); + reg0 = __lsx_vmaddwev_h_bu(reg0, tmpr, const_66); + reg1 = __lsx_vmaddwod_h_bu(reg1, tmpr, const_66); + dst0 = __lsx_vpackod_b(reg1, reg0); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_argb1555 += 32; + } +} + +void ARGB1555ToUVRow_LSX(const uint8_t* src_argb1555, + int src_stride_argb1555, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 16; + 
const uint8_t* next_argb1555 = src_argb1555 + src_stride_argb1555; + __m128i src0, src1, src2, src3; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i reg0, reg1, reg2, reg3, dst0; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = __lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_argb1555, 0, src_argb1555, 16, next_argb1555, 0, + next_argb1555, 16, src0, src1, src2, src3); + DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2); + DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3); + tmpb = __lsx_vandi_b(tmp0, 0x1F); + nexb = __lsx_vandi_b(tmp2, 0x1F); + tmpg = __lsx_vsrli_b(tmp0, 5); + nexg = __lsx_vsrli_b(tmp2, 5); + reg0 = __lsx_vandi_b(tmp1, 0x03); + reg2 = __lsx_vandi_b(tmp3, 0x03); + reg0 = __lsx_vslli_b(reg0, 3); + reg2 = __lsx_vslli_b(reg2, 3); + tmpg = __lsx_vor_v(tmpg, reg0); + nexg = __lsx_vor_v(nexg, reg2); + reg1 = __lsx_vandi_b(tmp1, 0x7C); + reg3 = __lsx_vandi_b(tmp3, 0x7C); + tmpr = __lsx_vsrli_b(reg1, 2); + nexr = __lsx_vsrli_b(reg3, 2); + reg0 = __lsx_vslli_b(tmpb, 3); + reg1 = __lsx_vslli_b(tmpg, 3); + reg2 = __lsx_vslli_b(tmpr, 3); + tmpb = __lsx_vsrli_b(tmpb, 2); + tmpg = __lsx_vsrli_b(tmpg, 2); + tmpr = __lsx_vsrli_b(tmpr, 2); + tmpb = __lsx_vor_v(reg0, tmpb); + tmpg = __lsx_vor_v(reg1, tmpg); + tmpr = __lsx_vor_v(reg2, tmpr); + reg0 = __lsx_vslli_b(nexb, 3); + reg1 = __lsx_vslli_b(nexg, 3); + reg2 = __lsx_vslli_b(nexr, 3); + nexb = __lsx_vsrli_b(nexb, 2); + nexg = __lsx_vsrli_b(nexg, 2); + nexr = __lsx_vsrli_b(nexr, 2); + nexb = __lsx_vor_v(reg0, nexb); + nexg = __lsx_vor_v(reg1, nexg); + nexr = __lsx_vor_v(reg2, nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_argb1555 += 32; + next_argb1555 += 32; + } +} + +void RGB565ToYRow_LSX(const uint8_t* src_rgb565, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1; + __m128i tmp0, tmp1, tmpb, tmpg, tmpr; + __m128i reg0, reg1, dst0; + __m128i const_66 = __lsx_vldi(66); + __m128i const_129 = __lsx_vldi(129); + __m128i const_25 = __lsx_vldi(25); + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_rgb565, 0); + src1 = __lsx_vld(src_rgb565, 16); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmpb = __lsx_vandi_b(tmp0, 0x1F); + tmpr = __lsx_vandi_b(tmp1, 0xF8); + reg1 = __lsx_vandi_b(tmp1, 0x07); + reg0 = __lsx_vsrli_b(tmp0, 5); + reg1 = __lsx_vslli_b(reg1, 3); + tmpg = __lsx_vor_v(reg1, reg0); + reg0 = __lsx_vslli_b(tmpb, 3); + reg1 = __lsx_vsrli_b(tmpb, 2); + tmpb = __lsx_vor_v(reg1, reg0); + reg0 = __lsx_vslli_b(tmpg, 2); + reg1 = __lsx_vsrli_b(tmpg, 4); + tmpg = __lsx_vor_v(reg1, reg0); + reg0 = __lsx_vsrli_b(tmpr, 5); + tmpr = __lsx_vor_v(tmpr, reg0); + reg0 = __lsx_vmaddwev_h_bu(const_1080, tmpb, const_25); + reg1 = __lsx_vmaddwod_h_bu(const_1080, tmpb, const_25); + reg0 = __lsx_vmaddwev_h_bu(reg0, tmpg, const_129); + reg1 = __lsx_vmaddwod_h_bu(reg1, tmpg, const_129); + reg0 = __lsx_vmaddwev_h_bu(reg0, tmpr, const_66); + reg1 = __lsx_vmaddwod_h_bu(reg1, tmpr, const_66); + dst0 = __lsx_vpackod_b(reg1, reg0); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_rgb565 += 32; + } +} + +void 
RGB565ToUVRow_LSX(const uint8_t* src_rgb565, + int src_stride_rgb565, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 16; + const uint8_t* next_rgb565 = src_rgb565 + src_stride_rgb565; + __m128i src0, src1, src2, src3; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i reg0, reg1, reg2, reg3, dst0; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = __lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_rgb565, 0, src_rgb565, 16, next_rgb565, 0, + next_rgb565, 16, src0, src1, src2, src3); + DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2); + DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3); + tmpb = __lsx_vandi_b(tmp0, 0x1F); + tmpr = __lsx_vandi_b(tmp1, 0xF8); + nexb = __lsx_vandi_b(tmp2, 0x1F); + nexr = __lsx_vandi_b(tmp3, 0xF8); + reg1 = __lsx_vandi_b(tmp1, 0x07); + reg3 = __lsx_vandi_b(tmp3, 0x07); + reg0 = __lsx_vsrli_b(tmp0, 5); + reg1 = __lsx_vslli_b(reg1, 3); + reg2 = __lsx_vsrli_b(tmp2, 5); + reg3 = __lsx_vslli_b(reg3, 3); + tmpg = __lsx_vor_v(reg1, reg0); + nexg = __lsx_vor_v(reg2, reg3); + reg0 = __lsx_vslli_b(tmpb, 3); + reg1 = __lsx_vsrli_b(tmpb, 2); + reg2 = __lsx_vslli_b(nexb, 3); + reg3 = __lsx_vsrli_b(nexb, 2); + tmpb = __lsx_vor_v(reg1, reg0); + nexb = __lsx_vor_v(reg2, reg3); + reg0 = __lsx_vslli_b(tmpg, 2); + reg1 = __lsx_vsrli_b(tmpg, 4); + reg2 = __lsx_vslli_b(nexg, 2); + reg3 = __lsx_vsrli_b(nexg, 4); + tmpg = __lsx_vor_v(reg1, reg0); + nexg = __lsx_vor_v(reg2, reg3); + reg0 = __lsx_vsrli_b(tmpr, 5); + reg2 = __lsx_vsrli_b(nexr, 5); + tmpr = __lsx_vor_v(tmpr, reg0); + nexr = __lsx_vor_v(nexr, reg2); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_rgb565 += 32; + next_rgb565 += 32; + } +} + +void RGB24ToYRow_LSX(const uint8_t* src_rgb24, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1, dst0; + __m128i const_129 = __lsx_vldi(129); + __m128i const_br = {0x4219421942194219, 0x4219421942194219}; + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + __m128i shuff0 = {0x0B09080605030200, 0x17151412110F0E0C}; + __m128i shuff1 = {0x0301001E1D1B1A18, 0x0F0D0C0A09070604}; + __m128i shuff2 = {0x000A000700040001, 0x001600130010000D}; + __m128i shuff3 = {0x0002001F001C0019, 0x000E000B00080005}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_rgb24, 0); + src1 = __lsx_vld(src_rgb24, 16); + src2 = __lsx_vld(src_rgb24, 32); + tmp0 = __lsx_vshuf_b(src1, src0, shuff0); + tmp1 = __lsx_vshuf_b(src1, src2, shuff1); + tmp2 = __lsx_vshuf_b(src1, src0, shuff2); + tmp3 = __lsx_vshuf_b(src1, src2, shuff3); + reg0 = __lsx_vmaddwev_h_bu(const_1080, tmp2, const_129); + reg1 = __lsx_vmaddwev_h_bu(const_1080, tmp3, const_129); + reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp1); + dst0 = __lsx_vpickod_b(reg1, reg0); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_rgb24 += 48; + } +} + +void RGB24ToUVRow_LSX(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_rgb24 = src_rgb24 + src_stride_rgb24; + int len = width / 16; + __m128i 
src0, src1, src2; + __m128i nex0, nex1, nex2, dst0; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = __lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + __m128i shuff0_b = {0x15120F0C09060300, 0x00000000001E1B18}; + __m128i shuff1_b = {0x0706050403020100, 0x1D1A1714110A0908}; + __m128i shuff0_g = {0x1613100D0A070401, 0x00000000001F1C19}; + __m128i shuff1_g = {0x0706050403020100, 0x1E1B1815120A0908}; + __m128i shuff0_r = {0x1714110E0B080502, 0x0000000000001D1A}; + __m128i shuff1_r = {0x0706050403020100, 0x1F1C191613100908}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_rgb24, 0); + src1 = __lsx_vld(src_rgb24, 16); + src2 = __lsx_vld(src_rgb24, 32); + nex0 = __lsx_vld(next_rgb24, 0); + nex1 = __lsx_vld(next_rgb24, 16); + nex2 = __lsx_vld(next_rgb24, 32); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, + nexb); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, + nexg); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, + nexr); + DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, + nexb); + DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, + nexg); + DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, + nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_rgb24 += 48; + next_rgb24 += 48; + } +} + +void RAWToYRow_LSX(const uint8_t* src_raw, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1, dst0; + __m128i const_129 = __lsx_vldi(129); + __m128i const_br = {0x1942194219421942, 0x1942194219421942}; + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + __m128i shuff0 = {0x0B09080605030200, 0x17151412110F0E0C}; + __m128i shuff1 = {0x0301001E1D1B1A18, 0x0F0D0C0A09070604}; + __m128i shuff2 = {0x000A000700040001, 0x001600130010000D}; + __m128i shuff3 = {0x0002001F001C0019, 0x000E000B00080005}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_raw, 0); + src1 = __lsx_vld(src_raw, 16); + src2 = __lsx_vld(src_raw, 32); + tmp0 = __lsx_vshuf_b(src1, src0, shuff0); + tmp1 = __lsx_vshuf_b(src1, src2, shuff1); + tmp2 = __lsx_vshuf_b(src1, src0, shuff2); + tmp3 = __lsx_vshuf_b(src1, src2, shuff3); + reg0 = __lsx_vmaddwev_h_bu(const_1080, tmp2, const_129); + reg1 = __lsx_vmaddwev_h_bu(const_1080, tmp3, const_129); + reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp1); + dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_raw += 48; + } +} + +void RAWToUVRow_LSX(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_raw = src_raw + src_stride_raw; + int len = width / 16; + __m128i src0, src1, src2; + __m128i nex0, nex1, nex2, dst0; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = __lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + __m128i shuff0_r = 
{0x15120F0C09060300, 0x00000000001E1B18}; + __m128i shuff1_r = {0x0706050403020100, 0x1D1A1714110A0908}; + __m128i shuff0_g = {0x1613100D0A070401, 0x00000000001F1C19}; + __m128i shuff1_g = {0x0706050403020100, 0x1E1B1815120A0908}; + __m128i shuff0_b = {0x1714110E0B080502, 0x0000000000001D1A}; + __m128i shuff1_b = {0x0706050403020100, 0x1F1C191613100908}; + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_raw, 0); + src1 = __lsx_vld(src_raw, 16); + src2 = __lsx_vld(src_raw, 32); + nex0 = __lsx_vld(next_raw, 0); + nex1 = __lsx_vld(next_raw, 16); + nex2 = __lsx_vld(next_raw, 32); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_b, nex1, nex0, shuff0_b, tmpb, + nexb); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_g, nex1, nex0, shuff0_g, tmpg, + nexg); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0_r, nex1, nex0, shuff0_r, tmpr, + nexr); + DUP2_ARG3(__lsx_vshuf_b, src2, tmpb, shuff1_b, nex2, nexb, shuff1_b, tmpb, + nexb); + DUP2_ARG3(__lsx_vshuf_b, src2, tmpg, shuff1_g, nex2, nexg, shuff1_g, tmpg, + nexg); + DUP2_ARG3(__lsx_vshuf_b, src2, tmpr, shuff1_r, nex2, nexr, shuff1_r, tmpr, + nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_raw += 48; + next_raw += 48; + } +} + +void NV12ToARGBRow_LSX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 8; + __m128i vec_y, vec_vu; + __m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb; + __m128i vec_vrub, vec_vgug; + __m128i out_b, out_g, out_r; + __m128i const_80 = __lsx_vldi(0x480); + __m128i alpha = __lsx_vldi(0xFF); + __m128i zero = __lsx_vldi(0); + + YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb); + vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub); + vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug); + + for (x = 0; x < len; x++) { + vec_y = __lsx_vld(src_y, 0); + vec_vu = __lsx_vld(src_uv, 0); + YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g, + out_r); + STOREARGB(alpha, out_r, out_g, out_b, dst_argb); + src_y += 8; + src_uv += 8; + } +} + +void NV12ToRGB565Row_LSX(const uint8_t* src_y, + const uint8_t* src_uv, + uint8_t* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 8; + __m128i vec_y, vec_vu; + __m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb; + __m128i vec_vrub, vec_vgug; + __m128i out_b, out_g, out_r; + __m128i const_80 = __lsx_vldi(0x480); + __m128i zero = __lsx_vldi(0); + + YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb); + vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub); + vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug); + + for (x = 0; x < len; x++) { + vec_y = __lsx_vld(src_y, 0); + vec_vu = __lsx_vld(src_uv, 0); + YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g, + out_r); + out_b = __lsx_vsrli_h(out_b, 3); + out_g = __lsx_vsrli_h(out_g, 2); + out_r = __lsx_vsrli_h(out_r, 3); + out_g = __lsx_vslli_h(out_g, 5); + out_r = __lsx_vslli_h(out_r, 11); + out_r = __lsx_vor_v(out_r, out_g); + out_r = __lsx_vor_v(out_r, out_b); + __lsx_vst(out_r, dst_rgb565, 0); + src_y += 8; + src_uv += 8; + dst_rgb565 += 16; + } +} + +void NV21ToARGBRow_LSX(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 8; + __m128i vec_y, vec_uv; + __m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb; + __m128i 
vec_ubvr, vec_ugvg; + __m128i out_b, out_g, out_r; + __m128i const_80 = __lsx_vldi(0x480); + __m128i alpha = __lsx_vldi(0xFF); + __m128i zero = __lsx_vldi(0); + + YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb); + vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr); + vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + vec_y = __lsx_vld(src_y, 0); + vec_uv = __lsx_vld(src_vu, 0); + YUVTORGB(vec_y, vec_uv, vec_ubvr, vec_ugvg, vec_yg, vec_yb, out_r, out_g, + out_b); + STOREARGB(alpha, out_r, out_g, out_b, dst_argb); + src_y += 8; + src_vu += 8; + } +} + +void SobelRow_LSX(const uint8_t* src_sobelx, + const uint8_t* src_sobely, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m128i src0, src1, tmp0; + __m128i out0, out1, out2, out3; + __m128i alpha = __lsx_vldi(0xFF); + __m128i shuff0 = {0x1001010110000000, 0x1003030310020202}; + __m128i shuff1 = __lsx_vaddi_bu(shuff0, 0x04); + __m128i shuff2 = __lsx_vaddi_bu(shuff1, 0x04); + __m128i shuff3 = __lsx_vaddi_bu(shuff2, 0x04); + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_sobelx, 0); + src1 = __lsx_vld(src_sobely, 0); + tmp0 = __lsx_vsadd_bu(src0, src1); + DUP4_ARG3(__lsx_vshuf_b, alpha, tmp0, shuff0, alpha, tmp0, shuff1, alpha, + tmp0, shuff2, alpha, tmp0, shuff3, out0, out1, out2, out3); + __lsx_vst(out0, dst_argb, 0); + __lsx_vst(out1, dst_argb, 16); + __lsx_vst(out2, dst_argb, 32); + __lsx_vst(out3, dst_argb, 48); + src_sobelx += 16; + src_sobely += 16; + dst_argb += 64; + } +} + +void SobelToPlaneRow_LSX(const uint8_t* src_sobelx, + const uint8_t* src_sobely, + uint8_t* dst_y, + int width) { + int x; + int len = width / 32; + __m128i src0, src1, src2, src3, dst0, dst1; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lsx_vld, src_sobelx, 0, src_sobelx, 16, src0, src1); + DUP2_ARG2(__lsx_vld, src_sobely, 0, src_sobely, 16, src2, src3); + dst0 = __lsx_vsadd_bu(src0, src2); + dst1 = __lsx_vsadd_bu(src1, src3); + __lsx_vst(dst0, dst_y, 0); + __lsx_vst(dst1, dst_y, 16); + src_sobelx += 32; + src_sobely += 32; + dst_y += 32; + } +} + +void SobelXYRow_LSX(const uint8_t* src_sobelx, + const uint8_t* src_sobely, + uint8_t* dst_argb, + int width) { + int x; + int len = width / 16; + __m128i src_r, src_b, src_g; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i dst0, dst1, dst2, dst3; + __m128i alpha = __lsx_vldi(0xFF); + + for (x = 0; x < len; x++) { + src_r = __lsx_vld(src_sobelx, 0); + src_b = __lsx_vld(src_sobely, 0); + src_g = __lsx_vsadd_bu(src_r, src_b); + tmp0 = __lsx_vilvl_b(src_g, src_b); + tmp1 = __lsx_vilvh_b(src_g, src_b); + tmp2 = __lsx_vilvl_b(alpha, src_r); + tmp3 = __lsx_vilvh_b(alpha, src_r); + dst0 = __lsx_vilvl_h(tmp2, tmp0); + dst1 = __lsx_vilvh_h(tmp2, tmp0); + dst2 = __lsx_vilvl_h(tmp3, tmp1); + dst3 = __lsx_vilvh_h(tmp3, tmp1); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + src_sobelx += 16; + src_sobely += 16; + dst_argb += 64; + } +} + +void ARGBToYJRow_LSX(const uint8_t* src_argb, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2, src3, dst0; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1; + __m128i const_128 = __lsx_vldi(0x480); + __m128i const_150 = __lsx_vldi(0x96); + __m128i const_br = {0x4D1D4D1D4D1D4D1D, 0x4D1D4D1D4D1D4D1D}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48, + src0, src1, src2, src3); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = 
__lsx_vpickod_b(src1, src0); + tmp2 = __lsx_vpickev_b(src3, src2); + tmp3 = __lsx_vpickod_b(src3, src2); + reg0 = __lsx_vmaddwev_h_bu(const_128, tmp1, const_150); + reg1 = __lsx_vmaddwev_h_bu(const_128, tmp3, const_150); + reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2); + dst0 = __lsx_vpickod_b(reg1, reg0); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_argb += 64; + } +} + +void BGRAToYRow_LSX(const uint8_t* src_bgra, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2, src3, dst0; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1; + __m128i const_129 = __lsx_vldi(0x81); + __m128i const_br = {0x1942194219421942, 0x1942194219421942}; + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48, + src0, src1, src2, src3); + tmp0 = __lsx_vpickod_b(src1, src0); + tmp1 = __lsx_vpickev_b(src1, src0); + tmp2 = __lsx_vpickod_b(src3, src2); + tmp3 = __lsx_vpickev_b(src3, src2); + reg0 = __lsx_vmaddwod_h_bu(const_1080, tmp1, const_129); + reg1 = __lsx_vmaddwod_h_bu(const_1080, tmp3, const_129); + reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2); + dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_bgra += 64; + } +} + +void BGRAToUVRow_LSX(const uint8_t* src_bgra, + int src_stride_bgra, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_bgra = src_bgra + src_stride_bgra; + int len = width / 16; + __m128i src0, src1, src2, src3; + __m128i nex0, nex1, nex2, nex3; + __m128i tmp0, tmp1, tmp2, tmp3, dst0; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = __lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, next_bgra, 0, next_bgra, 16, next_bgra, 32, next_bgra, + 48, nex0, nex1, nex2, nex3); + tmp0 = __lsx_vpickod_b(src1, src0); + tmp1 = __lsx_vpickev_b(src1, src0); + tmp2 = __lsx_vpickod_b(src3, src2); + tmp3 = __lsx_vpickev_b(src3, src2); + tmpb = __lsx_vpickod_b(tmp2, tmp0); + tmpr = __lsx_vpickev_b(tmp2, tmp0); + tmpg = __lsx_vpickod_b(tmp3, tmp1); + tmp0 = __lsx_vpickod_b(nex1, nex0); + tmp1 = __lsx_vpickev_b(nex1, nex0); + tmp2 = __lsx_vpickod_b(nex3, nex2); + tmp3 = __lsx_vpickev_b(nex3, nex2); + nexb = __lsx_vpickod_b(tmp2, tmp0); + nexr = __lsx_vpickev_b(tmp2, tmp0); + nexg = __lsx_vpickod_b(tmp3, tmp1); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_bgra += 64; + next_bgra += 64; + } +} + +void ABGRToYRow_LSX(const uint8_t* src_abgr, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2, src3, dst0; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1; + __m128i const_129 = __lsx_vldi(0x81); + __m128i const_br = {0x1942194219421942, 0x1942194219421942}; + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48, + src0, src1, src2, src3); + tmp0 = 
__lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmp2 = __lsx_vpickev_b(src3, src2); + tmp3 = __lsx_vpickod_b(src3, src2); + reg0 = __lsx_vmaddwev_h_bu(const_1080, tmp1, const_129); + reg1 = __lsx_vmaddwev_h_bu(const_1080, tmp3, const_129); + reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2); + dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_abgr += 64; + } +} + +void ABGRToUVRow_LSX(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_abgr = src_abgr + src_stride_abgr; + int len = width / 16; + __m128i src0, src1, src2, src3; + __m128i nex0, nex1, nex2, nex3; + __m128i tmp0, tmp1, tmp2, tmp3, dst0; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = __lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, next_abgr, 0, next_abgr, 16, next_abgr, 32, next_abgr, + 48, nex0, nex1, nex2, nex3); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmp2 = __lsx_vpickev_b(src3, src2); + tmp3 = __lsx_vpickod_b(src3, src2); + tmpb = __lsx_vpickod_b(tmp2, tmp0); + tmpr = __lsx_vpickev_b(tmp2, tmp0); + tmpg = __lsx_vpickev_b(tmp3, tmp1); + tmp0 = __lsx_vpickev_b(nex1, nex0); + tmp1 = __lsx_vpickod_b(nex1, nex0); + tmp2 = __lsx_vpickev_b(nex3, nex2); + tmp3 = __lsx_vpickod_b(nex3, nex2); + nexb = __lsx_vpickod_b(tmp2, tmp0); + nexr = __lsx_vpickev_b(tmp2, tmp0); + nexg = __lsx_vpickev_b(tmp3, tmp1); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_abgr += 64; + next_abgr += 64; + } +} + +void RGBAToYRow_LSX(const uint8_t* src_rgba, uint8_t* dst_y, int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2, src3, dst0; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1; + __m128i const_129 = __lsx_vldi(0x81); + __m128i const_br = {0x4219421942194219, 0x4219421942194219}; + __m128i const_1080 = {0x1080108010801080, 0x1080108010801080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48, + src0, src1, src2, src3); + tmp0 = __lsx_vpickod_b(src1, src0); + tmp1 = __lsx_vpickev_b(src1, src0); + tmp2 = __lsx_vpickod_b(src3, src2); + tmp3 = __lsx_vpickev_b(src3, src2); + reg0 = __lsx_vmaddwod_h_bu(const_1080, tmp1, const_129); + reg1 = __lsx_vmaddwod_h_bu(const_1080, tmp3, const_129); + reg0 = __lsx_vdp2add_h_bu(reg0, const_br, tmp0); + reg1 = __lsx_vdp2add_h_bu(reg1, const_br, tmp2); + dst0 = __lsx_vsrlni_b_h(reg1, reg0, 8); + __lsx_vst(dst0, dst_y, 0); + dst_y += 16; + src_rgba += 64; + } +} + +void RGBAToUVRow_LSX(const uint8_t* src_rgba, + int src_stride_rgba, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_rgba = src_rgba + src_stride_rgba; + int len = width / 16; + __m128i src0, src1, src2, src3; + __m128i nex0, nex1, nex2, nex3; + __m128i tmp0, tmp1, tmp2, tmp3, dst0; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i const_112 = __lsx_vldi(0x438); + __m128i const_74 = __lsx_vldi(0x425); + __m128i const_38 = 
__lsx_vldi(0x413); + __m128i const_94 = __lsx_vldi(0x42F); + __m128i const_18 = __lsx_vldi(0x409); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, next_rgba, 0, next_rgba, 16, next_rgba, 32, next_rgba, + 48, nex0, nex1, nex2, nex3); + tmp0 = __lsx_vpickod_b(src1, src0); + tmp1 = __lsx_vpickev_b(src1, src0); + tmp2 = __lsx_vpickod_b(src3, src2); + tmp3 = __lsx_vpickev_b(src3, src2); + tmpr = __lsx_vpickod_b(tmp2, tmp0); + tmpb = __lsx_vpickev_b(tmp2, tmp0); + tmpg = __lsx_vpickod_b(tmp3, tmp1); + tmp0 = __lsx_vpickod_b(nex1, nex0); + tmp1 = __lsx_vpickev_b(nex1, nex0); + tmp2 = __lsx_vpickod_b(nex3, nex2); + tmp3 = __lsx_vpickev_b(nex3, nex2); + nexr = __lsx_vpickod_b(tmp2, tmp0); + nexb = __lsx_vpickev_b(tmp2, tmp0); + nexg = __lsx_vpickod_b(tmp3, tmp1); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_rgba += 64; + next_rgba += 64; + } +} + +void ARGBToUVJRow_LSX(const uint8_t* src_argb, + int src_stride_argb, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + const uint8_t* next_argb = src_argb + src_stride_argb; + int len = width / 16; + __m128i src0, src1, src2, src3; + __m128i nex0, nex1, nex2, nex3; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i reg0, reg1, dst0; + __m128i tmpb, tmpg, tmpr, nexb, nexg, nexr; + __m128i const_63 = __lsx_vldi(0x43F); + __m128i const_42 = __lsx_vldi(0x42A); + __m128i const_21 = __lsx_vldi(0x415); + __m128i const_53 = __lsx_vldi(0x435); + __m128i const_10 = __lsx_vldi(0x40A); + __m128i const_8080 = {0x8080808080808080, 0x8080808080808080}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, next_argb, 0, next_argb, 16, next_argb, 32, next_argb, + 48, nex0, nex1, nex2, nex3); + tmp0 = __lsx_vpickev_b(src1, src0); + tmp1 = __lsx_vpickod_b(src1, src0); + tmp2 = __lsx_vpickev_b(src3, src2); + tmp3 = __lsx_vpickod_b(src3, src2); + tmpr = __lsx_vpickod_b(tmp2, tmp0); + tmpb = __lsx_vpickev_b(tmp2, tmp0); + tmpg = __lsx_vpickev_b(tmp3, tmp1); + tmp0 = __lsx_vpickev_b(nex1, nex0); + tmp1 = __lsx_vpickod_b(nex1, nex0); + tmp2 = __lsx_vpickev_b(nex3, nex2); + tmp3 = __lsx_vpickod_b(nex3, nex2); + nexr = __lsx_vpickod_b(tmp2, tmp0); + nexb = __lsx_vpickev_b(tmp2, tmp0); + nexg = __lsx_vpickev_b(tmp3, tmp1); + tmp0 = __lsx_vaddwev_h_bu(tmpb, nexb); + tmp1 = __lsx_vaddwod_h_bu(tmpb, nexb); + tmp2 = __lsx_vaddwev_h_bu(tmpg, nexg); + tmp3 = __lsx_vaddwod_h_bu(tmpg, nexg); + reg0 = __lsx_vaddwev_h_bu(tmpr, nexr); + reg1 = __lsx_vaddwod_h_bu(tmpr, nexr); + tmpb = __lsx_vavgr_hu(tmp0, tmp1); + tmpg = __lsx_vavgr_hu(tmp2, tmp3); + tmpr = __lsx_vavgr_hu(reg0, reg1); + reg0 = __lsx_vmadd_h(const_8080, const_63, tmpb); + reg1 = __lsx_vmadd_h(const_8080, const_63, tmpr); + reg0 = __lsx_vmsub_h(reg0, const_42, tmpg); + reg1 = __lsx_vmsub_h(reg1, const_53, tmpg); + reg0 = __lsx_vmsub_h(reg0, const_21, tmpr); + reg1 = __lsx_vmsub_h(reg1, const_10, tmpb); + dst0 = __lsx_vpickod_b(reg1, reg0); + __lsx_vstelm_d(dst0, dst_u, 0, 0); + __lsx_vstelm_d(dst0, dst_v, 0, 1); + dst_u += 8; + dst_v += 8; + src_argb += 64; + next_argb += 64; + } +} + +void I444ToARGBRow_LSX(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_argb, + const struct YuvConstants* 
yuvconstants, + int width) { + int x; + int len = width / 16; + __m128i vec_y, vec_u, vec_v, out_b, out_g, out_r; + __m128i vec_yl, vec_yh, vec_ul, vec_vl, vec_uh, vec_vh; + __m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb, vec_ugvg; + __m128i const_80 = __lsx_vldi(0x480); + __m128i alpha = __lsx_vldi(0xFF); + __m128i zero = __lsx_vldi(0); + + YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb); + vec_ugvg = __lsx_vilvl_h(vec_ug, vec_vg); + + for (x = 0; x < len; x++) { + vec_y = __lsx_vld(src_y, 0); + vec_u = __lsx_vld(src_u, 0); + vec_v = __lsx_vld(src_v, 0); + vec_yl = __lsx_vilvl_b(vec_y, vec_y); + vec_ul = __lsx_vilvl_b(zero, vec_u); + vec_vl = __lsx_vilvl_b(zero, vec_v); + I444TORGB(vec_yl, vec_ul, vec_vl, vec_ub, vec_vr, vec_ugvg, vec_yg, vec_yb, + out_b, out_g, out_r); + STOREARGB(alpha, out_r, out_g, out_b, dst_argb); + vec_yh = __lsx_vilvh_b(vec_y, vec_y); + vec_uh = __lsx_vilvh_b(zero, vec_u); + vec_vh = __lsx_vilvh_b(zero, vec_v); + I444TORGB(vec_yh, vec_uh, vec_vh, vec_ub, vec_vr, vec_ugvg, vec_yg, vec_yb, + out_b, out_g, out_r); + STOREARGB(alpha, out_r, out_g, out_b, dst_argb); + src_y += 16; + src_u += 16; + src_v += 16; + } +} + +void I400ToARGBRow_LSX(const uint8_t* src_y, + uint8_t* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + int x; + int len = width / 16; + __m128i vec_y, vec_yl, vec_yh, out0; + __m128i y_ev, y_od, dst0, dst1, dst2, dst3; + __m128i temp0, temp1; + __m128i alpha = __lsx_vldi(0xFF); + __m128i vec_yg = __lsx_vreplgr2vr_h(yuvconstants->kYToRgb[0]); + __m128i vec_yb = __lsx_vreplgr2vr_w(yuvconstants->kYBiasToRgb[0]); + + for (x = 0; x < len; x++) { + vec_y = __lsx_vld(src_y, 0); + vec_yl = __lsx_vilvl_b(vec_y, vec_y); + y_ev = __lsx_vmulwev_w_hu_h(vec_yl, vec_yg); + y_od = __lsx_vmulwod_w_hu_h(vec_yl, vec_yg); + y_ev = __lsx_vsrai_w(y_ev, 16); + y_od = __lsx_vsrai_w(y_od, 16); + y_ev = __lsx_vadd_w(y_ev, vec_yb); + y_od = __lsx_vadd_w(y_od, vec_yb); + y_ev = __lsx_vsrai_w(y_ev, 6); + y_od = __lsx_vsrai_w(y_od, 6); + y_ev = __lsx_vclip255_w(y_ev); + y_od = __lsx_vclip255_w(y_od); + out0 = __lsx_vpackev_h(y_od, y_ev); + temp0 = __lsx_vpackev_b(out0, out0); + temp1 = __lsx_vpackev_b(alpha, out0); + dst0 = __lsx_vilvl_h(temp1, temp0); + dst1 = __lsx_vilvh_h(temp1, temp0); + vec_yh = __lsx_vilvh_b(vec_y, vec_y); + y_ev = __lsx_vmulwev_w_hu_h(vec_yh, vec_yg); + y_od = __lsx_vmulwod_w_hu_h(vec_yh, vec_yg); + y_ev = __lsx_vsrai_w(y_ev, 16); + y_od = __lsx_vsrai_w(y_od, 16); + y_ev = __lsx_vadd_w(y_ev, vec_yb); + y_od = __lsx_vadd_w(y_od, vec_yb); + y_ev = __lsx_vsrai_w(y_ev, 6); + y_od = __lsx_vsrai_w(y_od, 6); + y_ev = __lsx_vclip255_w(y_ev); + y_od = __lsx_vclip255_w(y_od); + out0 = __lsx_vpackev_h(y_od, y_ev); + temp0 = __lsx_vpackev_b(out0, out0); + temp1 = __lsx_vpackev_b(alpha, out0); + dst2 = __lsx_vilvl_h(temp1, temp0); + dst3 = __lsx_vilvh_h(temp1, temp0); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + src_y += 16; + } +} + +void J400ToARGBRow_LSX(const uint8_t* src_y, uint8_t* dst_argb, int width) { + int x; + int len = width / 16; + __m128i vec_y, dst0, dst1, dst2, dst3; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i alpha = __lsx_vldi(0xFF); + + for (x = 0; x < len; x++) { + vec_y = __lsx_vld(src_y, 0); + tmp0 = __lsx_vilvl_b(vec_y, vec_y); + tmp1 = __lsx_vilvh_b(vec_y, vec_y); + tmp2 = __lsx_vilvl_b(alpha, vec_y); + tmp3 = __lsx_vilvh_b(alpha, vec_y); + dst0 = __lsx_vilvl_h(tmp2, tmp0); + dst1 = 
__lsx_vilvh_h(tmp2, tmp0);
+    dst2 = __lsx_vilvl_h(tmp3, tmp1);
+    dst3 = __lsx_vilvh_h(tmp3, tmp1);
+    __lsx_vst(dst0, dst_argb, 0);
+    __lsx_vst(dst1, dst_argb, 16);
+    __lsx_vst(dst2, dst_argb, 32);
+    __lsx_vst(dst3, dst_argb, 48);
+    dst_argb += 64;
+    src_y += 16;
+  }
+}
+
+void YUY2ToARGBRow_LSX(const uint8_t* src_yuy2,
+                       uint8_t* dst_argb,
+                       const struct YuvConstants* yuvconstants,
+                       int width) {
+  int x;
+  int len = width / 8;
+  __m128i src0, vec_y, vec_vu;
+  __m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb;
+  __m128i vec_vrub, vec_vgug;
+  __m128i out_b, out_g, out_r;
+  __m128i const_80 = __lsx_vldi(0x480);
+  __m128i zero = __lsx_vldi(0);
+  __m128i alpha = __lsx_vldi(0xFF);
+
+  YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+  vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub);
+  vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug);
+
+  for (x = 0; x < len; x++) {
+    src0 = __lsx_vld(src_yuy2, 0);
+    vec_y = __lsx_vpickev_b(src0, src0);
+    vec_vu = __lsx_vpickod_b(src0, src0);
+    YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
+             out_r);
+    STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
+    src_yuy2 += 16;
+  }
+}
+
+void UYVYToARGBRow_LSX(const uint8_t* src_uyvy,
+                       uint8_t* dst_argb,
+                       const struct YuvConstants* yuvconstants,
+                       int width) {
+  int x;
+  int len = width / 8;
+  __m128i src0, vec_y, vec_vu;
+  __m128i vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb;
+  __m128i vec_vrub, vec_vgug;
+  __m128i out_b, out_g, out_r;
+  __m128i const_80 = __lsx_vldi(0x480);
+  __m128i zero = __lsx_vldi(0);
+  __m128i alpha = __lsx_vldi(0xFF);
+
+  YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
+  vec_vrub = __lsx_vilvl_h(vec_vr, vec_ub);
+  vec_vgug = __lsx_vilvl_h(vec_vg, vec_ug);
+
+  for (x = 0; x < len; x++) {
+    src0 = __lsx_vld(src_uyvy, 0);
+    vec_y = __lsx_vpickod_b(src0, src0);
+    vec_vu = __lsx_vpickev_b(src0, src0);
+    YUVTORGB(vec_y, vec_vu, vec_vrub, vec_vgug, vec_yg, vec_yb, out_b, out_g,
+             out_r);
+    STOREARGB(alpha, out_r, out_g, out_b, dst_argb);
+    src_uyvy += 16;
+  }
+}
+
+void InterpolateRow_LSX(uint8_t* dst_ptr,
+                        const uint8_t* src_ptr,
+                        ptrdiff_t src_stride,
+                        int width,
+                        int32_t source_y_fraction) {
+  int x;
+  int y1_fraction = source_y_fraction;
+  int y0_fraction = 256 - y1_fraction;
+  const uint8_t* nex_ptr = src_ptr + src_stride;
+  uint16_t y_fractions;
+  int len = width / 32;
+  __m128i src0, src1, nex0, nex1;
+  __m128i dst0, dst1, y_frac;
+  __m128i tmp0, tmp1, tmp2, tmp3;
+  __m128i const_128 = __lsx_vldi(0x480);
+
+  if (y1_fraction == 0) {
+    for (x = 0; x < len; x++) {
+      DUP2_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src0, src1);
+      __lsx_vst(src0, dst_ptr, 0);
+      __lsx_vst(src1, dst_ptr, 16);
+      src_ptr += 32;
+      dst_ptr += 32;
+    }
+    return;
+  }
+
+  if (y1_fraction == 128) {
+    for (x = 0; x < len; x++) {
+      DUP2_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src0, src1);
+      DUP2_ARG2(__lsx_vld, nex_ptr, 0, nex_ptr, 16, nex0, nex1);
+      dst0 = __lsx_vavgr_bu(src0, nex0);
+      dst1 = __lsx_vavgr_bu(src1, nex1);
+      __lsx_vst(dst0, dst_ptr, 0);
+      __lsx_vst(dst1, dst_ptr, 16);
+      src_ptr += 32;
+      nex_ptr += 32;
+      dst_ptr += 32;
+    }
+    return;
+  }
+
+  y_fractions = (uint16_t)(y0_fraction + (y1_fraction << 8));
+  y_frac = __lsx_vreplgr2vr_h(y_fractions);
+
+  for (x = 0; x < len; x++) {
+    DUP2_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src0, src1);
+    DUP2_ARG2(__lsx_vld, nex_ptr, 0, nex_ptr, 16, nex0, nex1);
+    tmp0 = __lsx_vilvl_b(nex0, src0);
+    tmp1 = __lsx_vilvh_b(nex0, src0);
+    tmp2 = __lsx_vilvl_b(nex1, src1);
+    tmp3 = __lsx_vilvh_b(nex1, src1);
+    tmp0 = __lsx_vdp2add_h_bu(const_128, tmp0, y_frac);
+    tmp1 = __lsx_vdp2add_h_bu(const_128, tmp1, y_frac);
+    tmp2 = __lsx_vdp2add_h_bu(const_128, tmp2, y_frac);
+    tmp3 = __lsx_vdp2add_h_bu(const_128, tmp3, y_frac);
+    dst0 = __lsx_vsrlni_b_h(tmp1, tmp0, 8);
+    dst1 = __lsx_vsrlni_b_h(tmp3, tmp2, 8);
+    __lsx_vst(dst0, dst_ptr, 0);
+    __lsx_vst(dst1, dst_ptr, 16);
+    src_ptr += 32;
+    nex_ptr += 32;
+    dst_ptr += 32;
+  }
+}
+
+void ARGBSetRow_LSX(uint8_t* dst_argb, uint32_t v32, int width) {
+  int x;
+  int len = width / 4;
+  __m128i dst0 = __lsx_vreplgr2vr_w(v32);
+
+  for (x = 0; x < len; x++) {
+    __lsx_vst(dst0, dst_argb, 0);
+    dst_argb += 16;
+  }
+}
+
+void RAWToRGB24Row_LSX(const uint8_t* src_raw, uint8_t* dst_rgb24, int width) {
+  int x;
+  int len = width / 16;
+  __m128i src0, src1, src2;
+  __m128i dst0, dst1, dst2;
+  __m128i shuf0 = {0x0708030405000102, 0x110C0D0E090A0B06};
+  __m128i shuf1 = {0x1516171213140F10, 0x1F1E1B1C1D18191A};
+  __m128i shuf2 = {0x090405060102031E, 0x0D0E0F0A0B0C0708};
+
+  for (x = 0; x < len; x++) {
+    DUP2_ARG2(__lsx_vld, src_raw, 0, src_raw, 16, src0, src1);
+    src2 = __lsx_vld(src_raw, 32);
+    DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuf0, src1, src0, shuf1, dst0, dst1);
+    dst2 = __lsx_vshuf_b(src1, src2, shuf2);
+    dst1 = __lsx_vinsgr2vr_b(dst1, src_raw[32], 0x0E);
+    __lsx_vst(dst0, dst_rgb24, 0);
+    __lsx_vst(dst1, dst_rgb24, 16);
+    __lsx_vst(dst2, dst_rgb24, 32);
+    dst_rgb24 += 48;
+    src_raw += 48;
+  }
+}
+
+void MergeUVRow_LSX(const uint8_t* src_u,
+                    const uint8_t* src_v,
+                    uint8_t* dst_uv,
+                    int width) {
+  int x;
+  int len = width / 16;
+  __m128i src0, src1, dst0, dst1;
+
+  for (x = 0; x < len; x++) {
+    DUP2_ARG2(__lsx_vld, src_u, 0, src_v, 0, src0, src1);
+    dst0 = __lsx_vilvl_b(src1, src0);
+    dst1 = __lsx_vilvh_b(src1, src0);
+    __lsx_vst(dst0, dst_uv, 0);
+    __lsx_vst(dst1, dst_uv, 16);
+    src_u += 16;
+    src_v += 16;
+    dst_uv += 32;
+  }
+}
+
+void ARGBExtractAlphaRow_LSX(const uint8_t* src_argb,
+                             uint8_t* dst_a,
+                             int width) {
+  int x;
+  int len = width / 16;
+  __m128i src0, src1, src2, src3, tmp0, tmp1, dst0;
+
+  for (x = 0; x < len; x++) {
+    DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
+              src0, src1, src2, src3);
+    tmp0 = __lsx_vpickod_b(src1, src0);
+    tmp1 = __lsx_vpickod_b(src3, src2);
+    dst0 = __lsx_vpickod_b(tmp1, tmp0);
+    __lsx_vst(dst0, dst_a, 0);
+    src_argb += 64;
+    dst_a += 16;
+  }
+}
+
+void ARGBBlendRow_LSX(const uint8_t* src_argb,
+                      const uint8_t* src_argb1,
+                      uint8_t* dst_argb,
+                      int width) {
+  int x;
+  int len = width / 8;
+  __m128i src0, src1, src2, src3;
+  __m128i tmp0, tmp1, dst0, dst1;
+  __m128i reg0, reg1, reg2, reg3;
+  __m128i a0, a1, a2, a3;
+  __m128i const_256 = __lsx_vldi(0x500);
+  __m128i zero = __lsx_vldi(0);
+  __m128i alpha = __lsx_vldi(0xFF);
+  __m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
+
+  for (x = 0; x < len; x++) {
+    DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb1, 0, src_argb1, 16,
+              src0, src1, src2, src3);
+    tmp0 = __lsx_vshuf4i_b(src0, 0xFF);
+    tmp1 = __lsx_vshuf4i_b(src1, 0xFF);
+    a0 = __lsx_vilvl_b(zero, tmp0);
+    a1 = __lsx_vilvh_b(zero, tmp0);
+    a2 = __lsx_vilvl_b(zero, tmp1);
+    a3 = __lsx_vilvh_b(zero, tmp1);
+    reg0 = __lsx_vilvl_b(zero, src2);
+    reg1 = __lsx_vilvh_b(zero, src2);
+    reg2 = __lsx_vilvl_b(zero, src3);
+    reg3 = __lsx_vilvh_b(zero, src3);
+    DUP4_ARG2(__lsx_vsub_h, const_256, a0, const_256, a1, const_256, a2,
+              const_256, a3, a0, a1, a2, a3);
+    DUP4_ARG2(__lsx_vmul_h, a0, reg0, a1, reg1, a2, reg2, a3, reg3, reg0, reg1,
+              reg2, reg3);
+    DUP2_ARG3(__lsx_vsrani_b_h,
reg1, reg0, 8, reg3, reg2, 8, dst0, dst1); + dst0 = __lsx_vsadd_bu(dst0, src0); + dst1 = __lsx_vsadd_bu(dst1, src1); + dst0 = __lsx_vbitsel_v(dst0, alpha, control); + dst1 = __lsx_vbitsel_v(dst1, alpha, control); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + src_argb += 32; + src_argb1 += 32; + dst_argb += 32; + } +} + +void ARGBQuantizeRow_LSX(uint8_t* dst_argb, + int scale, + int interval_size, + int interval_offset, + int width) { + int x; + int len = width / 16; + __m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; + __m128i vec_size = __lsx_vreplgr2vr_b(interval_size); + __m128i vec_offset = __lsx_vreplgr2vr_b(interval_offset); + __m128i vec_scale = __lsx_vreplgr2vr_w(scale); + __m128i zero = __lsx_vldi(0); + __m128i control = {0xFF000000FF000000, 0xFF000000FF000000}; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, dst_argb, 32, dst_argb, 48, + src0, src1, src2, src3); + reg0 = __lsx_vilvl_b(zero, src0); + reg1 = __lsx_vilvh_b(zero, src0); + reg2 = __lsx_vilvl_b(zero, src1); + reg3 = __lsx_vilvh_b(zero, src1); + reg4 = __lsx_vilvl_b(zero, src2); + reg5 = __lsx_vilvh_b(zero, src2); + reg6 = __lsx_vilvl_b(zero, src3); + reg7 = __lsx_vilvh_b(zero, src3); + tmp0 = __lsx_vilvl_h(zero, reg0); + tmp1 = __lsx_vilvh_h(zero, reg0); + tmp2 = __lsx_vilvl_h(zero, reg1); + tmp3 = __lsx_vilvh_h(zero, reg1); + tmp4 = __lsx_vilvl_h(zero, reg2); + tmp5 = __lsx_vilvh_h(zero, reg2); + tmp6 = __lsx_vilvl_h(zero, reg3); + tmp7 = __lsx_vilvh_h(zero, reg3); + DUP4_ARG2(__lsx_vmul_w, tmp0, vec_scale, tmp1, vec_scale, tmp2, vec_scale, + tmp3, vec_scale, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vmul_w, tmp4, vec_scale, tmp5, vec_scale, tmp6, vec_scale, + tmp7, vec_scale, tmp4, tmp5, tmp6, tmp7); + DUP4_ARG3(__lsx_vsrani_h_w, tmp1, tmp0, 16, tmp3, tmp2, 16, tmp5, tmp4, 16, + tmp7, tmp6, 16, reg0, reg1, reg2, reg3); + dst0 = __lsx_vpickev_b(reg1, reg0); + dst1 = __lsx_vpickev_b(reg3, reg2); + tmp0 = __lsx_vilvl_h(zero, reg4); + tmp1 = __lsx_vilvh_h(zero, reg4); + tmp2 = __lsx_vilvl_h(zero, reg5); + tmp3 = __lsx_vilvh_h(zero, reg5); + tmp4 = __lsx_vilvl_h(zero, reg6); + tmp5 = __lsx_vilvh_h(zero, reg6); + tmp6 = __lsx_vilvl_h(zero, reg7); + tmp7 = __lsx_vilvh_h(zero, reg7); + DUP4_ARG2(__lsx_vmul_w, tmp0, vec_scale, tmp1, vec_scale, tmp2, vec_scale, + tmp3, vec_scale, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vmul_w, tmp4, vec_scale, tmp5, vec_scale, tmp6, vec_scale, + tmp7, vec_scale, tmp4, tmp5, tmp6, tmp7); + DUP4_ARG3(__lsx_vsrani_h_w, tmp1, tmp0, 16, tmp3, tmp2, 16, tmp5, tmp4, 16, + tmp7, tmp6, 16, reg0, reg1, reg2, reg3); + dst2 = __lsx_vpickev_b(reg1, reg0); + dst3 = __lsx_vpickev_b(reg3, reg2); + DUP4_ARG2(__lsx_vmul_b, dst0, vec_size, dst1, vec_size, dst2, vec_size, + dst3, vec_size, dst0, dst1, dst2, dst3); + DUP4_ARG2(__lsx_vadd_b, dst0, vec_offset, dst1, vec_offset, dst2, + vec_offset, dst3, vec_offset, dst0, dst1, dst2, dst3); + DUP4_ARG3(__lsx_vbitsel_v, dst0, src0, control, dst1, src1, control, dst2, + src2, control, dst3, src3, control, dst0, dst1, dst2, dst3); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + __lsx_vst(dst2, dst_argb, 32); + __lsx_vst(dst3, dst_argb, 48); + dst_argb += 64; + } +} + +void ARGBColorMatrixRow_LSX(const uint8_t* src_argb, + uint8_t* dst_argb, + const int8_t* matrix_argb, + int width) { + int x; + int len = width / 8; + __m128i src0, src1, tmp0, tmp1, dst0, dst1; + __m128i 
tmp_b, tmp_g, tmp_r, tmp_a; + __m128i reg_b, reg_g, reg_r, reg_a; + __m128i matrix_b = __lsx_vldrepl_w(matrix_argb, 0); + __m128i matrix_g = __lsx_vldrepl_w(matrix_argb, 4); + __m128i matrix_r = __lsx_vldrepl_w(matrix_argb, 8); + __m128i matrix_a = __lsx_vldrepl_w(matrix_argb, 12); + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1); + DUP4_ARG2(__lsx_vdp2_h_bu_b, src0, matrix_b, src0, matrix_g, src0, matrix_r, + src0, matrix_a, tmp_b, tmp_g, tmp_r, tmp_a); + DUP4_ARG2(__lsx_vdp2_h_bu_b, src1, matrix_b, src1, matrix_g, src1, matrix_r, + src1, matrix_a, reg_b, reg_g, reg_r, reg_a); + DUP4_ARG2(__lsx_vhaddw_w_h, tmp_b, tmp_b, tmp_g, tmp_g, tmp_r, tmp_r, tmp_a, + tmp_a, tmp_b, tmp_g, tmp_r, tmp_a); + DUP4_ARG2(__lsx_vhaddw_w_h, reg_b, reg_b, reg_g, reg_g, reg_r, reg_r, reg_a, + reg_a, reg_b, reg_g, reg_r, reg_a); + DUP4_ARG2(__lsx_vsrai_w, tmp_b, 6, tmp_g, 6, tmp_r, 6, tmp_a, 6, tmp_b, + tmp_g, tmp_r, tmp_a); + DUP4_ARG2(__lsx_vsrai_w, reg_b, 6, reg_g, 6, reg_r, 6, reg_a, 6, reg_b, + reg_g, reg_r, reg_a); + DUP4_ARG1(__lsx_vclip255_w, tmp_b, tmp_g, tmp_r, tmp_a, tmp_b, tmp_g, tmp_r, + tmp_a) + DUP4_ARG1(__lsx_vclip255_w, reg_b, reg_g, reg_r, reg_a, reg_b, reg_g, reg_r, + reg_a) + DUP4_ARG2(__lsx_vpickev_h, reg_b, tmp_b, reg_g, tmp_g, reg_r, tmp_r, reg_a, + tmp_a, tmp_b, tmp_g, tmp_r, tmp_a); + tmp0 = __lsx_vpackev_b(tmp_g, tmp_b); + tmp1 = __lsx_vpackev_b(tmp_a, tmp_r); + dst0 = __lsx_vilvl_h(tmp1, tmp0); + dst1 = __lsx_vilvh_h(tmp1, tmp0); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + src_argb += 32; + dst_argb += 32; + } +} + +void SplitUVRow_LSX(const uint8_t* src_uv, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + __m128i src0, src1, src2, src3; + __m128i dst0, dst1, dst2, dst3; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32, src_uv, 48, src0, + src1, src2, src3); + DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, dst0, dst1); + DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst2, dst3); + __lsx_vst(dst0, dst_u, 0); + __lsx_vst(dst1, dst_u, 16); + __lsx_vst(dst2, dst_v, 0); + __lsx_vst(dst3, dst_v, 16); + src_uv += 64; + dst_u += 32; + dst_v += 32; + } +} + +void SetRow_LSX(uint8_t* dst, uint8_t v8, int width) { + int x; + int len = width / 16; + __m128i dst0 = __lsx_vreplgr2vr_b(v8); + + for (x = 0; x < len; x++) { + __lsx_vst(dst0, dst, 0); + dst += 16; + } +} + +void MirrorSplitUVRow_LSX(const uint8_t* src_uv, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + int x; + int len = width / 32; + __m128i src0, src1, src2, src3; + __m128i dst0, dst1, dst2, dst3; + __m128i shuff0 = {0x10121416181A1C1E, 0x00020406080A0C0E}; + __m128i shuff1 = {0x11131517191B1D1F, 0x01030507090B0D0F}; + + src_uv += (width << 1); + for (x = 0; x < len; x++) { + src_uv -= 64; + DUP4_ARG2(__lsx_vld, src_uv, 0, src_uv, 16, src_uv, 32, src_uv, 48, src2, + src3, src0, src1); + DUP4_ARG3(__lsx_vshuf_b, src1, src0, shuff1, src3, src2, shuff1, src1, src0, + shuff0, src3, src2, shuff0, dst0, dst1, dst2, dst3); + __lsx_vst(dst0, dst_v, 0); + __lsx_vst(dst1, dst_v, 16); + __lsx_vst(dst2, dst_u, 0); + __lsx_vst(dst3, dst_u, 16); + dst_u += 32; + dst_v += 32; + } +} + +void HalfFloatRow_LSX(const uint16_t* src, + uint16_t* dst, + float scale, + int width) { + int x; + int len = width / 32; + float mult = 1.9259299444e-34f * scale; + __m128i src0, src1, src2, src3, dst0, dst1, dst2, dst3; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128 reg0, reg1, reg2, 
reg3, reg4, reg5, reg6, reg7; + __m128 vec_mult = (__m128)__lsx_vldrepl_w(&mult, 0); + __m128i zero = __lsx_vldi(0); + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src1, src2, + src3); + DUP4_ARG2(__lsx_vilvl_h, zero, src0, zero, src1, zero, src2, zero, src3, + tmp0, tmp2, tmp4, tmp6); + DUP4_ARG2(__lsx_vilvh_h, zero, src0, zero, src1, zero, src2, zero, src3, + tmp1, tmp3, tmp5, tmp7); + DUP4_ARG1(__lsx_vffint_s_wu, tmp0, tmp2, tmp4, tmp6, reg0, reg2, reg4, + reg6); + DUP4_ARG1(__lsx_vffint_s_wu, tmp1, tmp3, tmp5, tmp7, reg1, reg3, reg5, + reg7); + DUP4_ARG2(__lsx_vfmul_s, reg0, vec_mult, reg1, vec_mult, reg2, vec_mult, + reg3, vec_mult, reg0, reg1, reg2, reg3); + DUP4_ARG2(__lsx_vfmul_s, reg4, vec_mult, reg5, vec_mult, reg6, vec_mult, + reg7, vec_mult, reg4, reg5, reg6, reg7); + DUP4_ARG2(__lsx_vsrli_w, (v4u32)reg0, 13, (v4u32)reg1, 13, (v4u32)reg2, 13, + (v4u32)reg3, 13, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vsrli_w, (v4u32)reg4, 13, (v4u32)reg5, 13, (v4u32)reg6, 13, + (v4u32)reg7, 13, tmp4, tmp5, tmp6, tmp7); + DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, + dst0, dst1, dst2, dst3); + __lsx_vst(dst0, dst, 0); + __lsx_vst(dst1, dst, 16); + __lsx_vst(dst2, dst, 32); + __lsx_vst(dst3, dst, 48); + src += 32; + dst += 32; + } +} + +#ifdef __cplusplus +} // extern "C" +} // namespace libyuv +#endif + +#endif // !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) diff --git a/third-party/libyuv/third_party/libyuv/source/row_msa.cc b/third-party/libyuv/third_party/libyuv/source/row_msa.cc index c0b13b0fd0..b7d5bb5ecf 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_msa.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_msa.cc @@ -24,16 +24,14 @@ extern "C" { #define ALPHA_VAL (-1) // Fill YUV -> RGB conversion constants into vectors -#define YUVTORGB_SETUP(yuvconst, ub, vr, ug, vg, bb, bg, br, yg) \ - { \ - ub = __msa_fill_w(yuvconst->kUVToB[0]); \ - vr = __msa_fill_w(yuvconst->kUVToR[1]); \ - ug = __msa_fill_w(yuvconst->kUVToG[0]); \ - vg = __msa_fill_w(yuvconst->kUVToG[1]); \ - bb = __msa_fill_w(yuvconst->kUVBiasB[0]); \ - bg = __msa_fill_w(yuvconst->kUVBiasG[0]); \ - br = __msa_fill_w(yuvconst->kUVBiasR[0]); \ - yg = __msa_fill_w(yuvconst->kYToRgb[0]); \ +#define YUVTORGB_SETUP(yuvconst, ub, vr, ug, vg, yg, yb) \ + { \ + ub = __msa_fill_w(yuvconst->kUVToB[0]); \ + vr = __msa_fill_w(yuvconst->kUVToR[1]); \ + ug = __msa_fill_w(yuvconst->kUVToG[0]); \ + vg = __msa_fill_w(yuvconst->kUVToG[1]); \ + yg = __msa_fill_w(yuvconst->kYToRgb[0]); \ + yb = __msa_fill_w(yuvconst->kYBiasToRgb[0]); \ } // Load YUV 422 pixel data @@ -70,54 +68,52 @@ extern "C" { } // Convert 8 pixels of YUV 420 to RGB. 
-#define YUVTORGB(in_y, in_uv, ubvr, ugvg, bb, bg, br, yg, out_b, out_g, out_r) \ - { \ - v8i16 vec0_m, vec1_m; \ - v4i32 reg0_m, reg1_m, reg2_m, reg3_m, reg4_m; \ - v4i32 reg5_m, reg6_m, reg7_m; \ - v16i8 zero_m = {0}; \ - \ - vec0_m = (v8i16)__msa_ilvr_b((v16i8)in_y, (v16i8)in_y); \ - vec1_m = (v8i16)__msa_ilvr_b((v16i8)zero_m, (v16i8)in_uv); \ - reg0_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec0_m); \ - reg1_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec0_m); \ - reg2_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec1_m); \ - reg3_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec1_m); \ - reg0_m *= yg; \ - reg1_m *= yg; \ - reg2_m *= ubvr; \ - reg3_m *= ubvr; \ - reg0_m = __msa_srai_w(reg0_m, 16); \ - reg1_m = __msa_srai_w(reg1_m, 16); \ - reg4_m = __msa_dotp_s_w((v8i16)vec1_m, (v8i16)ugvg); \ - reg5_m = __msa_ilvev_w(reg2_m, reg2_m); \ - reg6_m = __msa_ilvev_w(reg3_m, reg3_m); \ - reg7_m = __msa_ilvr_w(reg4_m, reg4_m); \ - reg2_m = __msa_ilvod_w(reg2_m, reg2_m); \ - reg3_m = __msa_ilvod_w(reg3_m, reg3_m); \ - reg4_m = __msa_ilvl_w(reg4_m, reg4_m); \ - reg5_m = reg0_m - reg5_m; \ - reg6_m = reg1_m - reg6_m; \ - reg2_m = reg0_m - reg2_m; \ - reg3_m = reg1_m - reg3_m; \ - reg7_m = reg0_m - reg7_m; \ - reg4_m = reg1_m - reg4_m; \ - reg5_m += bb; \ - reg6_m += bb; \ - reg7_m += bg; \ - reg4_m += bg; \ - reg2_m += br; \ - reg3_m += br; \ - reg5_m = __msa_srai_w(reg5_m, 6); \ - reg6_m = __msa_srai_w(reg6_m, 6); \ - reg7_m = __msa_srai_w(reg7_m, 6); \ - reg4_m = __msa_srai_w(reg4_m, 6); \ - reg2_m = __msa_srai_w(reg2_m, 6); \ - reg3_m = __msa_srai_w(reg3_m, 6); \ - CLIP_0TO255(reg5_m, reg6_m, reg7_m, reg4_m, reg2_m, reg3_m); \ - out_b = __msa_pckev_h((v8i16)reg6_m, (v8i16)reg5_m); \ - out_g = __msa_pckev_h((v8i16)reg4_m, (v8i16)reg7_m); \ - out_r = __msa_pckev_h((v8i16)reg3_m, (v8i16)reg2_m); \ +#define YUVTORGB(in_y, in_uv, ubvr, ugvg, yg, yb, out_b, out_g, out_r) \ + { \ + v8i16 vec0_m, vec1_m; \ + v4i32 reg0_m, reg1_m, reg2_m, reg3_m, reg4_m; \ + v4i32 reg5_m, reg6_m, reg7_m; \ + v16i8 temp_m, zero_m = {0}; \ + \ + vec0_m = (v8i16)__msa_ilvr_b((v16i8)in_y, (v16i8)in_y); \ + vec1_m = (v8i16)__msa_ilvr_b((v16i8)zero_m, (v16i8)in_uv); \ + reg0_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec0_m); \ + reg1_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec0_m); \ + vec1_m = (v8i16)__msa_subv_h(vec1_m, const_0x80); \ + temp_m = (v16i8)__msa_clti_s_h(vec1_m, 0); \ + reg2_m = (v4i32)__msa_ilvr_h((v8i16)temp_m, (v8i16)vec1_m); \ + reg3_m = (v4i32)__msa_ilvl_h((v8i16)temp_m, (v8i16)vec1_m); \ + reg0_m *= yg; \ + reg1_m *= yg; \ + reg2_m *= ubvr; \ + reg3_m *= ubvr; \ + reg0_m = __msa_srai_w(reg0_m, 16); \ + reg1_m = __msa_srai_w(reg1_m, 16); \ + reg0_m += yb; \ + reg1_m += yb; \ + reg4_m = __msa_dotp_s_w((v8i16)vec1_m, (v8i16)ugvg); \ + reg5_m = __msa_ilvev_w(reg2_m, reg2_m); \ + reg6_m = __msa_ilvev_w(reg3_m, reg3_m); \ + reg7_m = __msa_ilvr_w(reg4_m, reg4_m); \ + reg2_m = __msa_ilvod_w(reg2_m, reg2_m); \ + reg3_m = __msa_ilvod_w(reg3_m, reg3_m); \ + reg4_m = __msa_ilvl_w(reg4_m, reg4_m); \ + reg5_m = reg0_m + reg5_m; \ + reg6_m = reg1_m + reg6_m; \ + reg2_m = reg0_m + reg2_m; \ + reg3_m = reg1_m + reg3_m; \ + reg7_m = reg0_m - reg7_m; \ + reg4_m = reg1_m - reg4_m; \ + reg5_m = __msa_srai_w(reg5_m, 6); \ + reg6_m = __msa_srai_w(reg6_m, 6); \ + reg7_m = __msa_srai_w(reg7_m, 6); \ + reg4_m = __msa_srai_w(reg4_m, 6); \ + reg2_m = __msa_srai_w(reg2_m, 6); \ + reg3_m = __msa_srai_w(reg3_m, 6); \ + CLIP_0TO255(reg5_m, reg6_m, reg7_m, reg4_m, reg2_m, reg3_m); \ + out_b = 
__msa_pckev_h((v8i16)reg6_m, (v8i16)reg5_m); \ + out_g = __msa_pckev_h((v8i16)reg4_m, (v8i16)reg7_m); \ + out_r = __msa_pckev_h((v8i16)reg3_m, (v8i16)reg2_m); \ } // Pack and Store 8 ARGB values. @@ -284,6 +280,34 @@ extern "C" { out_v = (v16u8)__msa_insert_d(zero_m, 0, (int64_t)v_m); \ } +#define RGBTOUV(_tmpb, _tmpg, _tmpr, _nexb, _nexg, _nexr, _dst0) \ + { \ + v16u8 _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5; \ + v8i16 _reg0, _reg1, _reg2, _reg3, _reg4, _reg5; \ + _tmp0 = (v16u8)__msa_ilvev_b(_tmpb, _nexb); \ + _tmp1 = (v16u8)__msa_ilvod_b(_tmpb, _nexb); \ + _tmp2 = (v16u8)__msa_ilvev_b(_tmpg, _nexg); \ + _tmp3 = (v16u8)__msa_ilvod_b(_tmpg, _nexg); \ + _tmp4 = (v16u8)__msa_ilvev_b(_tmpr, _nexr); \ + _tmp5 = (v16u8)__msa_ilvod_b(_tmpr, _nexr); \ + _reg0 = (v8i16)__msa_hadd_u_h(_tmp0, _tmp0); \ + _reg1 = (v8i16)__msa_hadd_u_h(_tmp1, _tmp1); \ + _reg2 = (v8i16)__msa_hadd_u_h(_tmp2, _tmp2); \ + _reg3 = (v8i16)__msa_hadd_u_h(_tmp3, _tmp3); \ + _reg4 = (v8i16)__msa_hadd_u_h(_tmp4, _tmp4); \ + _reg5 = (v8i16)__msa_hadd_u_h(_tmp5, _tmp5); \ + _reg0 = (v8i16)__msa_aver_u_h(_reg0, _reg1); \ + _reg2 = (v8i16)__msa_aver_u_h(_reg2, _reg3); \ + _reg4 = (v8i16)__msa_aver_u_h(_reg4, _reg5); \ + _reg1 = const_8080 + const_112 * _reg0; \ + _reg3 = const_8080 + const_112 * _reg4; \ + _reg1 = (v8i16)__msa_msubv_h(_reg1, const_74, _reg2); \ + _reg3 = (v8i16)__msa_msubv_h(_reg3, const_94, _reg2); \ + _reg1 = (v8i16)__msa_msubv_h(_reg1, const_38, _reg4); \ + _reg3 = (v8i16)__msa_msubv_h(_reg3, const_18, _reg0); \ + _dst0 = (v16u8)__msa_pckod_b(_reg3, _reg1); \ + } + void MirrorRow_MSA(const uint8_t* src, uint8_t* dst, int width) { int x; v16u8 src0, src1, src2, src3; @@ -389,20 +413,19 @@ void I422ToARGBRow_MSA(const uint8_t* src_y, int x; v16u8 src0, src1, src2; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); for (x = 0; x < width; x += 8) { READYUV422(src_y, src_u, src_v, src0, src1, src2); src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); STOREARGB(vec0, vec1, vec2, alpha, dst_argb); src_y += 8; src_u += 4; @@ -420,20 +443,19 @@ void I422ToRGBARow_MSA(const uint8_t* src_y, int x; v16u8 src0, src1, src2; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); for (x = 0; x < width; x += 8) { READYUV422(src_y, src_u, src_v, src0, src1, src2); src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, 
- vec0, vec1, vec2); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); STOREARGB(alpha, vec0, vec1, vec2, dst_argb); src_y += 8; src_u += 4; @@ -453,12 +475,12 @@ void I422AlphaToARGBRow_MSA(const uint8_t* src_y, int64_t data_a; v16u8 src0, src1, src2, src3; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v4i32 zero = {0}; + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); @@ -467,8 +489,7 @@ void I422AlphaToARGBRow_MSA(const uint8_t* src_y, READYUV422(src_y, src_u, src_v, src0, src1, src2); src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); src3 = (v16u8)__msa_insert_d((v2i64)zero, 0, data_a); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); src3 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src3); STOREARGB(vec0, vec1, vec2, src3, dst_argb); src_y += 8; @@ -489,17 +510,17 @@ void I422ToRGB24Row_MSA(const uint8_t* src_y, int64_t data_u, data_v; v16u8 src0, src1, src2, src3, src4, dst0, dst1, dst2; v8i16 vec0, vec1, vec2, vec3, vec4, vec5; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v16u8 reg0, reg1, reg2, reg3; v2i64 zero = {0}; + v8i16 const_0x80 = __msa_ldi_h(0x80); v16i8 shuffler0 = {0, 1, 16, 2, 3, 17, 4, 5, 18, 6, 7, 19, 8, 9, 20, 10}; v16i8 shuffler1 = {0, 21, 1, 2, 22, 3, 4, 23, 5, 6, 24, 7, 8, 25, 9, 10}; v16i8 shuffler2 = {26, 6, 7, 27, 8, 9, 28, 10, 11, 29, 12, 13, 30, 14, 15, 31}; - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); @@ -512,10 +533,8 @@ void I422ToRGB24Row_MSA(const uint8_t* src_y, src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); src3 = (v16u8)__msa_sldi_b((v16i8)src0, (v16i8)src0, 8); src4 = (v16u8)__msa_sldi_b((v16i8)src1, (v16i8)src1, 8); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); - YUVTORGB(src3, src4, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec3, vec4, vec5); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); + YUVTORGB(src3, src4, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec3, vec4, vec5); reg0 = (v16u8)__msa_ilvev_b((v16i8)vec1, (v16i8)vec0); reg2 = (v16u8)__msa_ilvev_b((v16i8)vec4, (v16i8)vec3); reg3 = (v16u8)__msa_pckev_b((v16i8)vec5, (v16i8)vec2); @@ -542,24 +561,23 @@ void I422ToRGB565Row_MSA(const uint8_t* src_y, int x; v16u8 src0, src1, src2, dst0; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); 
vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); for (x = 0; x < width; x += 8) { READYUV422(src_y, src_u, src_v, src0, src1, src2); src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec2, vec1); - vec0 = __msa_srai_h(vec0, 3); - vec1 = __msa_srai_h(vec1, 3); - vec2 = __msa_srai_h(vec2, 2); - vec1 = __msa_slli_h(vec1, 11); - vec2 = __msa_slli_h(vec2, 5); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); + vec0 = __msa_srli_h(vec0, 3); + vec1 = __msa_srli_h(vec1, 2); + vec2 = __msa_srli_h(vec2, 3); + vec2 = __msa_slli_h(vec2, 11); + vec1 = __msa_slli_h(vec1, 5); vec0 |= vec1; dst0 = (v16u8)(vec2 | vec0); ST_UB(dst0, dst_rgb565); @@ -581,25 +599,24 @@ void I422ToARGB4444Row_MSA(const uint8_t* src_y, v16u8 src0, src1, src2, dst0; v8i16 vec0, vec1, vec2; v8u16 reg0, reg1, reg2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v8u16 const_0xF000 = (v8u16)__msa_fill_h(0xF000); + v8u16 mask = (v8u16)__msa_fill_h(0x00F0); + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); for (x = 0; x < width; x += 8) { READYUV422(src_y, src_u, src_v, src0, src1, src2); src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); - reg0 = (v8u16)__msa_srai_h(vec0, 4); - reg1 = (v8u16)__msa_srai_h(vec1, 4); - reg2 = (v8u16)__msa_srai_h(vec2, 4); - reg1 = (v8u16)__msa_slli_h((v8i16)reg1, 4); - reg2 = (v8u16)__msa_slli_h((v8i16)reg2, 8); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); + reg0 = (v8u16)__msa_srli_h(vec0, 4); + reg2 = (v8u16)__msa_srli_h(vec2, 4); + reg1 = (v8u16)__msa_and_v(vec1, mask); + reg2 = (v8u16)__msa_slli_h(reg2, 8); reg1 |= const_0xF000; reg0 |= reg2; dst0 = (v16u8)(reg1 | reg0); @@ -621,23 +638,22 @@ void I422ToARGB1555Row_MSA(const uint8_t* src_y, v16u8 src0, src1, src2, dst0; v8i16 vec0, vec1, vec2; v8u16 reg0, reg1, reg2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v8u16 const_0x8000 = (v8u16)__msa_fill_h(0x8000); + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); for (x = 0; x < width; x += 8) { READYUV422(src_y, src_u, src_v, src0, src1, src2); src1 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); - reg0 = (v8u16)__msa_srai_h(vec0, 3); - reg1 = (v8u16)__msa_srai_h(vec1, 3); - reg2 = (v8u16)__msa_srai_h(vec2, 3); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); + reg0 = (v8u16)__msa_srli_h(vec0, 3); + reg1 = (v8u16)__msa_srli_h(vec1, 3); + reg2 = (v8u16)__msa_srli_h(vec2, 3); reg1 = (v8u16)__msa_slli_h((v8i16)reg1, 5); reg2 = (v8u16)__msa_slli_h((v8i16)reg2, 10); reg1 |= 
const_0x8000; @@ -1676,56 +1692,51 @@ void ARGB1555ToYRow_MSA(const uint8_t* src_argb1555, uint8_t* dst_y, int width) { int x; - v8u16 src0, src1, vec0, vec1, vec2, vec3, vec4, vec5; - v8u16 reg0, reg1, reg2, reg3, reg4, reg5; - v16u8 dst0; - v8u16 const_0x19 = (v8u16)__msa_ldi_h(0x19); - v8u16 const_0x81 = (v8u16)__msa_ldi_h(0x81); - v8u16 const_0x42 = (v8u16)__msa_ldi_h(0x42); - v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F); - v8u16 const_0x1080 = (v8u16)__msa_fill_h(0x1080); + v16u8 src0, src1, tmp0, tmp1, tmpb, tmpg, tmpr; + v16u8 reg0, reg1, reg2, dst; + v8i16 tmpr_l, tmpr_r, tmpg_l, tmpg_r, tmpb_l, tmpb_r; + v8i16 res0, res1; + v8i16 const_66 = (v8i16)__msa_ldi_h(66); + v8i16 const_129 = (v8i16)__msa_ldi_h(129); + v8i16 const_25 = (v8i16)__msa_ldi_h(25); + v8u16 const_1080 = (v8u16)__msa_fill_h(0x1080); + v16u8 zero = (v16u8)__msa_ldi_b(0); for (x = 0; x < width; x += 16) { - src0 = (v8u16)__msa_ld_b((void*)src_argb1555, 0); - src1 = (v8u16)__msa_ld_b((void*)src_argb1555, 16); - vec0 = src0 & const_0x1F; - vec1 = src1 & const_0x1F; - src0 = (v8u16)__msa_srai_h((v8i16)src0, 5); - src1 = (v8u16)__msa_srai_h((v8i16)src1, 5); - vec2 = src0 & const_0x1F; - vec3 = src1 & const_0x1F; - src0 = (v8u16)__msa_srai_h((v8i16)src0, 5); - src1 = (v8u16)__msa_srai_h((v8i16)src1, 5); - vec4 = src0 & const_0x1F; - vec5 = src1 & const_0x1F; - reg0 = (v8u16)__msa_slli_h((v8i16)vec0, 3); - reg1 = (v8u16)__msa_slli_h((v8i16)vec1, 3); - reg0 |= (v8u16)__msa_srai_h((v8i16)vec0, 2); - reg1 |= (v8u16)__msa_srai_h((v8i16)vec1, 2); - reg2 = (v8u16)__msa_slli_h((v8i16)vec2, 3); - reg3 = (v8u16)__msa_slli_h((v8i16)vec3, 3); - reg2 |= (v8u16)__msa_srai_h((v8i16)vec2, 2); - reg3 |= (v8u16)__msa_srai_h((v8i16)vec3, 2); - reg4 = (v8u16)__msa_slli_h((v8i16)vec4, 3); - reg5 = (v8u16)__msa_slli_h((v8i16)vec5, 3); - reg4 |= (v8u16)__msa_srai_h((v8i16)vec4, 2); - reg5 |= (v8u16)__msa_srai_h((v8i16)vec5, 2); - reg0 *= const_0x19; - reg1 *= const_0x19; - reg2 *= const_0x81; - reg3 *= const_0x81; - reg4 *= const_0x42; - reg5 *= const_0x42; - reg0 += reg2; - reg1 += reg3; - reg0 += reg4; - reg1 += reg5; - reg0 += const_0x1080; - reg1 += const_0x1080; - reg0 = (v8u16)__msa_srai_h((v8i16)reg0, 8); - reg1 = (v8u16)__msa_srai_h((v8i16)reg1, 8); - dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0); - ST_UB(dst0, dst_y); + src0 = (v16u8)__msa_ld_b((void*)src_argb1555, 0); + src1 = (v16u8)__msa_ld_b((void*)src_argb1555, 16); + tmp0 = (v16u8)__msa_pckev_b(src1, src0); + tmp1 = (v16u8)__msa_pckod_b(src1, src0); + tmpb = (v16u8)__msa_andi_b(tmp0, 0x1F); + tmpg = (v16u8)__msa_srli_b(tmp0, 5); + reg0 = (v16u8)__msa_andi_b(tmp1, 0x03); + reg0 = (v16u8)__msa_slli_b(reg0, 3); + tmpg = (v16u8)__msa_or_v(tmpg, reg0); + reg1 = (v16u8)__msa_andi_b(tmp1, 0x7C); + tmpr = (v16u8)__msa_srli_b(reg1, 2); + reg0 = (v16u8)__msa_slli_b(tmpb, 3); + reg1 = (v16u8)__msa_slli_b(tmpg, 3); + reg2 = (v16u8)__msa_slli_b(tmpr, 3); + tmpb = (v16u8)__msa_srli_b(tmpb, 2); + tmpg = (v16u8)__msa_srli_b(tmpg, 2); + tmpr = (v16u8)__msa_srli_b(tmpr, 2); + tmpb = (v16u8)__msa_or_v(reg0, tmpb); + tmpg = (v16u8)__msa_or_v(reg1, tmpg); + tmpr = (v16u8)__msa_or_v(reg2, tmpr); + tmpb_r = (v8i16)__msa_ilvr_b(zero, tmpb); + tmpb_l = (v8i16)__msa_ilvl_b(zero, tmpb); + tmpg_r = (v8i16)__msa_ilvr_b(zero, tmpg); + tmpg_l = (v8i16)__msa_ilvl_b(zero, tmpg); + tmpr_r = (v8i16)__msa_ilvr_b(zero, tmpr); + tmpr_l = (v8i16)__msa_ilvl_b(zero, tmpr); + res0 = const_1080 + const_25 * tmpb_r; + res1 = const_1080 + const_25 * tmpb_l; + res0 += const_129 * tmpg_r; + res1 += const_129 * tmpg_l; 
+ res0 += const_66 * tmpr_r; + res1 += const_66 * tmpr_l; + dst = (v16u8)__msa_pckod_b(res1, res0); + ST_UB(dst, dst_y); src_argb1555 += 32; dst_y += 16; } @@ -1733,62 +1744,49 @@ void ARGB1555ToYRow_MSA(const uint8_t* src_argb1555, void RGB565ToYRow_MSA(const uint8_t* src_rgb565, uint8_t* dst_y, int width) { int x; - v8u16 src0, src1, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 reg0, reg1, reg2, reg3, reg4, reg5; - v4u32 res0, res1, res2, res3; - v16u8 dst0; - v4u32 const_0x810019 = (v4u32)__msa_fill_w(0x810019); - v4u32 const_0x010042 = (v4u32)__msa_fill_w(0x010042); - v8i16 const_0x1080 = __msa_fill_h(0x1080); - v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F); - v8u16 const_0x7E0 = (v8u16)__msa_fill_h(0x7E0); - v8u16 const_0xF800 = (v8u16)__msa_fill_h(0xF800); + v16u8 src0, src1, tmp0, tmp1, tmpb, tmpg, tmpr; + v16u8 reg0, reg1, dst; + v8i16 tmpr_l, tmpr_r, tmpg_l, tmpg_r, tmpb_l, tmpb_r; + v8i16 res0, res1; + v8i16 const_66 = (v8i16)__msa_ldi_h(66); + v8i16 const_129 = (v8i16)__msa_ldi_h(129); + v8i16 const_25 = (v8i16)__msa_ldi_h(25); + v8i16 const_1080 = (v8i16)__msa_fill_h(0x1080); + v16u8 zero = __msa_ldi_b(0); for (x = 0; x < width; x += 16) { - src0 = (v8u16)__msa_ld_b((void*)src_rgb565, 0); - src1 = (v8u16)__msa_ld_b((void*)src_rgb565, 16); - vec0 = src0 & const_0x1F; - vec1 = src0 & const_0x7E0; - vec2 = src0 & const_0xF800; - vec3 = src1 & const_0x1F; - vec4 = src1 & const_0x7E0; - vec5 = src1 & const_0xF800; - reg0 = (v8u16)__msa_slli_h((v8i16)vec0, 3); - reg1 = (v8u16)__msa_srli_h((v8i16)vec1, 3); - reg2 = (v8u16)__msa_srli_h((v8i16)vec2, 8); - reg3 = (v8u16)__msa_slli_h((v8i16)vec3, 3); - reg4 = (v8u16)__msa_srli_h((v8i16)vec4, 3); - reg5 = (v8u16)__msa_srli_h((v8i16)vec5, 8); - reg0 |= (v8u16)__msa_srli_h((v8i16)vec0, 2); - reg1 |= (v8u16)__msa_srli_h((v8i16)vec1, 9); - reg2 |= (v8u16)__msa_srli_h((v8i16)vec2, 13); - reg3 |= (v8u16)__msa_srli_h((v8i16)vec3, 2); - reg4 |= (v8u16)__msa_srli_h((v8i16)vec4, 9); - reg5 |= (v8u16)__msa_srli_h((v8i16)vec5, 13); - vec0 = (v8u16)__msa_ilvr_h((v8i16)reg1, (v8i16)reg0); - vec1 = (v8u16)__msa_ilvl_h((v8i16)reg1, (v8i16)reg0); - vec2 = (v8u16)__msa_ilvr_h((v8i16)reg4, (v8i16)reg3); - vec3 = (v8u16)__msa_ilvl_h((v8i16)reg4, (v8i16)reg3); - vec4 = (v8u16)__msa_ilvr_h(const_0x1080, (v8i16)reg2); - vec5 = (v8u16)__msa_ilvl_h(const_0x1080, (v8i16)reg2); - vec6 = (v8u16)__msa_ilvr_h(const_0x1080, (v8i16)reg5); - vec7 = (v8u16)__msa_ilvl_h(const_0x1080, (v8i16)reg5); - res0 = __msa_dotp_u_w(vec0, (v8u16)const_0x810019); - res1 = __msa_dotp_u_w(vec1, (v8u16)const_0x810019); - res2 = __msa_dotp_u_w(vec2, (v8u16)const_0x810019); - res3 = __msa_dotp_u_w(vec3, (v8u16)const_0x810019); - res0 = __msa_dpadd_u_w(res0, vec4, (v8u16)const_0x010042); - res1 = __msa_dpadd_u_w(res1, vec5, (v8u16)const_0x010042); - res2 = __msa_dpadd_u_w(res2, vec6, (v8u16)const_0x010042); - res3 = __msa_dpadd_u_w(res3, vec7, (v8u16)const_0x010042); - res0 = (v4u32)__msa_srai_w((v4i32)res0, 8); - res1 = (v4u32)__msa_srai_w((v4i32)res1, 8); - res2 = (v4u32)__msa_srai_w((v4i32)res2, 8); - res3 = (v4u32)__msa_srai_w((v4i32)res3, 8); - vec0 = (v8u16)__msa_pckev_h((v8i16)res1, (v8i16)res0); - vec1 = (v8u16)__msa_pckev_h((v8i16)res3, (v8i16)res2); - dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0); - ST_UB(dst0, dst_y); + src0 = (v16u8)__msa_ld_b((void*)src_rgb565, 0); + src1 = (v16u8)__msa_ld_b((void*)src_rgb565, 16); + tmp0 = (v16u8)__msa_pckev_b(src1, src0); + tmp1 = (v16u8)__msa_pckod_b(src1, src0); + tmpb = (v16u8)__msa_andi_b(tmp0, 0x1F); + tmpr = 
(v16u8)__msa_andi_b(tmp1, 0xF8); + reg1 = (v16u8)__msa_andi_b(tmp1, 0x07); + reg0 = (v16u8)__msa_srli_b(tmp0, 5); + reg1 = (v16u8)__msa_slli_b(reg1, 3); + tmpg = (v16u8)__msa_or_v(reg1, reg0); + reg0 = (v16u8)__msa_slli_b(tmpb, 3); + reg1 = (v16u8)__msa_srli_b(tmpb, 2); + tmpb = (v16u8)__msa_or_v(reg1, reg0); + reg0 = (v16u8)__msa_slli_b(tmpg, 2); + reg1 = (v16u8)__msa_srli_b(tmpg, 4); + tmpg = (v16u8)__msa_or_v(reg1, reg0); + reg0 = (v16u8)__msa_srli_b(tmpr, 5); + tmpr = (v16u8)__msa_or_v(tmpr, reg0); + tmpb_r = (v8i16)__msa_ilvr_b(zero, tmpb); + tmpb_l = (v8i16)__msa_ilvl_b(zero, tmpb); + tmpg_r = (v8i16)__msa_ilvr_b(zero, tmpg); + tmpg_l = (v8i16)__msa_ilvl_b(zero, tmpg); + tmpr_r = (v8i16)__msa_ilvr_b(zero, tmpr); + tmpr_l = (v8i16)__msa_ilvl_b(zero, tmpr); + res0 = const_1080 + const_25 * tmpb_r; + res1 = const_1080 + const_25 * tmpb_l; + res0 += const_129 * tmpg_r; + res1 += const_129 * tmpg_l; + res0 += const_66 * tmpr_r; + res1 += const_66 * tmpr_l; + dst = (v16u8)__msa_pckod_b(res1, res0); + ST_UB(dst, dst_y); src_rgb565 += 32; dst_y += 16; } @@ -1885,69 +1883,61 @@ void ARGB1555ToUVRow_MSA(const uint8_t* src_argb1555, const uint16_t* s = (const uint16_t*)src_argb1555; const uint16_t* t = (const uint16_t*)(src_argb1555 + src_stride_argb1555); int64_t res0, res1; - v8u16 src0, src1, src2, src3, reg0, reg1, reg2, reg3; - v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6; - v16u8 dst0; - v8u16 const_0x70 = (v8u16)__msa_ldi_h(0x70); - v8u16 const_0x4A = (v8u16)__msa_ldi_h(0x4A); - v8u16 const_0x26 = (v8u16)__msa_ldi_h(0x26); - v8u16 const_0x5E = (v8u16)__msa_ldi_h(0x5E); - v8u16 const_0x12 = (v8u16)__msa_ldi_h(0x12); - v8u16 const_0x8080 = (v8u16)__msa_fill_h(0x8080); - v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F); + v16u8 src0, src1, src2, src3, dst; + v16u8 tmp0, tmp1, tmp2, tmp3; + v16u8 reg0, reg1, reg2, reg3; + v16u8 tmpb, tmpg, tmpr, nexb, nexg, nexr; + v8i16 const_112 = (v8i16)__msa_ldi_h(0x38); + v8i16 const_74 = (v8i16)__msa_ldi_h(0x25); + v8i16 const_38 = (v8i16)__msa_ldi_h(0x13); + v8i16 const_94 = (v8i16)__msa_ldi_h(0x2F); + v8i16 const_18 = (v8i16)__msa_ldi_h(0x09); + v8u16 const_8080 = (v8u16)__msa_fill_h(0x8080); for (x = 0; x < width; x += 16) { src0 = (v8u16)__msa_ld_b((void*)s, 0); src1 = (v8u16)__msa_ld_b((void*)s, 16); src2 = (v8u16)__msa_ld_b((void*)t, 0); src3 = (v8u16)__msa_ld_b((void*)t, 16); - vec0 = src0 & const_0x1F; - vec1 = src1 & const_0x1F; - vec0 += src2 & const_0x1F; - vec1 += src3 & const_0x1F; - vec0 = (v8u16)__msa_pckev_b((v16i8)vec1, (v16i8)vec0); - src0 = (v8u16)__msa_srai_h((v8i16)src0, 5); - src1 = (v8u16)__msa_srai_h((v8i16)src1, 5); - src2 = (v8u16)__msa_srai_h((v8i16)src2, 5); - src3 = (v8u16)__msa_srai_h((v8i16)src3, 5); - vec2 = src0 & const_0x1F; - vec3 = src1 & const_0x1F; - vec2 += src2 & const_0x1F; - vec3 += src3 & const_0x1F; - vec2 = (v8u16)__msa_pckev_b((v16i8)vec3, (v16i8)vec2); - src0 = (v8u16)__msa_srai_h((v8i16)src0, 5); - src1 = (v8u16)__msa_srai_h((v8i16)src1, 5); - src2 = (v8u16)__msa_srai_h((v8i16)src2, 5); - src3 = (v8u16)__msa_srai_h((v8i16)src3, 5); - vec4 = src0 & const_0x1F; - vec5 = src1 & const_0x1F; - vec4 += src2 & const_0x1F; - vec5 += src3 & const_0x1F; - vec4 = (v8u16)__msa_pckev_b((v16i8)vec5, (v16i8)vec4); - vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0); - vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2); - vec4 = __msa_hadd_u_h((v16u8)vec4, (v16u8)vec4); - vec6 = (v8u16)__msa_slli_h((v8i16)vec0, 1); - vec6 |= (v8u16)__msa_srai_h((v8i16)vec0, 6); - vec0 = (v8u16)__msa_slli_h((v8i16)vec2, 1); - vec0 |= 
(v8u16)__msa_srai_h((v8i16)vec2, 6); - vec2 = (v8u16)__msa_slli_h((v8i16)vec4, 1); - vec2 |= (v8u16)__msa_srai_h((v8i16)vec4, 6); - reg0 = vec6 * const_0x70; - reg1 = vec0 * const_0x4A; - reg2 = vec2 * const_0x70; - reg3 = vec0 * const_0x5E; - reg0 += const_0x8080; - reg1 += vec2 * const_0x26; - reg2 += const_0x8080; - reg3 += vec6 * const_0x12; - reg0 -= reg1; - reg2 -= reg3; - reg0 = (v8u16)__msa_srai_h((v8i16)reg0, 8); - reg2 = (v8u16)__msa_srai_h((v8i16)reg2, 8); - dst0 = (v16u8)__msa_pckev_b((v16i8)reg2, (v16i8)reg0); - res0 = __msa_copy_u_d((v2i64)dst0, 0); - res1 = __msa_copy_u_d((v2i64)dst0, 1); + tmp0 = (v16u8)__msa_pckev_b(src1, src0); + tmp1 = (v16u8)__msa_pckod_b(src1, src0); + tmp2 = (v16u8)__msa_pckev_b(src3, src2); + tmp3 = (v16u8)__msa_pckod_b(src3, src2); + tmpb = (v16u8)__msa_andi_b(tmp0, 0x1F); + nexb = (v16u8)__msa_andi_b(tmp2, 0x1F); + tmpg = (v16u8)__msa_srli_b(tmp0, 5); + nexg = (v16u8)__msa_srli_b(tmp2, 5); + reg0 = (v16u8)__msa_andi_b(tmp1, 0x03); + reg2 = (v16u8)__msa_andi_b(tmp3, 0x03); + reg0 = (v16u8)__msa_slli_b(reg0, 3); + reg2 = (v16u8)__msa_slli_b(reg2, 3); + tmpg = (v16u8)__msa_or_v(tmpg, reg0); + nexg = (v16u8)__msa_or_v(nexg, reg2); + reg1 = (v16u8)__msa_andi_b(tmp1, 0x7C); + reg3 = (v16u8)__msa_andi_b(tmp3, 0x7C); + tmpr = (v16u8)__msa_srli_b(reg1, 2); + nexr = (v16u8)__msa_srli_b(reg3, 2); + reg0 = (v16u8)__msa_slli_b(tmpb, 3); + reg1 = (v16u8)__msa_slli_b(tmpg, 3); + reg2 = (v16u8)__msa_slli_b(tmpr, 3); + tmpb = (v16u8)__msa_srli_b(tmpb, 2); + tmpg = (v16u8)__msa_srli_b(tmpg, 2); + tmpr = (v16u8)__msa_srli_b(tmpr, 2); + tmpb = (v16u8)__msa_or_v(reg0, tmpb); + tmpg = (v16u8)__msa_or_v(reg1, tmpg); + tmpr = (v16u8)__msa_or_v(reg2, tmpr); + reg0 = (v16u8)__msa_slli_b(nexb, 3); + reg1 = (v16u8)__msa_slli_b(nexg, 3); + reg2 = (v16u8)__msa_slli_b(nexr, 3); + nexb = (v16u8)__msa_srli_b(nexb, 2); + nexg = (v16u8)__msa_srli_b(nexg, 2); + nexr = (v16u8)__msa_srli_b(nexr, 2); + nexb = (v16u8)__msa_or_v(reg0, nexb); + nexg = (v16u8)__msa_or_v(reg1, nexg); + nexr = (v16u8)__msa_or_v(reg2, nexr); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst); + res0 = __msa_copy_u_d((v2i64)dst, 0); + res1 = __msa_copy_u_d((v2i64)dst, 1); SD(res0, dst_u); SD(res1, dst_v); s += 16; @@ -1966,68 +1956,57 @@ void RGB565ToUVRow_MSA(const uint8_t* src_rgb565, const uint16_t* s = (const uint16_t*)src_rgb565; const uint16_t* t = (const uint16_t*)(src_rgb565 + src_stride_rgb565); int64_t res0, res1; - v8u16 src0, src1, src2, src3, reg0, reg1, reg2, reg3; - v8u16 vec0, vec1, vec2, vec3, vec4, vec5; - v16u8 dst0; - v8u16 const_0x70 = (v8u16)__msa_ldi_h(0x70); - v8u16 const_0x4A = (v8u16)__msa_ldi_h(0x4A); - v8u16 const_0x26 = (v8u16)__msa_ldi_h(0x26); - v8u16 const_0x5E = (v8u16)__msa_ldi_h(0x5E); - v8u16 const_0x12 = (v8u16)__msa_ldi_h(0x12); - v8u16 const_32896 = (v8u16)__msa_fill_h(0x8080); - v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F); - v8u16 const_0x3F = (v8u16)__msa_fill_h(0x3F); + v16u8 src0, src1, src2, src3, dst; + v16u8 tmp0, tmp1, tmp2, tmp3; + v16u8 reg0, reg1, reg2, reg3; + v16u8 tmpb, tmpg, tmpr, nexb, nexg, nexr; + v8i16 const_112 = (v8i16)__msa_ldi_h(0x38); + v8i16 const_74 = (v8i16)__msa_ldi_h(0x25); + v8i16 const_38 = (v8i16)__msa_ldi_h(0x13); + v8i16 const_94 = (v8i16)__msa_ldi_h(0x2F); + v8i16 const_18 = (v8i16)__msa_ldi_h(0x09); + v8u16 const_8080 = (v8u16)__msa_fill_h(0x8080); for (x = 0; x < width; x += 16) { - src0 = (v8u16)__msa_ld_b((void*)s, 0); - src1 = (v8u16)__msa_ld_b((void*)s, 16); - src2 = (v8u16)__msa_ld_b((void*)t, 0); - src3 = 
(v8u16)__msa_ld_b((void*)t, 16); - vec0 = src0 & const_0x1F; - vec1 = src1 & const_0x1F; - vec0 += src2 & const_0x1F; - vec1 += src3 & const_0x1F; - vec0 = (v8u16)__msa_pckev_b((v16i8)vec1, (v16i8)vec0); - src0 = (v8u16)__msa_srai_h((v8i16)src0, 5); - src1 = (v8u16)__msa_srai_h((v8i16)src1, 5); - src2 = (v8u16)__msa_srai_h((v8i16)src2, 5); - src3 = (v8u16)__msa_srai_h((v8i16)src3, 5); - vec2 = src0 & const_0x3F; - vec3 = src1 & const_0x3F; - vec2 += src2 & const_0x3F; - vec3 += src3 & const_0x3F; - vec1 = (v8u16)__msa_pckev_b((v16i8)vec3, (v16i8)vec2); - src0 = (v8u16)__msa_srai_h((v8i16)src0, 6); - src1 = (v8u16)__msa_srai_h((v8i16)src1, 6); - src2 = (v8u16)__msa_srai_h((v8i16)src2, 6); - src3 = (v8u16)__msa_srai_h((v8i16)src3, 6); - vec4 = src0 & const_0x1F; - vec5 = src1 & const_0x1F; - vec4 += src2 & const_0x1F; - vec5 += src3 & const_0x1F; - vec2 = (v8u16)__msa_pckev_b((v16i8)vec5, (v16i8)vec4); - vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0); - vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1); - vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2); - vec3 = (v8u16)__msa_slli_h((v8i16)vec0, 1); - vec3 |= (v8u16)__msa_srai_h((v8i16)vec0, 6); - vec4 = (v8u16)__msa_slli_h((v8i16)vec2, 1); - vec4 |= (v8u16)__msa_srai_h((v8i16)vec2, 6); - reg0 = vec3 * const_0x70; - reg1 = vec1 * const_0x4A; - reg2 = vec4 * const_0x70; - reg3 = vec1 * const_0x5E; - reg0 += const_32896; - reg1 += vec4 * const_0x26; - reg2 += const_32896; - reg3 += vec3 * const_0x12; - reg0 -= reg1; - reg2 -= reg3; - reg0 = (v8u16)__msa_srai_h((v8i16)reg0, 8); - reg2 = (v8u16)__msa_srai_h((v8i16)reg2, 8); - dst0 = (v16u8)__msa_pckev_b((v16i8)reg2, (v16i8)reg0); - res0 = __msa_copy_u_d((v2i64)dst0, 0); - res1 = __msa_copy_u_d((v2i64)dst0, 1); + src0 = (v16u8)__msa_ld_b((void*)s, 0); + src1 = (v16u8)__msa_ld_b((void*)s, 16); + src2 = (v16u8)__msa_ld_b((void*)t, 0); + src3 = (v16u8)__msa_ld_b((void*)t, 16); + tmp0 = (v16u8)__msa_pckev_b(src1, src0); + tmp1 = (v16u8)__msa_pckod_b(src1, src0); + tmp2 = (v16u8)__msa_pckev_b(src3, src2); + tmp3 = (v16u8)__msa_pckod_b(src3, src2); + tmpb = (v16u8)__msa_andi_b(tmp0, 0x1F); + tmpr = (v16u8)__msa_andi_b(tmp1, 0xF8); + nexb = (v16u8)__msa_andi_b(tmp2, 0x1F); + nexr = (v16u8)__msa_andi_b(tmp3, 0xF8); + reg1 = (v16u8)__msa_andi_b(tmp1, 0x07); + reg3 = (v16u8)__msa_andi_b(tmp3, 0x07); + reg0 = (v16u8)__msa_srli_b(tmp0, 5); + reg1 = (v16u8)__msa_slli_b(reg1, 3); + reg2 = (v16u8)__msa_srli_b(tmp2, 5); + reg3 = (v16u8)__msa_slli_b(reg3, 3); + tmpg = (v16u8)__msa_or_v(reg1, reg0); + nexg = (v16u8)__msa_or_v(reg2, reg3); + reg0 = (v16u8)__msa_slli_b(tmpb, 3); + reg1 = (v16u8)__msa_srli_b(tmpb, 2); + reg2 = (v16u8)__msa_slli_b(nexb, 3); + reg3 = (v16u8)__msa_srli_b(nexb, 2); + tmpb = (v16u8)__msa_or_v(reg1, reg0); + nexb = (v16u8)__msa_or_v(reg2, reg3); + reg0 = (v16u8)__msa_slli_b(tmpg, 2); + reg1 = (v16u8)__msa_srli_b(tmpg, 4); + reg2 = (v16u8)__msa_slli_b(nexg, 2); + reg3 = (v16u8)__msa_srli_b(nexg, 4); + tmpg = (v16u8)__msa_or_v(reg1, reg0); + nexg = (v16u8)__msa_or_v(reg2, reg3); + reg0 = (v16u8)__msa_srli_b(tmpr, 5); + reg2 = (v16u8)__msa_srli_b(nexr, 5); + tmpr = (v16u8)__msa_or_v(tmpr, reg0); + nexr = (v16u8)__msa_or_v(nexr, reg2); + RGBTOUV(tmpb, tmpg, tmpr, nexb, nexg, nexr, dst); + res0 = __msa_copy_u_d((v2i64)dst, 0); + res1 = __msa_copy_u_d((v2i64)dst, 1); SD(res0, dst_u); SD(res1, dst_v); s += 16; @@ -2266,13 +2245,13 @@ void NV12ToARGBRow_MSA(const uint8_t* src_y, uint64_t val0, val1; v16u8 src0, src1, res0, res1, dst0, dst1; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, 
vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v16u8 zero = {0}; v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); @@ -2281,8 +2260,7 @@ void NV12ToARGBRow_MSA(const uint8_t* src_y, val1 = LD(src_uv); src0 = (v16u8)__msa_insert_d((v2i64)zero, 0, val0); src1 = (v16u8)__msa_insert_d((v2i64)zero, 0, val1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); res0 = (v16u8)__msa_ilvev_b((v16i8)vec2, (v16i8)vec0); res1 = (v16u8)__msa_ilvev_b((v16i8)alpha, (v16i8)vec1); dst0 = (v16u8)__msa_ilvr_b((v16i8)res1, (v16i8)res0); @@ -2303,12 +2281,12 @@ void NV12ToRGB565Row_MSA(const uint8_t* src_y, uint64_t val0, val1; v16u8 src0, src1, dst0; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; + v8i16 const_0x80 = __msa_ldi_h(0x80); v16u8 zero = {0}; - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); @@ -2317,8 +2295,7 @@ void NV12ToRGB565Row_MSA(const uint8_t* src_y, val1 = LD(src_uv); src0 = (v16u8)__msa_insert_d((v2i64)zero, 0, val0); src1 = (v16u8)__msa_insert_d((v2i64)zero, 0, val1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); vec0 = vec0 >> 3; vec1 = (vec1 >> 2) << 5; vec2 = (vec2 >> 3) << 11; @@ -2339,14 +2316,14 @@ void NV21ToARGBRow_MSA(const uint8_t* src_y, uint64_t val0, val1; v16u8 src0, src1, res0, res1, dst0, dst1; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); v16u8 zero = {0}; v16i8 shuffler = {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}; + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); @@ -2356,8 +2333,7 @@ void NV21ToARGBRow_MSA(const uint8_t* src_y, src0 = (v16u8)__msa_insert_d((v2i64)zero, 0, val0); src1 = (v16u8)__msa_insert_d((v2i64)zero, 0, val1); src1 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src1, (v16i8)src1); - YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src0, src1, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); res0 = (v16u8)__msa_ilvev_b((v16i8)vec2, (v16i8)vec0); res1 = (v16u8)__msa_ilvev_b((v16i8)alpha, (v16i8)vec1); dst0 = (v16u8)__msa_ilvr_b((v16i8)res1, (v16i8)res0); @@ -2771,54 +2747,57 @@ void I444ToARGBRow_MSA(const uint8_t* src_y, int width) { int x; v16u8 src0, src1, 
src2, dst0, dst1; - v8u16 vec0, vec1, vec2; + v8i16 vec0, vec1, vec2; v4i32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); v8i16 zero = {0}; + v4i32 const_0x80 = __msa_fill_w(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); for (x = 0; x < width; x += 8) { READI444(src_y, src_u, src_v, src0, src1, src2); - vec0 = (v8u16)__msa_ilvr_b((v16i8)src0, (v16i8)src0); + vec0 = (v8i16)__msa_ilvr_b((v16i8)src0, (v16i8)src0); reg0 = (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)vec0); reg1 = (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)vec0); reg0 *= vec_yg; reg1 *= vec_yg; reg0 = __msa_srai_w(reg0, 16); reg1 = __msa_srai_w(reg1, 16); - reg4 = reg0 + vec_br; - reg5 = reg1 + vec_br; - reg2 = reg0 + vec_bg; - reg3 = reg1 + vec_bg; - reg0 += vec_bb; - reg1 += vec_bb; + reg0 += vec_yb; + reg1 += vec_yb; vec0 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src1); vec1 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src2); reg6 = (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)vec0); reg7 = (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)vec0); reg8 = (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)vec1); reg9 = (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)vec1); - reg0 -= reg6 * vec_ub; - reg1 -= reg7 * vec_ub; - reg2 -= reg6 * vec_ug; - reg3 -= reg7 * vec_ug; - reg4 -= reg8 * vec_vr; - reg5 -= reg9 * vec_vr; - reg2 -= reg8 * vec_vg; - reg3 -= reg9 * vec_vg; - reg0 = __msa_srai_w(reg0, 6); - reg1 = __msa_srai_w(reg1, 6); - reg2 = __msa_srai_w(reg2, 6); - reg3 = __msa_srai_w(reg3, 6); - reg4 = __msa_srai_w(reg4, 6); - reg5 = __msa_srai_w(reg5, 6); + reg6 -= const_0x80; + reg7 -= const_0x80; + reg8 -= const_0x80; + reg9 -= const_0x80; + tmp0 = reg0 + reg6 * vec_ub; + tmp1 = reg1 + reg7 * vec_ub; + tmp2 = reg0 + reg8 * vec_vr; + tmp3 = reg1 + reg9 * vec_vr; + tmp4 = reg6 * vec_ug; + tmp5 = reg7 * vec_ug; + tmp4 += reg8 * vec_vg; + tmp5 += reg9 * vec_vg; + tmp4 = reg0 - tmp4; + tmp5 = reg1 - tmp5; + reg0 = __msa_srai_w(tmp0, 6); + reg1 = __msa_srai_w(tmp1, 6); + reg2 = __msa_srai_w(tmp2, 6); + reg3 = __msa_srai_w(tmp3, 6); + reg4 = __msa_srai_w(tmp4, 6); + reg5 = __msa_srai_w(tmp5, 6); CLIP_0TO255(reg0, reg1, reg2, reg3, reg4, reg5); vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0); - vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2); - vec2 = (v8u16)__msa_pckev_h((v8i16)reg5, (v8i16)reg4); + vec1 = (v8u16)__msa_pckev_h((v8i16)reg5, (v8i16)reg4); + vec2 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2); vec0 = (v8u16)__msa_ilvev_b((v16i8)vec1, (v16i8)vec0); vec1 = (v8u16)__msa_ilvev_b((v16i8)alpha, (v16i8)vec2); dst0 = (v16u8)__msa_ilvr_h((v8i16)vec1, (v8i16)vec0); @@ -2922,12 +2901,12 @@ void YUY2ToARGBRow_MSA(const uint8_t* src_yuy2, int x; v16u8 src0, src1, src2; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); + v8i16 const_0x80 = __msa_ldi_h(0x80); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, 
(v8i16)vec_ug); @@ -2935,8 +2914,7 @@ void YUY2ToARGBRow_MSA(const uint8_t* src_yuy2, src0 = (v16u8)__msa_ld_b((void*)src_yuy2, 0); src1 = (v16u8)__msa_pckev_b((v16i8)src0, (v16i8)src0); src2 = (v16u8)__msa_pckod_b((v16i8)src0, (v16i8)src0); - YUVTORGB(src1, src2, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src1, src2, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); STOREARGB(vec0, vec1, vec2, alpha, dst_argb); src_yuy2 += 16; dst_argb += 32; @@ -2950,12 +2928,12 @@ void UYVYToARGBRow_MSA(const uint8_t* src_uyvy, int x; v16u8 src0, src1, src2; v8i16 vec0, vec1, vec2; - v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, vec_br, vec_yg; + v4i32 vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb; v4i32 vec_ubvr, vec_ugvg; + v8i16 const_0x80 = __msa_ldi_h(0x80); v16u8 alpha = (v16u8)__msa_ldi_b(ALPHA_VAL); - YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_bb, vec_bg, - vec_br, vec_yg); + YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb); vec_ubvr = __msa_ilvr_w(vec_vr, vec_ub); vec_ugvg = (v4i32)__msa_ilvev_h((v8i16)vec_vg, (v8i16)vec_ug); @@ -2963,8 +2941,7 @@ void UYVYToARGBRow_MSA(const uint8_t* src_uyvy, src0 = (v16u8)__msa_ld_b((void*)src_uyvy, 0); src1 = (v16u8)__msa_pckod_b((v16i8)src0, (v16i8)src0); src2 = (v16u8)__msa_pckev_b((v16i8)src0, (v16i8)src0); - YUVTORGB(src1, src2, vec_ubvr, vec_ugvg, vec_bb, vec_bg, vec_br, vec_yg, - vec0, vec1, vec2); + YUVTORGB(src1, src2, vec_ubvr, vec_ugvg, vec_yg, vec_yb, vec0, vec1, vec2); STOREARGB(vec0, vec1, vec2, alpha, dst_argb); src_uyvy += 16; dst_argb += 32; diff --git a/third-party/libyuv/third_party/libyuv/source/row_neon.cc b/third-party/libyuv/third_party/libyuv/source/row_neon.cc index 6ef6f1c463..d2815d17ba 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_neon.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_neon.cc @@ -10,8 +10,6 @@ #include "libyuv/row.h" -#include - #ifdef __cplusplus namespace libyuv { extern "C" { @@ -21,6 +19,9 @@ extern "C" { #if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \ !defined(__aarch64__) +// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are +// reserved. + // q0: Y uint16x8_t // d2: U uint8x8_t // d3: V uint8x8_t @@ -155,6 +156,29 @@ void I444ToARGBRow_NEON(const uint8_t* src_y, : "cc", "memory", YUVTORGB_REGS, "d6"); } +void I444ToRGB24Row_NEON(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile( + YUVTORGB_SETUP + "1: \n" READYUV444 YUVTORGB + RGBTORGB8 + "subs %[width], %[width], #8 \n" + "vst3.8 {d0, d2, d4}, [%[dst_rgb24]]! \n" + "bgt 1b \n" + : [src_y] "+r"(src_y), // %[src_y] + [src_u] "+r"(src_u), // %[src_u] + [src_v] "+r"(src_v), // %[src_v] + [dst_rgb24] "+r"(dst_rgb24), // %[dst_argb] + [width] "+r"(width) // %[width] + : [kUVCoeff] "r"(&yuvconstants->kUVCoeff), // %[kUVCoeff] + [kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias) // %[kRGBCoeffBias] + : "cc", "memory", YUVTORGB_REGS); +} + void I422ToARGBRow_NEON(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, @@ -575,6 +599,127 @@ void SplitUVRow_NEON(const uint8_t* src_uv, ); } +// Reads 16 byte Y's from tile and writes out 16 Y's. +// MM21 Y tiles are 16x32 so src_tile_stride = 512 bytes +// MM21 UV tiles are 8x16 so src_tile_stride = 256 bytes +// width measured in bytes so 8 UV = 16. 
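The MM21 detiling described in the comment above is a simple strided gather: each 16-byte run is taken from the current tile, and the source pointer then jumps ahead by one tile stride (512 bytes for Y tiles, 256 bytes for UV tiles). A minimal scalar sketch of that access pattern, assuming width is a multiple of 16 as in the NEON loop (the helper name is illustrative, not the library's own C row):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    // Scalar sketch: copy one detiled row. Each iteration takes 16 bytes
    // from the current tile column and hops src forward by the tile stride.
    static void ScalarDetileRow(const uint8_t* src, ptrdiff_t src_tile_stride,
                                uint8_t* dst, int width) {
      int x;
      for (x = 0; x < width; x += 16) {
        memcpy(dst + x, src, 16);   // 16 bytes, like the vld1/vst1 pair
        src += src_tile_stride;     // next tile (512B for Y, 256B for UV)
      }
    }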
+void DetileRow_NEON(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width) { + asm volatile( + "1: \n" + "vld1.8 {q0}, [%0], %3 \n" // load 16 bytes + "subs %2, %2, #16 \n" // 16 processed per loop + "pld [%0, #1792] \n" + "vst1.8 {q0}, [%1]! \n" // store 16 bytes + "bgt 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride) // %3 + : "cc", "memory", "q0" // Clobber List + ); +} + +// Reads 16 byte Y's of 16 bits from tile and writes out 16 Y's. +void DetileRow_16_NEON(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width) { + asm volatile( + "1: \n" + "vld1.16 {q0, q1}, [%0], %3 \n" // load 16 pixels + "subs %2, %2, #16 \n" // 16 processed per loop + "pld [%0, #3584] \n" + "vst1.16 {q0, q1}, [%1]! \n" // store 16 pixels + "bgt 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride * 2) // %3 + : "cc", "memory", "q0", "q1" // Clobber List + ); +} + +// Read 16 bytes of UV, detile, and write 8 bytes of U and 8 bytes of V. +void DetileSplitUVRow_NEON(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile( + "1: \n" + "vld2.8 {d0, d1}, [%0], %4 \n" + "subs %3, %3, #16 \n" + "pld [%0, #1792] \n" + "vst1.8 {d0}, [%1]! \n" + "vst1.8 {d1}, [%2]! \n" + "bgt 1b \n" + : "+r"(src_uv), // %0 + "+r"(dst_u), // %1 + "+r"(dst_v), // %2 + "+r"(width) // %3 + : "r"(src_tile_stride) // %4 + : "cc", "memory", "d0", "d1" // Clobber List + ); +} + +#if LIBYUV_USE_ST2 +// Read 16 Y, 8 UV, and write 8 YUYV. +void DetileToYUY2_NEON(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width) { + asm volatile( + "1: \n" + "vld1.8 {q0}, [%0], %4 \n" // Load 16 Y + "pld [%0, #1792] \n" + "vld1.8 {q1}, [%1], %5 \n" // Load 8 UV + "pld [%1, #1792] \n" + "subs %3, %3, #16 \n" + "vst2.8 {q0, q1}, [%2]! \n" + "bgt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_uv), // %1 + "+r"(dst_yuy2), // %2 + "+r"(width) // %3 + : "r"(src_y_tile_stride), // %4 + "r"(src_uv_tile_stride) // %5 + : "cc", "memory", "d0", "d1", "d2", "d3" // Clobber list + ); +} +#else +// Read 16 Y, 8 UV, and write 8 YUYV. +void DetileToYUY2_NEON(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width) { + asm volatile( + "1: \n" + "vld1.8 {q0}, [%0], %4 \n" // Load 16 Y + "vld1.8 {q1}, [%1], %5 \n" // Load 8 UV + "subs %3, %3, #16 \n" + "pld [%0, #1792] \n" + "vzip.8 q0, q1 \n" + "pld [%1, #1792] \n" + "vst1.8 {q0, q1}, [%2]! \n" + "bgt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_uv), // %1 + "+r"(dst_yuy2), // %2 + "+r"(width) // %3 + : "r"(src_y_tile_stride), // %4 + "r"(src_uv_tile_stride) // %5 + : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber list + ); +} +#endif + // Reads 16 U's and V's and writes out 16 pairs of UV. void MergeUVRow_NEON(const uint8_t* src_u, const uint8_t* src_v, @@ -1304,16 +1449,17 @@ void ARGBToRGB24Row_NEON(const uint8_t* src_argb, int width) { asm volatile( "1: \n" - "vld4.8 {d1, d2, d3, d4}, [%0]! \n" // load 8 pixels of ARGB. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vst3.8 {d1, d2, d3}, [%1]! \n" // store 8 pixels of - // RGB24. + "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 16 pixels of ARGB. + "vld4.8 {d1, d3, d5, d7}, [%0]! \n" + "subs %2, %2, #16 \n" // 16 processed per loop. + "vst3.8 {d0, d2, d4}, [%1]! \n" // store 16 RGB24 pixels. + "vst3.8 {d1, d3, d5}, [%1]! 
\n" "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_rgb24), // %1 "+r"(width) // %2 : - : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List + : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber List ); } @@ -1457,6 +1603,29 @@ void UYVYToUVRow_NEON(const uint8_t* src_uyvy, ); } +void YUY2ToNVUVRow_NEON(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width) { + asm volatile( + "add %1, %0, %1 \n" // stride + src_yuy2 + "1: \n" + "vld2.8 {q0, q1}, [%0]! \n" // load 16 pixels of YUY2. + "subs %3, %3, #16 \n" // 16 pixels = 8 UVs. + "vld2.8 {q2, q3}, [%1]! \n" // load next row YUY2. + "vrhadd.u8 q4, q1, q3 \n" // average rows of UV + "vst1.8 {q4}, [%2]! \n" // store 8 UV. + "bgt 1b \n" + : "+r"(src_yuy2), // %0 + "+r"(stride_yuy2), // %1 + "+r"(dst_uv), // %2 + "+r"(width) // %3 + : + : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", + "d7" // Clobber List + ); +} + // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. void ARGBShuffleRow_NEON(const uint8_t* src_argb, uint8_t* dst_argb, @@ -1598,29 +1767,6 @@ void ARGBToARGB4444Row_NEON(const uint8_t* src_argb, : "cc", "memory", "q0", "q1", "q2", "q3"); } -void ARGBToYRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width) { - asm volatile( - "vmov.u8 d24, #25 \n" // B * 0.1016 coefficient - "vmov.u8 d25, #129 \n" // G * 0.5078 coefficient - "vmov.u8 d26, #66 \n" // R * 0.2578 coefficient - "vmov.u8 d27, #16 \n" // Add 16 constant - "1: \n" - "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q2, d0, d24 \n" // B - "vmlal.u8 q2, d1, d25 \n" // G - "vmlal.u8 q2, d2, d26 \n" // R - "vqrshrn.u16 d0, q2, #8 \n" // 16 bit to 8 bit Y - "vqadd.u8 d0, d27 \n" - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "q0", "q1", "q2", "q12", "q13"); -} - void ARGBExtractAlphaRow_NEON(const uint8_t* src_argb, uint8_t* dst_a, int width) { @@ -1639,48 +1785,6 @@ void ARGBExtractAlphaRow_NEON(const uint8_t* src_argb, ); } -void ARGBToYJRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width) { - asm volatile( - "vmov.u8 d24, #29 \n" // B * 0.1140 coefficient - "vmov.u8 d25, #150 \n" // G * 0.5870 coefficient - "vmov.u8 d26, #77 \n" // R * 0.2990 coefficient - "1: \n" - "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q2, d0, d24 \n" // B - "vmlal.u8 q2, d1, d25 \n" // G - "vmlal.u8 q2, d2, d26 \n" // R - "vqrshrn.u16 d0, q2, #8 \n" // 16 bit to 8 bit Y - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "q0", "q1", "q2", "q12", "q13"); -} - -void RGBAToYJRow_NEON(const uint8_t* src_rgba, uint8_t* dst_y, int width) { - asm volatile( - "vmov.u8 d24, #29 \n" // B * 0.1140 coefficient - "vmov.u8 d25, #150 \n" // G * 0.5870 coefficient - "vmov.u8 d26, #77 \n" // R * 0.2990 coefficient - "1: \n" - "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 RGBA pixels. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q2, d1, d24 \n" // B - "vmlal.u8 q2, d2, d25 \n" // G - "vmlal.u8 q2, d3, d26 \n" // R - "vqrshrn.u16 d0, q2, #8 \n" // 16 bit to 8 bit Y - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_rgba), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "q0", "q1", "q2", "q12", "q13"); -} - // 8x1 pixels. 
void ARGBToUV444Row_NEON(const uint8_t* src_argb, uint8_t* dst_u, @@ -1700,15 +1804,13 @@ void ARGBToUV444Row_NEON(const uint8_t* src_argb, "vmull.u8 q2, d0, d24 \n" // B "vmlsl.u8 q2, d1, d25 \n" // G "vmlsl.u8 q2, d2, d26 \n" // R - "vadd.u16 q2, q2, q15 \n" // +128 -> unsigned "vmull.u8 q3, d2, d24 \n" // R "vmlsl.u8 q3, d1, d28 \n" // G "vmlsl.u8 q3, d0, d27 \n" // B - "vadd.u16 q3, q3, q15 \n" // +128 -> unsigned - "vqshrn.u16 d0, q2, #8 \n" // 16 bit to 8 bit U - "vqshrn.u16 d1, q3, #8 \n" // 16 bit to 8 bit V + "vaddhn.u16 d0, q2, q15 \n" // +128 -> unsigned + "vaddhn.u16 d1, q3, q15 \n" // +128 -> unsigned "vst1.8 {d0}, [%1]! \n" // store 8 pixels U. "vst1.8 {d1}, [%2]! \n" // store 8 pixels V. @@ -1728,13 +1830,11 @@ void ARGBToUV444Row_NEON(const uint8_t* src_argb, "vmul.s16 q8, " #QB ", q10 \n" /* B */ \ "vmls.s16 q8, " #QG ", q11 \n" /* G */ \ "vmls.s16 q8, " #QR ", q12 \n" /* R */ \ - "vadd.u16 q8, q8, q15 \n" /* +128 -> unsigned */ \ "vmul.s16 q9, " #QR ", q10 \n" /* R */ \ "vmls.s16 q9, " #QG ", q14 \n" /* G */ \ "vmls.s16 q9, " #QB ", q13 \n" /* B */ \ - "vadd.u16 q9, q9, q15 \n" /* +128 -> unsigned */ \ - "vqshrn.u16 d0, q8, #8 \n" /* 16 bit to 8 bit U */ \ - "vqshrn.u16 d1, q9, #8 \n" /* 16 bit to 8 bit V */ + "vaddhn.u16 d0, q8, q15 \n" /* +128 -> unsigned */ \ + "vaddhn.u16 d1, q9, q15 \n" /* +128 -> unsigned */ // clang-format on // TODO(fbarchard): Consider vhadd vertical, then vpaddl horizontal, avoid shr. @@ -1783,7 +1883,7 @@ void ARGBToUVRow_NEON(const uint8_t* src_argb, ); } -// TODO(fbarchard): Subsample match C code. +// TODO(fbarchard): Subsample match Intel code. void ARGBToUVJRow_NEON(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_u, @@ -1829,6 +1929,143 @@ void ARGBToUVJRow_NEON(const uint8_t* src_argb, ); } +void ABGRToUVJRow_NEON(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_uj, + uint8_t* dst_vj, + int width) { + asm volatile ( + "add %1, %0, %1 \n" // src_stride + src_argb + "vmov.s16 q10, #127 / 2 \n" // UB / VR 0.500 coefficient + "vmov.s16 q11, #84 / 2 \n" // UG -0.33126 coefficient + "vmov.s16 q12, #43 / 2 \n" // UR -0.16874 coefficient + "vmov.s16 q13, #20 / 2 \n" // VB -0.08131 coefficient + "vmov.s16 q14, #107 / 2 \n" // VG -0.41869 coefficient + "vmov.u16 q15, #0x8080 \n" // 128.5 + "1: \n" + "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ABGR pixels. + "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ABGR pixels. + "vpaddl.u8 q0, q0 \n" // R 16 bytes -> 8 shorts. + "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts. + "vpaddl.u8 q2, q2 \n" // B 16 bytes -> 8 shorts. + "vld4.8 {d8, d10, d12, d14}, [%1]! \n" // load 8 more ABGR pixels. + "vld4.8 {d9, d11, d13, d15}, [%1]! \n" // load last 8 ABGR pixels. + "vpadal.u8 q0, q4 \n" // R 16 bytes -> 8 shorts. + "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts. + "vpadal.u8 q2, q6 \n" // B 16 bytes -> 8 shorts. + + "vrshr.u16 q0, q0, #1 \n" // 2x average + "vrshr.u16 q1, q1, #1 \n" + "vrshr.u16 q2, q2, #1 \n" + + "subs %4, %4, #16 \n" // 16 processed per loop. + RGBTOUV(q2, q1, q0) + "vst1.8 {d0}, [%2]! \n" // store 8 pixels U. + "vst1.8 {d1}, [%3]! \n" // store 8 pixels V. + "bgt 1b \n" + : "+r"(src_abgr), // %0 + "+r"(src_stride_abgr), // %1 + "+r"(dst_uj), // %2 + "+r"(dst_vj), // %3 + "+r"(width) // %4 + : + : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + ); +} + +// TODO(fbarchard): Subsample match C code. 
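The new UVJ rows above load halved JPEG coefficients (127/2, 84/2, 43/2, 20/2, 107/2) because they operate on row-pair sums that have only been averaged once; on a plain per-pixel 2x2 average the same math uses the full coefficients together with the 0x8080 bias held in q15 (0x8080 = 128.5 * 256, so the final >> 8 both rounds and re-centers around 128). A small scalar sketch of that subsampled U/V step, assuming b, g, r are already the averaged 0..255 values (helper name is illustrative):

    #include <stdint.h>

    // Scalar sketch of the JPEG-range U/V math performed by RGBTOUV above.
    static void ScalarRgbToUVJ(int b, int g, int r, uint8_t* u, uint8_t* v) {
      *u = (uint8_t)((127 * b - 84 * g - 43 * r + 0x8080) >> 8);
      *v = (uint8_t)((127 * r - 107 * g - 20 * b + 0x8080) >> 8);
    }

For in-range inputs the sums stay within 0..65535, so no extra clamp is needed beyond taking the high byte, which is what the vaddhn narrowing does.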
+void RGB24ToUVJRow_NEON(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile ( + "add %1, %0, %1 \n" // src_stride + src_rgb24 + "vmov.s16 q10, #127 / 2 \n" // UB / VR 0.500 coefficient + "vmov.s16 q11, #84 / 2 \n" // UG -0.33126 coefficient + "vmov.s16 q12, #43 / 2 \n" // UR -0.16874 coefficient + "vmov.s16 q13, #20 / 2 \n" // VB -0.08131 coefficient + "vmov.s16 q14, #107 / 2 \n" // VG -0.41869 coefficient + "vmov.u16 q15, #0x8080 \n" // 128.5 + "1: \n" + "vld3.8 {d0, d2, d4}, [%0]! \n" // load 8 RGB24 pixels. + "vld3.8 {d1, d3, d5}, [%0]! \n" // load next 8 RGB24 pixels. + "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts. + "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts. + "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts. + "vld3.8 {d8, d10, d12}, [%1]! \n" // load 8 more RGB24 pixels. + "vld3.8 {d9, d11, d13}, [%1]! \n" // load last 8 RGB24 pixels. + "vpadal.u8 q0, q4 \n" // B 16 bytes -> 8 shorts. + "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts. + "vpadal.u8 q2, q6 \n" // R 16 bytes -> 8 shorts. + + "vrshr.u16 q0, q0, #1 \n" // 2x average + "vrshr.u16 q1, q1, #1 \n" + "vrshr.u16 q2, q2, #1 \n" + + "subs %4, %4, #16 \n" // 16 processed per loop. + RGBTOUV(q0, q1, q2) + "vst1.8 {d0}, [%2]! \n" // store 8 pixels U. + "vst1.8 {d1}, [%3]! \n" // store 8 pixels V. + "bgt 1b \n" + : "+r"(src_rgb24), // %0 + "+r"(src_stride_rgb24), // %1 + "+r"(dst_u), // %2 + "+r"(dst_v), // %3 + "+r"(width) // %4 + : + : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + ); +} + +// TODO(fbarchard): Subsample match C code. +void RAWToUVJRow_NEON(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile ( + "add %1, %0, %1 \n" // src_stride + src_raw + "vmov.s16 q10, #127 / 2 \n" // UB / VR 0.500 coefficient + "vmov.s16 q11, #84 / 2 \n" // UG -0.33126 coefficient + "vmov.s16 q12, #43 / 2 \n" // UR -0.16874 coefficient + "vmov.s16 q13, #20 / 2 \n" // VB -0.08131 coefficient + "vmov.s16 q14, #107 / 2 \n" // VG -0.41869 coefficient + "vmov.u16 q15, #0x8080 \n" // 128.5 + "1: \n" + "vld3.8 {d0, d2, d4}, [%0]! \n" // load 8 RAW pixels. + "vld3.8 {d1, d3, d5}, [%0]! \n" // load next 8 RAW pixels. + "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts. + "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts. + "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts. + "vld3.8 {d8, d10, d12}, [%1]! \n" // load 8 more RAW pixels. + "vld3.8 {d9, d11, d13}, [%1]! \n" // load last 8 RAW pixels. + "vpadal.u8 q0, q4 \n" // B 16 bytes -> 8 shorts. + "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts. + "vpadal.u8 q2, q6 \n" // R 16 bytes -> 8 shorts. + + "vrshr.u16 q0, q0, #1 \n" // 2x average + "vrshr.u16 q1, q1, #1 \n" + "vrshr.u16 q2, q2, #1 \n" + + "subs %4, %4, #16 \n" // 16 processed per loop. + RGBTOUV(q2, q1, q0) + "vst1.8 {d0}, [%2]! \n" // store 8 pixels U. + "vst1.8 {d1}, [%3]! \n" // store 8 pixels V. 
+ "bgt 1b \n" + : "+r"(src_raw), // %0 + "+r"(src_stride_raw), // %1 + "+r"(dst_u), // %2 + "+r"(dst_v), // %3 + "+r"(width) // %4 + : + : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + ); +} + void BGRAToUVRow_NEON(const uint8_t* src_bgra, int src_stride_bgra, uint8_t* dst_u, @@ -2319,9 +2556,6 @@ void ARGB4444ToYRow_NEON(const uint8_t* src_argb4444, : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13"); } -static const uvec8 kShuffleARGBToABGR = {2, 1, 0, 3, 6, 5, 4, 7, - 10, 9, 8, 11, 14, 13, 12, 15}; - void ARGBToAR64Row_NEON(const uint8_t* src_argb, uint16_t* dst_ar64, int width) { @@ -2342,11 +2576,15 @@ void ARGBToAR64Row_NEON(const uint8_t* src_argb, : "cc", "memory", "q0", "q1", "q2", "q3"); } +static const uvec8 kShuffleARGBToABGR = {2, 1, 0, 3, 6, 5, 4, 7, + 10, 9, 8, 11, 14, 13, 12, 15}; + void ARGBToAB64Row_NEON(const uint8_t* src_argb, uint16_t* dst_ab64, int width) { asm volatile( - "vld1.8 q4, %3 \n" // shuffler + "vld1.8 {q4}, [%3] \n" // shuffler + "1: \n" "vld1.8 {q0}, [%0]! \n" "vld1.8 {q2}, [%0]! \n" @@ -2360,10 +2598,10 @@ void ARGBToAB64Row_NEON(const uint8_t* src_argb, "vst2.8 {q0, q1}, [%1]! \n" // store 4 pixels "vst2.8 {q2, q3}, [%1]! \n" // store 4 pixels "bgt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_ab64), // %1 - "+r"(width) // %2 - : "m"(kShuffleARGBToABGR) // %3 + : "+r"(src_argb), // %0 + "+r"(dst_ab64), // %1 + "+r"(width) // %2 + : "r"(&kShuffleARGBToABGR) // %3 : "cc", "memory", "q0", "q1", "q2", "q3", "q4"); } @@ -2397,7 +2635,8 @@ void AB64ToARGBRow_NEON(const uint16_t* src_ab64, uint8_t* dst_argb, int width) { asm volatile( - "vld1.8 d8, %3 \n" // shuffler + "vld1.8 {d8}, [%3] \n" // shuffler + "1: \n" "vld1.16 {q0}, [%0]! \n" "vld1.16 {q1}, [%0]! \n" @@ -2411,168 +2650,186 @@ void AB64ToARGBRow_NEON(const uint16_t* src_ab64, "vst1.8 {q0}, [%1]! \n" // store 4 pixels "vst1.8 {q2}, [%1]! \n" // store 4 pixels "bgt 1b \n" - : "+r"(src_ab64), // %0 - "+r"(dst_argb), // %1 - "+r"(width) // %2 - : "m"(kShuffleAB64ToARGB) // %3 + : "+r"(src_ab64), // %0 + "+r"(dst_argb), // %1 + "+r"(width) // %2 + : "r"(&kShuffleAB64ToARGB) // %3 : "cc", "memory", "q0", "q1", "q2", "q3", "q4"); } -void BGRAToYRow_NEON(const uint8_t* src_bgra, uint8_t* dst_y, int width) { +struct RgbConstants { + uint8_t kRGBToY[4]; + uint16_t kAddY; + uint16_t pad; +}; + +// RGB to JPeg coefficients +// B * 0.1140 coefficient = 29 +// G * 0.5870 coefficient = 150 +// R * 0.2990 coefficient = 77 +// Add 0.5 = 0x80 +static const struct RgbConstants kRgb24JPEGConstants = {{29, 150, 77, 0}, + 128, + 0}; + +static const struct RgbConstants kRawJPEGConstants = {{77, 150, 29, 0}, 128, 0}; + +// RGB to BT.601 coefficients +// B * 0.1016 coefficient = 25 +// G * 0.5078 coefficient = 129 +// R * 0.2578 coefficient = 66 +// Add 16.5 = 0x1080 + +static const struct RgbConstants kRgb24I601Constants = {{25, 129, 66, 0}, + 0x1080, + 0}; + +static const struct RgbConstants kRawI601Constants = {{66, 129, 25, 0}, + 0x1080, + 0}; + +// ARGB expects first 3 values to contain RGB and 4th value is ignored. 
+void ARGBToYMatrixRow_NEON(const uint8_t* src_argb, + uint8_t* dst_y, + int width, + const struct RgbConstants* rgbconstants) { asm volatile( - "vmov.u8 d6, #25 \n" // B * 0.1016 coefficient - "vmov.u8 d5, #129 \n" // G * 0.5078 coefficient - "vmov.u8 d4, #66 \n" // R * 0.2578 coefficient - "vmov.u8 d7, #16 \n" // Add 16 constant + "vld1.8 {d0}, [%3] \n" // load rgbconstants + "vdup.u8 d20, d0[0] \n" + "vdup.u8 d21, d0[1] \n" + "vdup.u8 d22, d0[2] \n" + "vdup.u16 q12, d0[2] \n" "1: \n" - "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of BGRA. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q8, d1, d4 \n" // R - "vmlal.u8 q8, d2, d5 \n" // G - "vmlal.u8 q8, d3, d6 \n" // B - "vqrshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit Y - "vqadd.u8 d0, d7 \n" - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. + "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 16 pixels of ARGB + "vld4.8 {d1, d3, d5, d7}, [%0]! \n" + "subs %2, %2, #16 \n" // 16 processed per loop. + "vmull.u8 q8, d0, d20 \n" // B + "vmull.u8 q9, d1, d20 \n" + "vmlal.u8 q8, d2, d21 \n" // G + "vmlal.u8 q9, d3, d21 \n" + "vmlal.u8 q8, d4, d22 \n" // R + "vmlal.u8 q9, d5, d22 \n" + "vaddhn.u16 d0, q8, q12 \n" // 16 bit to 8 bit Y + "vaddhn.u16 d1, q9, q12 \n" + "vst1.8 {d0, d1}, [%1]! \n" // store 16 pixels Y. "bgt 1b \n" - : "+r"(src_bgra), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"); + : "+r"(src_argb), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(rgbconstants) // %3 + : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "d20", "d21", "d22", + "q12"); +} + +void ARGBToYRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width) { + ARGBToYMatrixRow_NEON(src_argb, dst_y, width, &kRgb24I601Constants); +} + +void ARGBToYJRow_NEON(const uint8_t* src_argb, uint8_t* dst_yj, int width) { + ARGBToYMatrixRow_NEON(src_argb, dst_yj, width, &kRgb24JPEGConstants); } void ABGRToYRow_NEON(const uint8_t* src_abgr, uint8_t* dst_y, int width) { + ARGBToYMatrixRow_NEON(src_abgr, dst_y, width, &kRawI601Constants); +} + +void ABGRToYJRow_NEON(const uint8_t* src_abgr, uint8_t* dst_yj, int width) { + ARGBToYMatrixRow_NEON(src_abgr, dst_yj, width, &kRawJPEGConstants); +} + +// RGBA expects first value to be A and ignored, then 3 values to contain RGB. +// Same code as ARGB, except the LD4 +void RGBAToYMatrixRow_NEON(const uint8_t* src_rgba, + uint8_t* dst_y, + int width, + const struct RgbConstants* rgbconstants) { asm volatile( - "vmov.u8 d6, #25 \n" // B * 0.1016 coefficient - "vmov.u8 d5, #129 \n" // G * 0.5078 coefficient - "vmov.u8 d4, #66 \n" // R * 0.2578 coefficient - "vmov.u8 d7, #16 \n" // Add 16 constant + "vld1.8 {d0}, [%3] \n" // load rgbconstants + "vdup.u8 d20, d0[0] \n" + "vdup.u8 d21, d0[1] \n" + "vdup.u8 d22, d0[2] \n" + "vdup.u16 q12, d0[2] \n" "1: \n" - "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of ABGR. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q8, d0, d4 \n" // R - "vmlal.u8 q8, d1, d5 \n" // G - "vmlal.u8 q8, d2, d6 \n" // B - "vqrshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit Y - "vqadd.u8 d0, d7 \n" - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. + "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 16 pixels of RGBA + "vld4.8 {d1, d3, d5, d7}, [%0]! \n" + "subs %2, %2, #16 \n" // 16 processed per loop. 
+ "vmull.u8 q8, d2, d20 \n" // B + "vmull.u8 q9, d3, d20 \n" + "vmlal.u8 q8, d4, d21 \n" // G + "vmlal.u8 q9, d5, d21 \n" + "vmlal.u8 q8, d6, d22 \n" // R + "vmlal.u8 q9, d7, d22 \n" + "vaddhn.u16 d0, q8, q12 \n" // 16 bit to 8 bit Y + "vaddhn.u16 d1, q9, q12 \n" + "vst1.8 {d0, d1}, [%1]! \n" // store 16 pixels Y. "bgt 1b \n" - : "+r"(src_abgr), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"); + : "+r"(src_rgba), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(rgbconstants) // %3 + : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "d20", "d21", "d22", + "q12"); } void RGBAToYRow_NEON(const uint8_t* src_rgba, uint8_t* dst_y, int width) { - asm volatile( - "vmov.u8 d4, #25 \n" // B * 0.1016 coefficient - "vmov.u8 d5, #129 \n" // G * 0.5078 coefficient - "vmov.u8 d6, #66 \n" // R * 0.2578 coefficient - "vmov.u8 d7, #16 \n" // Add 16 constant - "1: \n" - "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of RGBA. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q8, d1, d4 \n" // B - "vmlal.u8 q8, d2, d5 \n" // G - "vmlal.u8 q8, d3, d6 \n" // R - "vqrshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit Y - "vqadd.u8 d0, d7 \n" - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_rgba), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"); + RGBAToYMatrixRow_NEON(src_rgba, dst_y, width, &kRgb24I601Constants); } -void RGB24ToYRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_y, int width) { - asm volatile( - "vmov.u8 d4, #25 \n" // B * 0.1016 coefficient - "vmov.u8 d5, #129 \n" // G * 0.5078 coefficient - "vmov.u8 d6, #66 \n" // R * 0.2578 coefficient - "vmov.u8 d7, #16 \n" // Add 16 constant - "1: \n" - "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RGB24. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q8, d0, d4 \n" // B - "vmlal.u8 q8, d1, d5 \n" // G - "vmlal.u8 q8, d2, d6 \n" // R - "vqrshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit Y - "vqadd.u8 d0, d7 \n" - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_rgb24), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"); +void RGBAToYJRow_NEON(const uint8_t* src_rgba, uint8_t* dst_yj, int width) { + RGBAToYMatrixRow_NEON(src_rgba, dst_yj, width, &kRgb24JPEGConstants); } -void RAWToYRow_NEON(const uint8_t* src_raw, uint8_t* dst_y, int width) { +void BGRAToYRow_NEON(const uint8_t* src_bgra, uint8_t* dst_y, int width) { + RGBAToYMatrixRow_NEON(src_bgra, dst_y, width, &kRawI601Constants); +} + +void RGBToYMatrixRow_NEON(const uint8_t* src_rgb, + uint8_t* dst_y, + int width, + const struct RgbConstants* rgbconstants) { asm volatile( - "vmov.u8 d6, #25 \n" // B * 0.1016 coefficient - "vmov.u8 d5, #129 \n" // G * 0.5078 coefficient - "vmov.u8 d4, #66 \n" // R * 0.2578 coefficient - "vmov.u8 d7, #16 \n" // Add 16 constant + "vld1.8 {d0}, [%3] \n" // load rgbconstants + "vdup.u8 d20, d0[0] \n" + "vdup.u8 d21, d0[1] \n" + "vdup.u8 d22, d0[2] \n" + "vdup.u16 q12, d0[2] \n" "1: \n" - "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RAW. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q8, d0, d4 \n" // B - "vmlal.u8 q8, d1, d5 \n" // G - "vmlal.u8 q8, d2, d6 \n" // R - "vqrshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit Y - "vqadd.u8 d0, d7 \n" - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. + "vld3.8 {d2, d4, d6}, [%0]! \n" // load 16 pixels of + // RGB24. 
+ "vld3.8 {d3, d5, d7}, [%0]! \n" + "subs %2, %2, #16 \n" // 16 processed per loop. + "vmull.u8 q8, d2, d20 \n" // B + "vmull.u8 q9, d3, d20 \n" + "vmlal.u8 q8, d4, d21 \n" // G + "vmlal.u8 q9, d5, d21 \n" + "vmlal.u8 q8, d6, d22 \n" // R + "vmlal.u8 q9, d7, d22 \n" + "vaddhn.u16 d0, q8, q12 \n" // 16 bit to 8 bit Y + "vaddhn.u16 d1, q9, q12 \n" + "vst1.8 {d0, d1}, [%1]! \n" // store 16 pixels Y. "bgt 1b \n" - : "+r"(src_raw), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"); + : "+r"(src_rgb), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(rgbconstants) // %3 + : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "d20", "d21", "d22", + "q12"); } void RGB24ToYJRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_yj, int width) { - asm volatile( - "vmov.u8 d4, #29 \n" // B * 0.1140 coefficient - "vmov.u8 d5, #150 \n" // G * 0.5870 coefficient - "vmov.u8 d6, #77 \n" // R * 0.2990 coefficient - "1: \n" - "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RGB24. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q4, d0, d4 \n" // B - "vmlal.u8 q4, d1, d5 \n" // G - "vmlal.u8 q4, d2, d6 \n" // R - "vqrshrn.u16 d0, q4, #8 \n" // 16 bit to 8 bit Y - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_rgb24), // %0 - "+r"(dst_yj), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "q4"); + RGBToYMatrixRow_NEON(src_rgb24, dst_yj, width, &kRgb24JPEGConstants); } void RAWToYJRow_NEON(const uint8_t* src_raw, uint8_t* dst_yj, int width) { - asm volatile( - "vmov.u8 d6, #29 \n" // B * 0.1140 coefficient - "vmov.u8 d5, #150 \n" // G * 0.5870 coefficient - "vmov.u8 d4, #77 \n" // R * 0.2990 coefficient - "1: \n" - "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RAW. - "subs %2, %2, #8 \n" // 8 processed per loop. - "vmull.u8 q4, d0, d4 \n" // R - "vmlal.u8 q4, d1, d5 \n" // G - "vmlal.u8 q4, d2, d6 \n" // B - "vqrshrn.u16 d0, q4, #8 \n" // 16 bit to 8 bit Y - "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y. - "bgt 1b \n" - : "+r"(src_raw), // %0 - "+r"(dst_yj), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "q4"); + RGBToYMatrixRow_NEON(src_raw, dst_yj, width, &kRawJPEGConstants); +} + +void RGB24ToYRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_y, int width) { + RGBToYMatrixRow_NEON(src_rgb24, dst_y, width, &kRgb24I601Constants); +} + +void RAWToYRow_NEON(const uint8_t* src_raw, uint8_t* dst_y, int width) { + RGBToYMatrixRow_NEON(src_raw, dst_y, width, &kRawI601Constants); } // Bilinear filter 16x2 -> 16x1 @@ -2634,6 +2891,66 @@ void InterpolateRow_NEON(uint8_t* dst_ptr, : "cc", "memory", "q0", "q1", "d4", "d5", "q13", "q14"); } +// Bilinear filter 8x2 -> 8x1 +void InterpolateRow_16_NEON(uint16_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int dst_width, + int source_y_fraction) { + int y1_fraction = source_y_fraction; + int y0_fraction = 256 - y1_fraction; + const uint16_t* src_ptr1 = src_ptr + src_stride; + + asm volatile( + "cmp %4, #0 \n" + "beq 100f \n" + "cmp %4, #128 \n" + "beq 50f \n" + + "vdup.16 d17, %4 \n" + "vdup.16 d16, %5 \n" + // General purpose row blend. + "1: \n" + "vld1.16 {q0}, [%1]! \n" + "vld1.16 {q1}, [%2]! \n" + "subs %3, %3, #8 \n" + "vmull.u16 q2, d0, d16 \n" + "vmull.u16 q3, d1, d16 \n" + "vmlal.u16 q2, d2, d17 \n" + "vmlal.u16 q3, d3, d17 \n" + "vrshrn.u32 d0, q2, #8 \n" + "vrshrn.u32 d1, q3, #8 \n" + "vst1.16 {q0}, [%0]! 
\n" + "bgt 1b \n" + "b 99f \n" + + // Blend 50 / 50. + "50: \n" + "vld1.16 {q0}, [%1]! \n" + "vld1.16 {q1}, [%2]! \n" + "subs %3, %3, #8 \n" + "vrhadd.u16 q0, q1 \n" + "vst1.16 {q0}, [%0]! \n" + "bgt 50b \n" + "b 99f \n" + + // Blend 100 / 0 - Copy row unchanged. + "100: \n" + "vld1.16 {q0}, [%1]! \n" + "subs %3, %3, #8 \n" + "vst1.16 {q0}, [%0]! \n" + "bgt 100b \n" + + "99: \n" + : "+r"(dst_ptr), // %0 + "+r"(src_ptr), // %1 + "+r"(src_ptr1), // %2 + "+r"(dst_width) // %3 + : "r"(y1_fraction), // %4 + "r"(y0_fraction) // %5 + : "cc", "memory", "q0", "q1", "q2", "q3", "q8"); +} + // dr * (256 - sa) / 256 + sr = dr - dr * sa / 256 + sr void ARGBBlendRow_NEON(const uint8_t* src_argb, const uint8_t* src_argb1, @@ -3518,7 +3835,7 @@ void MultiplyRow_16_NEON(const uint16_t* src_y, int scale, int width) { asm volatile( - "vdup.16 q2, %2 \n" + "vdup.16 q2, %3 \n" "1: \n" "vld1.16 {q0}, [%0]! \n" "vld1.16 {q1}, [%0]! \n" @@ -3526,13 +3843,12 @@ void MultiplyRow_16_NEON(const uint16_t* src_y, "vmul.u16 q1, q1, q2 \n" "vst1.16 {q0}, [%1]! \n" "vst1.16 {q1}, [%1]! \n" - "subs %3, %3, #16 \n" // 16 src pixels per loop + "subs %2, %2, #16 \n" // 16 src pixels per loop "bgt 1b \n" : "+r"(src_y), // %0 "+r"(dst_y), // %1 - "+r"(scale), // %2 - "+r"(width) // %3 - : + "+r"(width) // %2 + : "r"(scale) // %3 : "cc", "memory", "q0", "q1", "q2"); } @@ -3541,7 +3857,7 @@ void DivideRow_16_NEON(const uint16_t* src_y, int scale, int width) { asm volatile( - "vdup.16 q0, %2 \n" + "vdup.16 q0, %3 \n" "1: \n" "vld1.16 {q1}, [%0]! \n" "vld1.16 {q2}, [%0]! \n" @@ -3559,16 +3875,44 @@ void DivideRow_16_NEON(const uint16_t* src_y, "vmovn.u32 d5, q2 \n" "vst1.16 {q1}, [%1]! \n" "vst1.16 {q2}, [%1]! \n" - "subs %3, %3, #16 \n" // 16 src pixels per loop + "subs %2, %2, #16 \n" // 16 src pixels per loop "bgt 1b \n" : "+r"(src_y), // %0 "+r"(dst_y), // %1 - "+r"(scale), // %2 - "+r"(width) // %3 - : + "+r"(width) // %2 + : "r"(scale) // %3 : "cc", "memory", "q0", "q1", "q2", "q3", "q4"); } +// Use scale to convert lsb formats to msb, depending how many bits there are: +// 32768 = 9 bits = shr 1 +// 16384 = 10 bits = shr 2 +// 4096 = 12 bits = shr 4 +// 256 = 16 bits = shr 8 +void Convert16To8Row_NEON(const uint16_t* src_y, + uint8_t* dst_y, + int scale, + int width) { + int shift = 15 - __builtin_clz((int32_t)scale); // Negative shl is shr + asm volatile( + "vdup.16 q2, %3 \n" + "1: \n" + "vld1.16 {q0}, [%0]! \n" + "vld1.16 {q1}, [%0]! \n" + "vshl.u16 q0, q0, q2 \n" // shr = q2 is negative + "vshl.u16 q1, q1, q2 \n" + "vqmovn.u16 d0, q0 \n" + "vqmovn.u16 d1, q1 \n" + "subs %2, %2, #16 \n" // 16 src pixels per loop + "vst1.8 {q0}, [%1]! \n" + "bgt 1b \n" + : "+r"(src_y), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(shift) // %3 + : "cc", "memory", "q0", "q1", "q2"); +} + #endif // !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__).. #ifdef __cplusplus diff --git a/third-party/libyuv/third_party/libyuv/source/row_neon64.cc b/third-party/libyuv/third_party/libyuv/source/row_neon64.cc index da7e3c7cd4..85d1c1b9a0 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_neon64.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_neon64.cc @@ -15,6 +15,10 @@ namespace libyuv { extern "C" { #endif +// Enable LIBYUV_USE_ST2, LIBYUV_USE_ST3, LIBYUV_USE_ST4 for CPUs that prefer +// STn over ZIP1+ST1 +// Exynos M1, M2, M3 are slow with ST2, ST3 and ST4 instructions. + // This module is for GCC Neon armv8 64 bit. 
#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__) @@ -138,6 +142,29 @@ void I444ToARGBRow_NEON(const uint8_t* src_y, : "cc", "memory", YUVTORGB_REGS, "v19"); } +void I444ToRGB24Row_NEON(const uint8_t* src_y, + const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile( + YUVTORGB_SETUP + "1: \n" READYUV444 YUVTORGB + RGBTORGB8 + "subs %w[width], %w[width], #8 \n" + "st3 {v16.8b,v17.8b,v18.8b}, [%[dst_rgb24]], #24 \n" + "b.gt 1b \n" + : [src_y] "+r"(src_y), // %[src_y] + [src_u] "+r"(src_u), // %[src_u] + [src_v] "+r"(src_v), // %[src_v] + [dst_rgb24] "+r"(dst_rgb24), // %[dst_rgb24] + [width] "+r"(width) // %[width] + : [kUVCoeff] "r"(&yuvconstants->kUVCoeff), // %[kUVCoeff] + [kRGBCoeffBias] "r"(&yuvconstants->kRGBCoeffBias) // %[kRGBCoeffBias] + : "cc", "memory", YUVTORGB_REGS); +} + void I422ToARGBRow_NEON(const uint8_t* src_y, const uint8_t* src_u, const uint8_t* src_v, @@ -382,6 +409,7 @@ void I400ToARGBRow_NEON(const uint8_t* src_y, : "cc", "memory", YUVTORGB_REGS, "v19"); } +#if LIBYUV_USE_ST4 void J400ToARGBRow_NEON(const uint8_t* src_y, uint8_t* dst_argb, int width) { asm volatile( "movi v23.8b, #255 \n" @@ -399,6 +427,27 @@ void J400ToARGBRow_NEON(const uint8_t* src_y, uint8_t* dst_argb, int width) { : : "cc", "memory", "v20", "v21", "v22", "v23"); } +#else +void J400ToARGBRow_NEON(const uint8_t* src_y, uint8_t* dst_argb, int width) { + asm volatile( + "movi v20.8b, #255 \n" + "1: \n" + "ldr d16, [%0], #8 \n" + "subs %w2, %w2, #8 \n" + "zip1 v18.16b, v16.16b, v16.16b \n" // YY + "zip1 v19.16b, v16.16b, v20.16b \n" // YA + "prfm pldl1keep, [%0, 448] \n" + "zip1 v16.16b, v18.16b, v19.16b \n" // YYYA + "zip2 v17.16b, v18.16b, v19.16b \n" + "stp q16, q17, [%1], #32 \n" + "b.gt 1b \n" + : "+r"(src_y), // %0 + "+r"(dst_argb), // %1 + "+r"(width) // %2 + : + : "cc", "memory", "v16", "v17", "v18", "v19", "v20"); +} +#endif // LIBYUV_USE_ST4 void NV12ToARGBRow_NEON(const uint8_t* src_y, const uint8_t* src_uv, @@ -578,6 +627,129 @@ void SplitUVRow_NEON(const uint8_t* src_uv, ); } +// Reads 16 byte Y's from tile and writes out 16 Y's. +// MM21 Y tiles are 16x32 so src_tile_stride = 512 bytes +// MM21 UV tiles are 8x16 so src_tile_stride = 256 bytes +// width measured in bytes so 8 UV = 16. +void DetileRow_NEON(const uint8_t* src, + ptrdiff_t src_tile_stride, + uint8_t* dst, + int width) { + asm volatile( + "1: \n" + "ld1 {v0.16b}, [%0], %3 \n" // load 16 bytes + "subs %w2, %w2, #16 \n" // 16 processed per loop + "prfm pldl1keep, [%0, 1792] \n" // 7 tiles of 256b ahead + "st1 {v0.16b}, [%1], #16 \n" // store 16 bytes + "b.gt 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride) // %3 + : "cc", "memory", "v0" // Clobber List + ); +} + +// Reads 16 byte Y's of 16 bits from tile and writes out 16 Y's. +void DetileRow_16_NEON(const uint16_t* src, + ptrdiff_t src_tile_stride, + uint16_t* dst, + int width) { + asm volatile( + "1: \n" + "ld1 {v0.8h,v1.8h}, [%0], %3 \n" // load 16 pixels + "subs %w2, %w2, #16 \n" // 16 processed per loop + "prfm pldl1keep, [%0, 3584] \n" // 7 tiles of 512b ahead + "st1 {v0.8h,v1.8h}, [%1], #32 \n" // store 16 pixels + "b.gt 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"(src_tile_stride * 2) // %3 + : "cc", "memory", "v0", "v1" // Clobber List + ); +} + +// Read 16 bytes of UV, detile, and write 8 bytes of U and 8 bytes of V. 
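The comment above describes the UV variant of detiling: each 16-byte group of interleaved UV is split into 8 U and 8 V bytes, and the source again advances by one tile stride per group. A scalar sketch of that split, assuming width counts interleaved UV bytes and is a multiple of 16 (helper name is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    // Scalar sketch of the detile + split described above: deinterleave
    // 8 UV pairs per tile column, then hop to the next tile.
    static void ScalarDetileSplitUV(const uint8_t* src_uv,
                                    ptrdiff_t src_tile_stride,
                                    uint8_t* dst_u, uint8_t* dst_v, int width) {
      int x, i;
      for (x = 0; x < width; x += 16) {
        for (i = 0; i < 8; ++i) {
          dst_u[i] = src_uv[2 * i + 0];  // like the even lanes of ld2
          dst_v[i] = src_uv[2 * i + 1];  // like the odd lanes of ld2
        }
        src_uv += src_tile_stride;  // next UV tile (256 bytes for MM21)
        dst_u += 8;
        dst_v += 8;
      }
    }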
+void DetileSplitUVRow_NEON(const uint8_t* src_uv, + ptrdiff_t src_tile_stride, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + asm volatile( + "1: \n" + "ld2 {v0.8b,v1.8b}, [%0], %4 \n" + "subs %w3, %w3, #16 \n" + "prfm pldl1keep, [%0, 1792] \n" + "st1 {v0.8b}, [%1], #8 \n" + "st1 {v1.8b}, [%2], #8 \n" + "b.gt 1b \n" + : "+r"(src_uv), // %0 + "+r"(dst_u), // %1 + "+r"(dst_v), // %2 + "+r"(width) // %3 + : "r"(src_tile_stride) // %4 + : "cc", "memory", "v0", "v1" // Clobber List + ); +} + +#if LIBYUV_USE_ST2 +// Read 16 Y, 8 UV, and write 8 YUY2 +void DetileToYUY2_NEON(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width) { + asm volatile( + "1: \n" + "ld1 {v0.16b}, [%0], %4 \n" // load 16 Ys + "prfm pldl1keep, [%0, 1792] \n" + "ld1 {v1.16b}, [%1], %5 \n" // load 8 UVs + "prfm pldl1keep, [%1, 1792] \n" + "subs %w3, %w3, #16 \n" // store 8 YUY2 + "st2 {v0.16b,v1.16b}, [%2], #32 \n" + "b.gt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_uv), // %1 + "+r"(dst_yuy2), // %2 + "+r"(width) // %3 + : "r"(src_y_tile_stride), // %4 + "r"(src_uv_tile_stride) // %5 + : "cc", "memory", "v0", "v1" // Clobber list + ); +} +#else +// Read 16 Y, 8 UV, and write 8 YUY2 +void DetileToYUY2_NEON(const uint8_t* src_y, + ptrdiff_t src_y_tile_stride, + const uint8_t* src_uv, + ptrdiff_t src_uv_tile_stride, + uint8_t* dst_yuy2, + int width) { + asm volatile( + "1: \n" + "ld1 {v0.16b}, [%0], %4 \n" // load 16 Ys + "ld1 {v1.16b}, [%1], %5 \n" // load 8 UVs + "subs %w3, %w3, #16 \n" + "prfm pldl1keep, [%0, 1792] \n" + "zip1 v2.16b, v0.16b, v1.16b \n" + "prfm pldl1keep, [%1, 1792] \n" + "zip2 v3.16b, v0.16b, v1.16b \n" + "st1 {v2.16b,v3.16b}, [%2], #32 \n" // store 8 YUY2 + "b.gt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_uv), // %1 + "+r"(dst_yuy2), // %2 + "+r"(width) // %3 + : "r"(src_y_tile_stride), // %4 + "r"(src_uv_tile_stride) // %5 + : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber list + ); +} +#endif + +#if LIBYUV_USE_ST2 // Reads 16 U's and V's and writes out 16 pairs of UV. void MergeUVRow_NEON(const uint8_t* src_u, const uint8_t* src_v, @@ -601,6 +773,86 @@ void MergeUVRow_NEON(const uint8_t* src_u, ); } +void MergeUVRow_16_NEON(const uint16_t* src_u, + const uint16_t* src_v, + uint16_t* dst_uv, + int depth, + int width) { + int shift = 16 - depth; + asm volatile( + "dup v2.8h, %w4 \n" + "1: \n" + "ld1 {v0.8h}, [%0], #16 \n" // load 8 U + "subs %w3, %w3, #8 \n" // 8 src pixels per loop + "ld1 {v1.8h}, [%1], #16 \n" // load 8 V + "ushl v0.8h, v0.8h, v2.8h \n" + "prfm pldl1keep, [%0, 448] \n" + "ushl v1.8h, v1.8h, v2.8h \n" + "prfm pldl1keep, [%1, 448] \n" + "st2 {v0.8h, v1.8h}, [%2], #32 \n" // store 8 UV pixels + "b.gt 1b \n" + : "+r"(src_u), // %0 + "+r"(src_v), // %1 + "+r"(dst_uv), // %2 + "+r"(width) // %3 + : "r"(shift) // %4 + : "cc", "memory", "v0", "v1", "v2"); +} +#else +// Reads 16 U's and V's and writes out 16 pairs of UV. 
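MergeUVRow_16_NEON, added just above in the ST2 branch and repeated below with zip1/zip2, left-shifts each sample by 16 - depth so that, for example, 10-bit U and V end up MSB-aligned in the interleaved 16-bit output. A scalar sketch of that interleave-and-shift (helper name is illustrative):

    #include <stdint.h>

    // Scalar sketch of MergeUVRow_16: interleave U and V and shift left by
    // (16 - depth) so lsb-justified samples become msb-justified.
    static void ScalarMergeUV16(const uint16_t* src_u, const uint16_t* src_v,
                                uint16_t* dst_uv, int depth, int width) {
      int shift = 16 - depth;  // e.g. depth 10 -> shift 6
      int x;
      for (x = 0; x < width; ++x) {
        dst_uv[2 * x + 0] = (uint16_t)(src_u[x] << shift);
        dst_uv[2 * x + 1] = (uint16_t)(src_v[x] << shift);
      }
    }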
+void MergeUVRow_NEON(const uint8_t* src_u, + const uint8_t* src_v, + uint8_t* dst_uv, + int width) { + asm volatile( + "1: \n" + "ld1 {v0.16b}, [%0], #16 \n" // load U + "ld1 {v1.16b}, [%1], #16 \n" // load V + "subs %w3, %w3, #16 \n" // 16 processed per loop + "zip1 v2.16b, v0.16b, v1.16b \n" + "prfm pldl1keep, [%0, 448] \n" + "zip2 v3.16b, v0.16b, v1.16b \n" + "prfm pldl1keep, [%1, 448] \n" + "st1 {v2.16b,v3.16b}, [%2], #32 \n" // store 16 pairs of UV + "b.gt 1b \n" + : "+r"(src_u), // %0 + "+r"(src_v), // %1 + "+r"(dst_uv), // %2 + "+r"(width) // %3 // Output registers + : // Input registers + : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List + ); +} + +void MergeUVRow_16_NEON(const uint16_t* src_u, + const uint16_t* src_v, + uint16_t* dst_uv, + int depth, + int width) { + int shift = 16 - depth; + asm volatile( + "dup v4.8h, %w4 \n" + "1: \n" + "ld1 {v0.8h}, [%0], #16 \n" // load 8 U + "subs %w3, %w3, #8 \n" // 8 src pixels per loop + "ld1 {v1.8h}, [%1], #16 \n" // load 8 V + "ushl v0.8h, v0.8h, v4.8h \n" + "ushl v1.8h, v1.8h, v4.8h \n" + "prfm pldl1keep, [%0, 448] \n" + "zip1 v2.8h, v0.8h, v1.8h \n" + "zip2 v3.8h, v0.8h, v1.8h \n" + "prfm pldl1keep, [%1, 448] \n" + "st1 {v2.8h, v3.8h}, [%2], #32 \n" // store 8 UV pixels + "b.gt 1b \n" + : "+r"(src_u), // %0 + "+r"(src_v), // %1 + "+r"(dst_uv), // %2 + "+r"(width) // %3 + : "r"(shift) // %4 + : "cc", "memory", "v0", "v1", "v2", "v1", "v2", "v3", "v4"); +} +#endif // LIBYUV_USE_ST2 + // Reads 16 packed RGB and write to planar dst_r, dst_g, dst_b. void SplitRGBRow_NEON(const uint8_t* src_rgb, uint8_t* dst_r, @@ -681,6 +933,7 @@ void SplitARGBRow_NEON(const uint8_t* src_rgba, ); } +#if LIBYUV_USE_ST4 // Reads 16 planar R's, G's, B's and A's and writes out 16 packed ARGB at a time void MergeARGBRow_NEON(const uint8_t* src_r, const uint8_t* src_g, @@ -690,9 +943,9 @@ void MergeARGBRow_NEON(const uint8_t* src_r, int width) { asm volatile( "1: \n" - "ld1 {v2.16b}, [%0], #16 \n" // load R - "ld1 {v1.16b}, [%1], #16 \n" // load G "ld1 {v0.16b}, [%2], #16 \n" // load B + "ld1 {v1.16b}, [%1], #16 \n" // load G + "ld1 {v2.16b}, [%0], #16 \n" // load R "ld1 {v3.16b}, [%3], #16 \n" // load A "subs %w5, %w5, #16 \n" // 16 processed per loop "prfm pldl1keep, [%0, 448] \n" @@ -711,6 +964,47 @@ void MergeARGBRow_NEON(const uint8_t* src_r, : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } +#else +// Reads 16 planar R's, G's, B's and A's and writes out 16 packed ARGB at a time +void MergeARGBRow_NEON(const uint8_t* src_r, + const uint8_t* src_g, + const uint8_t* src_b, + const uint8_t* src_a, + uint8_t* dst_argb, + int width) { + asm volatile( + "1: \n" + "ld1 {v0.16b}, [%2], #16 \n" // load B + "ld1 {v1.16b}, [%1], #16 \n" // load G + "ld1 {v2.16b}, [%0], #16 \n" // load R + "ld1 {v3.16b}, [%3], #16 \n" // load A + "subs %w5, %w5, #16 \n" // 16 processed per loop + "prfm pldl1keep, [%2, 448] \n" + "zip1 v4.16b, v0.16b, v1.16b \n" // BG + "zip1 v5.16b, v2.16b, v3.16b \n" // RA + "prfm pldl1keep, [%1, 448] \n" + "zip2 v6.16b, v0.16b, v1.16b \n" // BG + "zip2 v7.16b, v2.16b, v3.16b \n" // RA + "prfm pldl1keep, [%0, 448] \n" + "zip1 v0.8h, v4.8h, v5.8h \n" // BGRA + "zip2 v1.8h, v4.8h, v5.8h \n" + "prfm pldl1keep, [%3, 448] \n" + "zip1 v2.8h, v6.8h, v7.8h \n" + "zip2 v3.8h, v6.8h, v7.8h \n" + "st1 {v0.16b,v1.16b,v2.16b,v3.16b}, [%4], #64 \n" // store 16ARGB + "b.gt 1b \n" + : "+r"(src_r), // %0 + "+r"(src_g), // %1 + "+r"(src_b), // %2 + "+r"(src_a), // %3 + "+r"(dst_argb), // %4 + "+r"(width) // %5 + : // Input registers + : "cc", 
"memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", + "v7" // Clobber List + ); +} +#endif // LIBYUV_USE_ST4 // Reads 16 packed ARGB and write to planar dst_r, dst_g, dst_b. void SplitXRGBRow_NEON(const uint8_t* src_rgba, @@ -1373,17 +1667,16 @@ void ARGBToRGB24Row_NEON(const uint8_t* src_argb, int width) { asm volatile( "1: \n" - "ld4 {v1.8b,v2.8b,v3.8b,v4.8b}, [%0], #32 \n" // load 8 ARGB - "subs %w2, %w2, #8 \n" // 8 processed per loop. + "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 ARGB + "subs %w2, %w2, #16 \n" // 16 pixels per loop. "prfm pldl1keep, [%0, 448] \n" - "st3 {v1.8b,v2.8b,v3.8b}, [%1], #24 \n" // store 8 pixels of - // RGB24 + "st3 {v0.16b,v1.16b,v2.16b}, [%1], #48 \n" // store 8 RGB24 "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_rgb24), // %1 "+r"(width) // %2 : - : "cc", "memory", "v1", "v2", "v3", "v4" // Clobber List + : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } @@ -1535,6 +1828,29 @@ void UYVYToUVRow_NEON(const uint8_t* src_uyvy, ); } +void YUY2ToNVUVRow_NEON(const uint8_t* src_yuy2, + int stride_yuy2, + uint8_t* dst_uv, + int width) { + const uint8_t* src_yuy2b = src_yuy2 + stride_yuy2; + asm volatile( + "1: \n" + "ld2 {v0.16b,v1.16b}, [%0], #32 \n" // load 16 pixels + "subs %w3, %w3, #16 \n" // 16 pixels = 8 UVs. + "ld2 {v2.16b,v3.16b}, [%1], #32 \n" // load next row + "urhadd v4.16b, v1.16b, v3.16b \n" // average rows of UV + "prfm pldl1keep, [%0, 448] \n" + "st1 {v4.16b}, [%2], #16 \n" // store 8 UV. + "b.gt 1b \n" + : "+r"(src_yuy2), // %0 + "+r"(src_yuy2b), // %1 + "+r"(dst_uv), // %2 + "+r"(width) // %3 + : + : "cc", "memory", "v0", "v1", "v2", "v3", "v4" // Clobber List + ); +} + // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. void ARGBShuffleRow_NEON(const uint8_t* src_argb, uint8_t* dst_argb, @@ -1684,9 +2000,7 @@ void ARGBToARGB4444Row_NEON(const uint8_t* src_argb, : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v23"); } -static const uvec8 kShuffleARGBToABGR = {2, 1, 0, 3, 6, 5, 4, 7, - 10, 9, 8, 11, 14, 13, 12, 15}; - +#if LIBYUV_USE_ST2 void ARGBToAR64Row_NEON(const uint8_t* src_argb, uint16_t* dst_ar64, int width) { @@ -1707,11 +2021,14 @@ void ARGBToAR64Row_NEON(const uint8_t* src_argb, : "cc", "memory", "v0", "v1", "v2", "v3"); } +static const uvec8 kShuffleARGBToABGR = {2, 1, 0, 3, 6, 5, 4, 7, + 10, 9, 8, 11, 14, 13, 12, 15}; + void ARGBToAB64Row_NEON(const uint8_t* src_argb, uint16_t* dst_ab64, int width) { asm volatile( - "ld1 {v4.16b}, %3 \n" // shuffler + "ldr q4, [%3] \n" // shuffler "1: \n" "ldp q0, q2, [%0], #32 \n" // load 8 pixels "tbl v0.16b, {v0.16b}, v4.16b \n" @@ -1723,12 +2040,60 @@ void ARGBToAB64Row_NEON(const uint8_t* src_argb, "st2 {v0.16b, v1.16b}, [%1], #32 \n" // store 4 pixels "st2 {v2.16b, v3.16b}, [%1], #32 \n" // store 4 pixels "b.gt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_ab64), // %1 - "+r"(width) // %2 - : "m"(kShuffleARGBToABGR) // %3 + : "+r"(src_argb), // %0 + "+r"(dst_ab64), // %1 + "+r"(width) // %2 + : "r"(&kShuffleARGBToABGR) // %3 : "cc", "memory", "v0", "v1", "v2", "v3", "v4"); } +#else +void ARGBToAR64Row_NEON(const uint8_t* src_argb, + uint16_t* dst_ar64, + int width) { + asm volatile( + "1: \n" + "ldp q0, q1, [%0], #32 \n" // load 8 ARGB pixels + "subs %w2, %w2, #8 \n" // 8 processed per loop. 
+ "zip1 v2.16b, v0.16b, v0.16b \n" + "zip2 v3.16b, v0.16b, v0.16b \n" + "prfm pldl1keep, [%0, 448] \n" + "zip1 v4.16b, v1.16b, v1.16b \n" + "zip2 v5.16b, v1.16b, v1.16b \n" + "st1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%1], #64 \n" // 8 AR64 + "b.gt 1b \n" + : "+r"(src_argb), // %0 + "+r"(dst_ar64), // %1 + "+r"(width) // %2 + : + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5"); +} + +static const uvec8 kShuffleARGBToAB64[2] = { + {2, 2, 1, 1, 0, 0, 3, 3, 6, 6, 5, 5, 4, 4, 7, 7}, + {10, 10, 9, 9, 8, 8, 11, 11, 14, 14, 13, 13, 12, 12, 15, 15}}; + +void ARGBToAB64Row_NEON(const uint8_t* src_argb, + uint16_t* dst_ab64, + int width) { + asm volatile( + "ldp q6, q7, [%3] \n" // 2 shufflers + "1: \n" + "ldp q0, q1, [%0], #32 \n" // load 8 pixels + "subs %w2, %w2, #8 \n" // 8 processed per loop. + "tbl v2.16b, {v0.16b}, v6.16b \n" // ARGB to AB64 + "tbl v3.16b, {v0.16b}, v7.16b \n" + "prfm pldl1keep, [%0, 448] \n" + "tbl v4.16b, {v1.16b}, v6.16b \n" + "tbl v5.16b, {v1.16b}, v7.16b \n" + "st1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%1], #64 \n" // 8 AR64 + "b.gt 1b \n" + : "+r"(src_argb), // %0 + "+r"(dst_ab64), // %1 + "+r"(width) // %2 + : "r"(&kShuffleARGBToAB64[0]) // %3 + : "cc", "memory", "v0", "v1", "v2", "v3", "v4"); +} +#endif // LIBYUV_USE_ST2 static const uvec8 kShuffleAR64ToARGB = {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}; @@ -1737,7 +2102,7 @@ void AR64ToARGBRow_NEON(const uint16_t* src_ar64, uint8_t* dst_argb, int width) { asm volatile( - "ld1 {v4.16b}, %3 \n" // shuffler + "ldr q4, [%3] \n" // shuffler "1: \n" "ldp q0, q1, [%0], #32 \n" // load 4 pixels "ldp q2, q3, [%0], #32 \n" // load 4 pixels @@ -1747,10 +2112,10 @@ void AR64ToARGBRow_NEON(const uint16_t* src_ar64, "subs %w2, %w2, #8 \n" // 8 processed per loop. "stp q0, q2, [%1], #32 \n" // store 8 pixels "b.gt 1b \n" - : "+r"(src_ar64), // %0 - "+r"(dst_argb), // %1 - "+r"(width) // %2 - : "m"(kShuffleAR64ToARGB) // %3 + : "+r"(src_ar64), // %0 + "+r"(dst_argb), // %1 + "+r"(width) // %2 + : "r"(&kShuffleAR64ToARGB) // %3 : "cc", "memory", "v0", "v1", "v2", "v3", "v4"); } @@ -1761,7 +2126,7 @@ void AB64ToARGBRow_NEON(const uint16_t* src_ab64, uint8_t* dst_argb, int width) { asm volatile( - "ld1 {v4.16b}, %3 \n" // shuffler + "ldr q4, [%3] \n" // shuffler "1: \n" "ldp q0, q1, [%0], #32 \n" // load 4 pixels "ldp q2, q3, [%0], #32 \n" // load 4 pixels @@ -1771,37 +2136,13 @@ void AB64ToARGBRow_NEON(const uint16_t* src_ab64, "subs %w2, %w2, #8 \n" // 8 processed per loop. "stp q0, q2, [%1], #32 \n" // store 8 pixels "b.gt 1b \n" - : "+r"(src_ab64), // %0 - "+r"(dst_argb), // %1 - "+r"(width) // %2 - : "m"(kShuffleAB64ToARGB) // %3 + : "+r"(src_ab64), // %0 + "+r"(dst_argb), // %1 + "+r"(width) // %2 + : "r"(&kShuffleAB64ToARGB) // %3 : "cc", "memory", "v0", "v1", "v2", "v3", "v4"); } -void ARGBToYRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width) { - asm volatile( - "movi v4.8b, #25 \n" // B * 0.1016 coefficient - "movi v5.8b, #129 \n" // G * 0.5078 coefficient - "movi v6.8b, #66 \n" // R * 0.2578 coefficient - "movi v7.8b, #16 \n" // Add 16 constant - "1: \n" - "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v3.8h, v0.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v3.8h, v1.8b, v5.8b \n" // G - "umlal v3.8h, v2.8b, v6.8b \n" // R - "uqrshrn v0.8b, v3.8h, #8 \n" // 16 bit to 8 bit Y - "uqadd v0.8b, v0.8b, v7.8b \n" - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. 
- "b.gt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); -} - void ARGBExtractAlphaRow_NEON(const uint8_t* src_argb, uint8_t* dst_a, int width) { @@ -1820,50 +2161,6 @@ void ARGBExtractAlphaRow_NEON(const uint8_t* src_argb, ); } -void ARGBToYJRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width) { - asm volatile( - "movi v4.8b, #29 \n" // B * 0.1140 coefficient - "movi v5.8b, #150 \n" // G * 0.5870 coefficient - "movi v6.8b, #77 \n" // R * 0.2990 coefficient - "1: \n" - "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v3.8h, v0.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v3.8h, v1.8b, v5.8b \n" // G - "umlal v3.8h, v2.8b, v6.8b \n" // R - "uqrshrn v0.8b, v3.8h, #8 \n" // 16 bit to 8 bit Y - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. - "b.gt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"); -} - -void RGBAToYJRow_NEON(const uint8_t* src_rgba, uint8_t* dst_y, int width) { - asm volatile( - "movi v4.8b, #29 \n" // B * 0.1140 coefficient - "movi v5.8b, #150 \n" // G * 0.5870 coefficient - "movi v6.8b, #77 \n" // R * 0.2990 coefficient - "1: \n" - "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 RGBA - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v0.8h, v1.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v0.8h, v2.8b, v5.8b \n" // G - "umlal v0.8h, v3.8b, v6.8b \n" // R - "uqrshrn v3.8b, v0.8h, #8 \n" // 16 bit to 8 bit Y - "st1 {v3.8b}, [%1], #8 \n" // store 8 pixels Y. - "b.gt 1b \n" - : "+r"(src_rgba), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"); -} - // 8x1 pixels. void ARGBToUV444Row_NEON(const uint8_t* src_argb, uint8_t* dst_u, @@ -1881,18 +2178,16 @@ void ARGBToUV444Row_NEON(const uint8_t* src_argb, "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB "subs %w3, %w3, #8 \n" // 8 processed per loop. "umull v4.8h, v0.8b, v24.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" "umlsl v4.8h, v1.8b, v25.8b \n" // G "umlsl v4.8h, v2.8b, v26.8b \n" // R - "add v4.8h, v4.8h, v29.8h \n" // +128 -> unsigned + "prfm pldl1keep, [%0, 448] \n" "umull v3.8h, v2.8b, v24.8b \n" // R "umlsl v3.8h, v1.8b, v28.8b \n" // G "umlsl v3.8h, v0.8b, v27.8b \n" // B - "add v3.8h, v3.8h, v29.8h \n" // +128 -> unsigned - "uqshrn v0.8b, v4.8h, #8 \n" // 16 bit to 8 bit U - "uqshrn v1.8b, v3.8h, #8 \n" // 16 bit to 8 bit V + "addhn v0.8b, v4.8h, v29.8h \n" // +128 -> unsigned + "addhn v1.8b, v3.8h, v29.8h \n" // +128 -> unsigned "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%2], #8 \n" // store 8 pixels V. @@ -1923,10 +2218,8 @@ void ARGBToUV444Row_NEON(const uint8_t* src_argb, "mls v4.8h, " #QG ",v24.8h \n" /* G */ \ "mls v3.8h, " #QR ",v22.8h \n" /* R */ \ "mls v4.8h, " #QB ",v23.8h \n" /* B */ \ - "add v3.8h, v3.8h, v25.8h \n" /* +128 -> unsigned */ \ - "add v4.8h, v4.8h, v25.8h \n" /* +128 -> unsigned */ \ - "uqshrn v0.8b, v3.8h, #8 \n" /* 16 bit to 8 bit U */ \ - "uqshrn v1.8b, v4.8h, #8 \n" /* 16 bit to 8 bit V */ + "addhn v0.8b, v3.8h, v25.8h \n" /* +128 -> unsigned */ \ + "addhn v1.8b, v4.8h, v25.8h \n" /* +128 -> unsigned */ // clang-format on // TODO(fbarchard): Consider vhadd vertical, then vpaddl horizontal, avoid shr. 
@@ -1973,6 +2266,7 @@ void ARGBToUVRow_NEON(const uint8_t* src_argb, ); } +// TODO(fbarchard): Subsample match Intel code. void ARGBToUVJRow_NEON(const uint8_t* src_argb, int src_stride_argb, uint8_t* dst_u, @@ -2002,7 +2296,7 @@ void ARGBToUVJRow_NEON(const uint8_t* src_argb, "urshr v1.8h, v1.8h, #1 \n" "urshr v2.8h, v2.8h, #1 \n" - "subs %w4, %w4, #16 \n" // 32 processed per loop. + "subs %w4, %w4, #16 \n" // 16 processed per loop. RGBTOUV(v0.8h, v1.8h, v2.8h) "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. @@ -2018,6 +2312,141 @@ void ARGBToUVJRow_NEON(const uint8_t* src_argb, ); } +void ABGRToUVJRow_NEON(const uint8_t* src_abgr, + int src_stride_abgr, + uint8_t* dst_uj, + uint8_t* dst_vj, + int width) { + const uint8_t* src_abgr_1 = src_abgr + src_stride_abgr; + asm volatile ( + "movi v20.8h, #63, lsl #0 \n" // UB/VR coeff (0.500) / 2 + "movi v21.8h, #42, lsl #0 \n" // UG coeff (-0.33126) / 2 + "movi v22.8h, #21, lsl #0 \n" // UR coeff (-0.16874) / 2 + "movi v23.8h, #10, lsl #0 \n" // VB coeff (-0.08131) / 2 + "movi v24.8h, #53, lsl #0 \n" // VG coeff (-0.41869) / 2 + "movi v25.16b, #0x80 \n" // 128.5 (0x8080 in 16-bit) + "1: \n" + "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels. + "uaddlp v0.8h, v0.16b \n" // R 16 bytes -> 8 shorts. + "prfm pldl1keep, [%0, 448] \n" + "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts. + "uaddlp v2.8h, v2.16b \n" // B 16 bytes -> 8 shorts. + "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load next 16 + "uadalp v0.8h, v4.16b \n" // R 16 bytes -> 8 shorts. + "prfm pldl1keep, [%1, 448] \n" + "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts. + "uadalp v2.8h, v6.16b \n" // B 16 bytes -> 8 shorts. + + "urshr v0.8h, v0.8h, #1 \n" // 2x average + "urshr v1.8h, v1.8h, #1 \n" + "urshr v2.8h, v2.8h, #1 \n" + + "subs %w4, %w4, #16 \n" // 16 processed per loop. + RGBTOUV(v2.8h, v1.8h, v0.8h) + "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. + "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. + "b.gt 1b \n" + : "+r"(src_abgr), // %0 + "+r"(src_abgr_1), // %1 + "+r"(dst_uj), // %2 + "+r"(dst_vj), // %3 + "+r"(width) // %4 + : + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v20", "v21", "v22", "v23", "v24", "v25" + ); +} + +void RGB24ToUVJRow_NEON(const uint8_t* src_rgb24, + int src_stride_rgb24, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + const uint8_t* src_rgb24_1 = src_rgb24 + src_stride_rgb24; + asm volatile ( + "movi v20.8h, #63, lsl #0 \n" // UB/VR coeff (0.500) / 2 + "movi v21.8h, #42, lsl #0 \n" // UG coeff (-0.33126) / 2 + "movi v22.8h, #21, lsl #0 \n" // UR coeff (-0.16874) / 2 + "movi v23.8h, #10, lsl #0 \n" // VB coeff (-0.08131) / 2 + "movi v24.8h, #53, lsl #0 \n" // VG coeff (-0.41869) / 2 + "movi v25.16b, #0x80 \n" // 128.5 (0x8080 in 16-bit) + "1: \n" + "ld3 {v0.16b,v1.16b,v2.16b}, [%0], #48 \n" // load 16 pixels. + "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts. + "prfm pldl1keep, [%0, 448] \n" + "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts. + "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts. + "ld3 {v4.16b,v5.16b,v6.16b}, [%1], #48 \n" // load next 16 + "uadalp v0.8h, v4.16b \n" // B 16 bytes -> 8 shorts. + "prfm pldl1keep, [%1, 448] \n" + "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts. + "uadalp v2.8h, v6.16b \n" // R 16 bytes -> 8 shorts. + + "urshr v0.8h, v0.8h, #1 \n" // 2x average + "urshr v1.8h, v1.8h, #1 \n" + "urshr v2.8h, v2.8h, #1 \n" + + "subs %w4, %w4, #16 \n" // 16 processed per loop. 
+ RGBTOUV(v0.8h, v1.8h, v2.8h) + "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. + "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. + "b.gt 1b \n" + : "+r"(src_rgb24), // %0 + "+r"(src_rgb24_1), // %1 + "+r"(dst_u), // %2 + "+r"(dst_v), // %3 + "+r"(width) // %4 + : + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v20", "v21", "v22", "v23", "v24", "v25" + ); +} + +void RAWToUVJRow_NEON(const uint8_t* src_raw, + int src_stride_raw, + uint8_t* dst_u, + uint8_t* dst_v, + int width) { + const uint8_t* src_raw_1 = src_raw + src_stride_raw; + asm volatile ( + "movi v20.8h, #63, lsl #0 \n" // UB/VR coeff (0.500) / 2 + "movi v21.8h, #42, lsl #0 \n" // UG coeff (-0.33126) / 2 + "movi v22.8h, #21, lsl #0 \n" // UR coeff (-0.16874) / 2 + "movi v23.8h, #10, lsl #0 \n" // VB coeff (-0.08131) / 2 + "movi v24.8h, #53, lsl #0 \n" // VG coeff (-0.41869) / 2 + "movi v25.16b, #0x80 \n" // 128.5 (0x8080 in 16-bit) + "1: \n" + "ld3 {v0.16b,v1.16b,v2.16b}, [%0], #48 \n" // load 16 pixels. + "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts. + "prfm pldl1keep, [%0, 448] \n" + "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts. + "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts. + "ld3 {v4.16b,v5.16b,v6.16b}, [%1], #48 \n" // load next 16 + "uadalp v0.8h, v4.16b \n" // B 16 bytes -> 8 shorts. + "prfm pldl1keep, [%1, 448] \n" + "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts. + "uadalp v2.8h, v6.16b \n" // R 16 bytes -> 8 shorts. + + "urshr v0.8h, v0.8h, #1 \n" // 2x average + "urshr v1.8h, v1.8h, #1 \n" + "urshr v2.8h, v2.8h, #1 \n" + + "subs %w4, %w4, #16 \n" // 16 processed per loop. + RGBTOUV(v2.8h, v1.8h, v0.8h) + "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. + "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. + "b.gt 1b \n" + : "+r"(src_raw), // %0 + "+r"(src_raw_1), // %1 + "+r"(dst_u), // %2 + "+r"(dst_v), // %3 + "+r"(width) // %4 + : + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v20", "v21", "v22", "v23", "v24", "v25" + ); +} + void BGRAToUVRow_NEON(const uint8_t* src_bgra, int src_stride_bgra, uint8_t* dst_u, @@ -2042,7 +2471,7 @@ void BGRAToUVRow_NEON(const uint8_t* src_bgra, "urshr v1.8h, v3.8h, #1 \n" "urshr v2.8h, v2.8h, #1 \n" - "subs %w4, %w4, #16 \n" // 32 processed per loop. + "subs %w4, %w4, #16 \n" // 16 processed per loop. RGBTOUV(v0.8h, v1.8h, v2.8h) "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. @@ -2082,7 +2511,7 @@ void ABGRToUVRow_NEON(const uint8_t* src_abgr, "urshr v2.8h, v2.8h, #1 \n" "urshr v1.8h, v1.8h, #1 \n" - "subs %w4, %w4, #16 \n" // 32 processed per loop. + "subs %w4, %w4, #16 \n" // 16 processed per loop. RGBTOUV(v0.8h, v2.8h, v1.8h) "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. @@ -2122,7 +2551,7 @@ void RGBAToUVRow_NEON(const uint8_t* src_rgba, "urshr v1.8h, v1.8h, #1 \n" "urshr v2.8h, v2.8h, #1 \n" - "subs %w4, %w4, #16 \n" // 32 processed per loop. + "subs %w4, %w4, #16 \n" // 16 processed per loop. RGBTOUV(v0.8h, v1.8h, v2.8h) "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. @@ -2162,7 +2591,7 @@ void RGB24ToUVRow_NEON(const uint8_t* src_rgb24, "urshr v1.8h, v1.8h, #1 \n" "urshr v2.8h, v2.8h, #1 \n" - "subs %w4, %w4, #16 \n" // 32 processed per loop. + "subs %w4, %w4, #16 \n" // 16 processed per loop. RGBTOUV(v0.8h, v1.8h, v2.8h) "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. 
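A quick sanity check on the halved JPEG coefficients used by the ABGRToUVJRow/RGB24ToUVJRow/RAWToUVJRow functions above: the uaddlp/uadalp/urshr sequence feeds the multiplies twice the 2x2 average, which is why each weight is half the usual 8-bit fixed-point value, and the U weights (63, -42, -21) and V weights (63, -53, -10) each sum to zero, so a uniform gray block maps exactly to 128 after the 0x8080 bias and high-byte narrow. Illustrative C check only (not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void) {
  int b2 = 2 * 128, g2 = 2 * 128, r2 = 2 * 128;  // 2x average of a gray 2x2 block
  int u = (uint16_t)(63 * b2 - 42 * g2 - 21 * r2 + 0x8080) >> 8;
  int v = (uint16_t)(63 * r2 - 53 * g2 - 10 * b2 + 0x8080) >> 8;
  assert(u == 128 && v == 128);
  return 0;
}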
@@ -2187,7 +2616,7 @@ void RAWToUVRow_NEON(const uint8_t* src_raw, asm volatile ( RGBTOUV_SETUP_REG "1: \n" - "ld3 {v0.16b,v1.16b,v2.16b}, [%0], #48 \n" // load 8 RAW pixels. + "ld3 {v0.16b,v1.16b,v2.16b}, [%0], #48 \n" // load 16 RAW pixels. "uaddlp v2.8h, v2.16b \n" // B 16 bytes -> 8 shorts. "prfm pldl1keep, [%0, 448] \n" "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts. @@ -2202,7 +2631,7 @@ void RAWToUVRow_NEON(const uint8_t* src_raw, "urshr v1.8h, v1.8h, #1 \n" "urshr v0.8h, v0.8h, #1 \n" - "subs %w4, %w4, #16 \n" // 32 processed per loop. + "subs %w4, %w4, #16 \n" // 16 processed per loop. RGBTOUV(v2.8h, v1.8h, v0.8h) "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U. "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V. @@ -2474,168 +2903,179 @@ void ARGB4444ToYRow_NEON(const uint8_t* src_argb4444, : "cc", "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"); } -void BGRAToYRow_NEON(const uint8_t* src_bgra, uint8_t* dst_y, int width) { +struct RgbConstants { + uint8_t kRGBToY[4]; + uint16_t kAddY; + uint16_t pad; +}; + +// RGB to JPeg coefficients +// B * 0.1140 coefficient = 29 +// G * 0.5870 coefficient = 150 +// R * 0.2990 coefficient = 77 +// Add 0.5 = 0x80 +static const struct RgbConstants kRgb24JPEGConstants = {{29, 150, 77, 0}, + 128, + 0}; + +static const struct RgbConstants kRawJPEGConstants = {{77, 150, 29, 0}, 128, 0}; + +// RGB to BT.601 coefficients +// B * 0.1016 coefficient = 25 +// G * 0.5078 coefficient = 129 +// R * 0.2578 coefficient = 66 +// Add 16.5 = 0x1080 + +static const struct RgbConstants kRgb24I601Constants = {{25, 129, 66, 0}, + 0x1080, + 0}; + +static const struct RgbConstants kRawI601Constants = {{66, 129, 25, 0}, + 0x1080, + 0}; + +// ARGB expects first 3 values to contain RGB and 4th value is ignored. +void ARGBToYMatrixRow_NEON(const uint8_t* src_argb, + uint8_t* dst_y, + int width, + const struct RgbConstants* rgbconstants) { asm volatile( - "movi v4.8b, #66 \n" // R * 0.2578 coefficient - "movi v5.8b, #129 \n" // G * 0.5078 coefficient - "movi v6.8b, #25 \n" // B * 0.1016 coefficient - "movi v7.8b, #16 \n" // Add 16 constant + "ldr d0, [%3] \n" // load rgbconstants + "dup v6.16b, v0.b[0] \n" + "dup v7.16b, v0.b[1] \n" + "dup v16.16b, v0.b[2] \n" + "dup v17.8h, v0.h[2] \n" "1: \n" - "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v16.8h, v1.8b, v4.8b \n" // R + "ld4 {v2.16b,v3.16b,v4.16b,v5.16b}, [%0], #64 \n" // load 16 + // pixels. + "subs %w2, %w2, #16 \n" // 16 processed per loop. + "umull v0.8h, v2.8b, v6.8b \n" // B + "umull2 v1.8h, v2.16b, v6.16b \n" "prfm pldl1keep, [%0, 448] \n" - "umlal v16.8h, v2.8b, v5.8b \n" // G - "umlal v16.8h, v3.8b, v6.8b \n" // B - "uqrshrn v0.8b, v16.8h, #8 \n" // 16 bit to 8 bit Y - "uqadd v0.8b, v0.8b, v7.8b \n" - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. + "umlal v0.8h, v3.8b, v7.8b \n" // G + "umlal2 v1.8h, v3.16b, v7.16b \n" + "umlal v0.8h, v4.8b, v16.8b \n" // R + "umlal2 v1.8h, v4.16b, v16.16b \n" + "addhn v0.8b, v0.8h, v17.8h \n" // 16 bit to 8 bit Y + "addhn v1.8b, v1.8h, v17.8h \n" + "st1 {v0.8b, v1.8b}, [%1], #16 \n" // store 16 pixels Y. 
"b.gt 1b \n" - : "+r"(src_bgra), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"); + : "+r"(src_argb), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(rgbconstants) // %3 + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", + "v17"); +} + +void ARGBToYRow_NEON(const uint8_t* src_argb, uint8_t* dst_y, int width) { + ARGBToYMatrixRow_NEON(src_argb, dst_y, width, &kRgb24I601Constants); +} + +void ARGBToYJRow_NEON(const uint8_t* src_argb, uint8_t* dst_yj, int width) { + ARGBToYMatrixRow_NEON(src_argb, dst_yj, width, &kRgb24JPEGConstants); } void ABGRToYRow_NEON(const uint8_t* src_abgr, uint8_t* dst_y, int width) { + ARGBToYMatrixRow_NEON(src_abgr, dst_y, width, &kRawI601Constants); +} + +void ABGRToYJRow_NEON(const uint8_t* src_abgr, uint8_t* dst_yj, int width) { + ARGBToYMatrixRow_NEON(src_abgr, dst_yj, width, &kRawJPEGConstants); +} + +// RGBA expects first value to be A and ignored, then 3 values to contain RGB. +// Same code as ARGB, except the LD4 +void RGBAToYMatrixRow_NEON(const uint8_t* src_rgba, + uint8_t* dst_y, + int width, + const struct RgbConstants* rgbconstants) { asm volatile( - "movi v6.8b, #25 \n" // B * 0.1016 coefficient - "movi v5.8b, #129 \n" // G * 0.5078 coefficient - "movi v4.8b, #66 \n" // R * 0.2578 coefficient - "movi v7.8b, #16 \n" // Add 16 constant + "ldr d0, [%3] \n" // load rgbconstants + "dup v6.16b, v0.b[0] \n" + "dup v7.16b, v0.b[1] \n" + "dup v16.16b, v0.b[2] \n" + "dup v17.8h, v0.h[2] \n" "1: \n" - "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v16.8h, v0.8b, v4.8b \n" // R + "ld4 {v1.16b,v2.16b,v3.16b,v4.16b}, [%0], #64 \n" // load 16 + // pixels. + "subs %w2, %w2, #16 \n" // 16 processed per loop. + "umull v0.8h, v2.8b, v6.8b \n" // B + "umull2 v1.8h, v2.16b, v6.16b \n" "prfm pldl1keep, [%0, 448] \n" - "umlal v16.8h, v1.8b, v5.8b \n" // G - "umlal v16.8h, v2.8b, v6.8b \n" // B - "uqrshrn v0.8b, v16.8h, #8 \n" // 16 bit to 8 bit Y - "uqadd v0.8b, v0.8b, v7.8b \n" - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. + "umlal v0.8h, v3.8b, v7.8b \n" // G + "umlal2 v1.8h, v3.16b, v7.16b \n" + "umlal v0.8h, v4.8b, v16.8b \n" // R + "umlal2 v1.8h, v4.16b, v16.16b \n" + "addhn v0.8b, v0.8h, v17.8h \n" // 16 bit to 8 bit Y + "addhn v1.8b, v1.8h, v17.8h \n" + "st1 {v0.8b, v1.8b}, [%1], #16 \n" // store 16 pixels Y. "b.gt 1b \n" - : "+r"(src_abgr), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"); + : "+r"(src_rgba), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(rgbconstants) // %3 + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", + "v17"); } void RGBAToYRow_NEON(const uint8_t* src_rgba, uint8_t* dst_y, int width) { - asm volatile( - "movi v4.8b, #25 \n" // B * 0.1016 coefficient - "movi v5.8b, #129 \n" // G * 0.5078 coefficient - "movi v6.8b, #66 \n" // R * 0.2578 coefficient - "movi v7.8b, #16 \n" // Add 16 constant - "1: \n" - "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v16.8h, v1.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v16.8h, v2.8b, v5.8b \n" // G - "umlal v16.8h, v3.8b, v6.8b \n" // R - "uqrshrn v0.8b, v16.8h, #8 \n" // 16 bit to 8 bit Y - "uqadd v0.8b, v0.8b, v7.8b \n" - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. 
- "b.gt 1b \n" - : "+r"(src_rgba), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"); + RGBAToYMatrixRow_NEON(src_rgba, dst_y, width, &kRgb24I601Constants); } -void RGB24ToYRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_y, int width) { - asm volatile( - "movi v4.8b, #25 \n" // B * 0.1016 coefficient - "movi v5.8b, #129 \n" // G * 0.5078 coefficient - "movi v6.8b, #66 \n" // R * 0.2578 coefficient - "movi v7.8b, #16 \n" // Add 16 constant - "1: \n" - "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v16.8h, v0.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v16.8h, v1.8b, v5.8b \n" // G - "umlal v16.8h, v2.8b, v6.8b \n" // R - "uqrshrn v0.8b, v16.8h, #8 \n" // 16 bit to 8 bit Y - "uqadd v0.8b, v0.8b, v7.8b \n" - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. - "b.gt 1b \n" - : "+r"(src_rgb24), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"); +void RGBAToYJRow_NEON(const uint8_t* src_rgba, uint8_t* dst_yj, int width) { + RGBAToYMatrixRow_NEON(src_rgba, dst_yj, width, &kRgb24JPEGConstants); } -void RAWToYRow_NEON(const uint8_t* src_raw, uint8_t* dst_y, int width) { +void BGRAToYRow_NEON(const uint8_t* src_bgra, uint8_t* dst_y, int width) { + RGBAToYMatrixRow_NEON(src_bgra, dst_y, width, &kRawI601Constants); +} + +void RGBToYMatrixRow_NEON(const uint8_t* src_rgb, + uint8_t* dst_y, + int width, + const struct RgbConstants* rgbconstants) { asm volatile( - "movi v6.8b, #25 \n" // B * 0.1016 coefficient - "movi v5.8b, #129 \n" // G * 0.5078 coefficient - "movi v4.8b, #66 \n" // R * 0.2578 coefficient - "movi v7.8b, #16 \n" // Add 16 constant + "ldr d0, [%3] \n" // load rgbconstants + "dup v5.16b, v0.b[0] \n" + "dup v6.16b, v0.b[1] \n" + "dup v7.16b, v0.b[2] \n" + "dup v16.8h, v0.h[2] \n" "1: \n" - "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v16.8h, v0.8b, v4.8b \n" // B + "ld3 {v2.16b,v3.16b,v4.16b}, [%0], #48 \n" // load 16 pixels. + "subs %w2, %w2, #16 \n" // 16 processed per loop. + "umull v0.8h, v2.8b, v5.8b \n" // B + "umull2 v1.8h, v2.16b, v5.16b \n" "prfm pldl1keep, [%0, 448] \n" - "umlal v16.8h, v1.8b, v5.8b \n" // G - "umlal v16.8h, v2.8b, v6.8b \n" // R - "uqrshrn v0.8b, v16.8h, #8 \n" // 16 bit to 8 bit Y - "uqadd v0.8b, v0.8b, v7.8b \n" - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. + "umlal v0.8h, v3.8b, v6.8b \n" // G + "umlal2 v1.8h, v3.16b, v6.16b \n" + "umlal v0.8h, v4.8b, v7.8b \n" // R + "umlal2 v1.8h, v4.16b, v7.16b \n" + "addhn v0.8b, v0.8h, v16.8h \n" // 16 bit to 8 bit Y + "addhn v1.8b, v1.8h, v16.8h \n" + "st1 {v0.8b, v1.8b}, [%1], #16 \n" // store 16 pixels Y. "b.gt 1b \n" - : "+r"(src_raw), // %0 - "+r"(dst_y), // %1 - "+r"(width) // %2 - : + : "+r"(src_rgb), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(rgbconstants) // %3 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"); } void RGB24ToYJRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_yj, int width) { - asm volatile( - "movi v4.8b, #29 \n" // B * 0.1140 coefficient - "movi v5.8b, #150 \n" // G * 0.5870 coefficient - "movi v6.8b, #77 \n" // R * 0.2990 coefficient - "1: \n" - "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. 
- "umull v0.8h, v0.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v0.8h, v1.8b, v5.8b \n" // G - "umlal v0.8h, v2.8b, v6.8b \n" // R - "uqrshrn v0.8b, v0.8h, #8 \n" // 16 bit to 8 bit Y - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. - "b.gt 1b \n" - : "+r"(src_rgb24), // %0 - "+r"(dst_yj), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"); + RGBToYMatrixRow_NEON(src_rgb24, dst_yj, width, &kRgb24JPEGConstants); } void RAWToYJRow_NEON(const uint8_t* src_raw, uint8_t* dst_yj, int width) { - asm volatile( - "movi v6.8b, #29 \n" // B * 0.1140 coefficient - "movi v5.8b, #150 \n" // G * 0.5870 coefficient - "movi v4.8b, #77 \n" // R * 0.2990 coefficient - "1: \n" - "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // load 8 pixels. - "subs %w2, %w2, #8 \n" // 8 processed per loop. - "umull v0.8h, v0.8b, v4.8b \n" // B - "prfm pldl1keep, [%0, 448] \n" - "umlal v0.8h, v1.8b, v5.8b \n" // G - "umlal v0.8h, v2.8b, v6.8b \n" // R - "uqrshrn v0.8b, v0.8h, #8 \n" // 16 bit to 8 bit Y - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y. - "b.gt 1b \n" - : "+r"(src_raw), // %0 - "+r"(dst_yj), // %1 - "+r"(width) // %2 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"); + RGBToYMatrixRow_NEON(src_raw, dst_yj, width, &kRawJPEGConstants); +} + +void RGB24ToYRow_NEON(const uint8_t* src_rgb24, uint8_t* dst_y, int width) { + RGBToYMatrixRow_NEON(src_rgb24, dst_y, width, &kRgb24I601Constants); +} + +void RAWToYRow_NEON(const uint8_t* src_raw, uint8_t* dst_y, int width) { + RGBToYMatrixRow_NEON(src_raw, dst_y, width, &kRawI601Constants); } // Bilinear filter 16x2 -> 16x1 @@ -2703,6 +3143,151 @@ void InterpolateRow_NEON(uint8_t* dst_ptr, : "cc", "memory", "v0", "v1", "v3", "v4", "v5"); } +// Bilinear filter 8x2 -> 8x1 +void InterpolateRow_16_NEON(uint16_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int dst_width, + int source_y_fraction) { + int y1_fraction = source_y_fraction; + int y0_fraction = 256 - y1_fraction; + const uint16_t* src_ptr1 = src_ptr + src_stride; + + asm volatile( + "cmp %w4, #0 \n" + "b.eq 100f \n" + "cmp %w4, #128 \n" + "b.eq 50f \n" + + "dup v5.8h, %w4 \n" + "dup v4.8h, %w5 \n" + // General purpose row blend. + "1: \n" + "ld1 {v0.8h}, [%1], #16 \n" + "ld1 {v1.8h}, [%2], #16 \n" + "subs %w3, %w3, #8 \n" + "umull v2.4s, v0.4h, v4.4h \n" + "prfm pldl1keep, [%1, 448] \n" + "umull2 v3.4s, v0.8h, v4.8h \n" + "prfm pldl1keep, [%2, 448] \n" + "umlal v2.4s, v1.4h, v5.4h \n" + "umlal2 v3.4s, v1.8h, v5.8h \n" + "rshrn v0.4h, v2.4s, #8 \n" + "rshrn2 v0.8h, v3.4s, #8 \n" + "st1 {v0.8h}, [%0], #16 \n" + "b.gt 1b \n" + "b 99f \n" + + // Blend 50 / 50. + "50: \n" + "ld1 {v0.8h}, [%1], #16 \n" + "ld1 {v1.8h}, [%2], #16 \n" + "subs %w3, %w3, #8 \n" + "prfm pldl1keep, [%1, 448] \n" + "urhadd v0.8h, v0.8h, v1.8h \n" + "prfm pldl1keep, [%2, 448] \n" + "st1 {v0.8h}, [%0], #16 \n" + "b.gt 50b \n" + "b 99f \n" + + // Blend 100 / 0 - Copy row unchanged. 
+ "100: \n" + "ld1 {v0.8h}, [%1], #16 \n" + "subs %w3, %w3, #8 \n" + "prfm pldl1keep, [%1, 448] \n" + "st1 {v0.8h}, [%0], #16 \n" + "b.gt 100b \n" + + "99: \n" + : "+r"(dst_ptr), // %0 + "+r"(src_ptr), // %1 + "+r"(src_ptr1), // %2 + "+r"(dst_width) // %3 + : "r"(y1_fraction), // %4 + "r"(y0_fraction) // %5 + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5"); +} + +// Bilinear filter 8x2 -> 8x1 +// Use scale to convert lsb formats to msb, depending how many bits there are: +// 32768 = 9 bits +// 16384 = 10 bits +// 4096 = 12 bits +// 256 = 16 bits +void InterpolateRow_16To8_NEON(uint8_t* dst_ptr, + const uint16_t* src_ptr, + ptrdiff_t src_stride, + int scale, + int dst_width, + int source_y_fraction) { + int y1_fraction = source_y_fraction; + int y0_fraction = 256 - y1_fraction; + const uint16_t* src_ptr1 = src_ptr + src_stride; + int shift = 15 - __builtin_clz((int32_t)scale); // Negative shl is shr + + asm volatile( + "dup v6.8h, %w6 \n" + "cmp %w4, #0 \n" + "b.eq 100f \n" + "cmp %w4, #128 \n" + "b.eq 50f \n" + + "dup v5.8h, %w4 \n" + "dup v4.8h, %w5 \n" + // General purpose row blend. + "1: \n" + "ld1 {v0.8h}, [%1], #16 \n" + "ld1 {v1.8h}, [%2], #16 \n" + "subs %w3, %w3, #8 \n" + "umull v2.4s, v0.4h, v4.4h \n" + "prfm pldl1keep, [%1, 448] \n" + "umull2 v3.4s, v0.8h, v4.8h \n" + "prfm pldl1keep, [%2, 448] \n" + "umlal v2.4s, v1.4h, v5.4h \n" + "umlal2 v3.4s, v1.8h, v5.8h \n" + "rshrn v0.4h, v2.4s, #8 \n" + "rshrn2 v0.8h, v3.4s, #8 \n" + "ushl v0.8h, v0.8h, v6.8h \n" + "uqxtn v0.8b, v0.8h \n" + "st1 {v0.8b}, [%0], #8 \n" + "b.gt 1b \n" + "b 99f \n" + + // Blend 50 / 50. + "50: \n" + "ld1 {v0.8h}, [%1], #16 \n" + "ld1 {v1.8h}, [%2], #16 \n" + "subs %w3, %w3, #8 \n" + "prfm pldl1keep, [%1, 448] \n" + "urhadd v0.8h, v0.8h, v1.8h \n" + "prfm pldl1keep, [%2, 448] \n" + "ushl v0.8h, v0.8h, v6.8h \n" + "uqxtn v0.8b, v0.8h \n" + "st1 {v0.8b}, [%0], #8 \n" + "b.gt 50b \n" + "b 99f \n" + + // Blend 100 / 0 - Copy row unchanged. + "100: \n" + "ldr q0, [%1], #16 \n" + "ushl v0.8h, v0.8h, v2.8h \n" // shr = v2 is negative + "prfm pldl1keep, [%1, 448] \n" + "uqxtn v0.8b, v0.8h \n" + "subs %w3, %w3, #8 \n" // 8 src pixels per loop + "str d0, [%0], #8 \n" // store 8 pixels + "b.gt 100b \n" + + "99: \n" + : "+r"(dst_ptr), // %0 + "+r"(src_ptr), // %1 + "+r"(src_ptr1), // %2 + "+r"(dst_width) // %3 + : "r"(y1_fraction), // %4 + "r"(y0_fraction), // %5 + "r"(shift) // %6 + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"); +} + // dr * (256 - sa) / 256 + sr = dr - dr * sa / 256 + sr void ARGBBlendRow_NEON(const uint8_t* src_argb, const uint8_t* src_argb1, @@ -3580,6 +4165,7 @@ void GaussRow_F32_NEON(const float* src, float* dst, int width) { : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"); } +#if LIBYUV_USE_ST3 // Convert biplanar NV21 to packed YUV24 void NV21ToYUV24Row_NEON(const uint8_t* src_y, const uint8_t* src_vu, @@ -3603,8 +4189,44 @@ void NV21ToYUV24Row_NEON(const uint8_t* src_y, : : "cc", "memory", "v0", "v1", "v2"); } +#else +static const uvec8 kYUV24Shuffle[3] = { + {16, 17, 0, 16, 17, 1, 18, 19, 2, 18, 19, 3, 20, 21, 4, 20}, + {21, 5, 22, 23, 6, 22, 23, 7, 24, 25, 8, 24, 25, 9, 26, 27}, + {10, 26, 27, 11, 28, 29, 12, 28, 29, 13, 30, 31, 14, 30, 31, 15}}; -// AYUV is YVUA in memory. UV for NV12 is UV order in memory. +// Convert biplanar NV21 to packed YUV24 +// NV21 has VU in memory for chroma. 
+// YUV24 is VUY in memory +void NV21ToYUV24Row_NEON(const uint8_t* src_y, + const uint8_t* src_vu, + uint8_t* dst_yuv24, + int width) { + asm volatile( + "ld1 {v5.16b,v6.16b,v7.16b}, [%4] \n" // 3 shuffler constants + "1: \n" + "ld1 {v0.16b}, [%0], #16 \n" // load 16 Y values + "ld1 {v1.16b}, [%1], #16 \n" // load 8 VU values + "tbl v2.16b, {v0.16b,v1.16b}, v5.16b \n" // weave into YUV24 + "prfm pldl1keep, [%0, 448] \n" + "tbl v3.16b, {v0.16b,v1.16b}, v6.16b \n" + "prfm pldl1keep, [%1, 448] \n" + "tbl v4.16b, {v0.16b,v1.16b}, v7.16b \n" + "subs %w3, %w3, #16 \n" // 16 pixels per loop + "st1 {v2.16b,v3.16b,v4.16b}, [%2], #48 \n" // store 16 YUV pixels + "b.gt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_vu), // %1 + "+r"(dst_yuv24), // %2 + "+r"(width) // %3 + : "r"(&kYUV24Shuffle[0]) // %4 + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); +} +#endif // LIBYUV_USE_ST3 + +// Note ST2 8b version is faster than zip+ST1 + +// AYUV is VUYA in memory. UV for NV12 is UV order in memory. void AYUVToUVRow_NEON(const uint8_t* src_ayuv, int src_stride_ayuv, uint8_t* dst_uv, @@ -3619,8 +4241,8 @@ void AYUVToUVRow_NEON(const uint8_t* src_ayuv, "uaddlp v1.8h, v1.16b \n" // U 16 bytes -> 8 shorts. "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load next 16 "uadalp v0.8h, v4.16b \n" // V 16 bytes -> 8 shorts. - "prfm pldl1keep, [%1, 448] \n" "uadalp v1.8h, v5.16b \n" // U 16 bytes -> 8 shorts. + "prfm pldl1keep, [%1, 448] \n" "uqrshrn v3.8b, v0.8h, #2 \n" // 2x2 average "uqrshrn v2.8b, v1.8h, #2 \n" "subs %w3, %w3, #16 \n" // 16 processed per loop. @@ -3648,8 +4270,8 @@ void AYUVToVURow_NEON(const uint8_t* src_ayuv, "uaddlp v1.8h, v1.16b \n" // U 16 bytes -> 8 shorts. "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load next 16 "uadalp v0.8h, v4.16b \n" // V 16 bytes -> 8 shorts. - "prfm pldl1keep, [%1, 448] \n" "uadalp v1.8h, v5.16b \n" // U 16 bytes -> 8 shorts. + "prfm pldl1keep, [%1, 448] \n" "uqrshrn v0.8b, v0.8h, #2 \n" // 2x2 average "uqrshrn v1.8b, v1.8h, #2 \n" "subs %w3, %w3, #16 \n" // 16 processed per loop. 
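For reference, the kYUV24Shuffle constants added above drive a two-register TBL: indices 0-15 select from the 16 Y bytes and indices 16-31 from the 16 VU bytes, so the first entries {16, 17, 0, 16, 17, 1, ...} emit V0 U0 Y0, V0 U0 Y1, sharing each VU pair across two Y samples, which is the VUY byte order the comments describe. A scalar model of that lookup (illustrative C only, not part of the patch):

#include <stdint.h>

// Models "tbl vD.16b, {v0.16b, v1.16b}, vIdx.16b" for the in-range indices used here;
// y holds the 16 Y bytes (v0), vu holds the 16 interleaved VU bytes (v1).
static void nv21_to_yuv24_tbl(const uint8_t y[16], const uint8_t vu[16],
                              const uint8_t idx[16], uint8_t out[16]) {
  for (int i = 0; i < 16; ++i) {
    out[i] = (idx[i] < 16) ? y[idx[i]] : vu[idx[i] - 16];
  }
}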
@@ -3765,52 +4387,24 @@ void SplitUVRow_16_NEON(const uint16_t* src_uv, : "cc", "memory", "v0", "v1", "v2"); } -void MergeUVRow_16_NEON(const uint16_t* src_u, - const uint16_t* src_v, - uint16_t* dst_uv, - int depth, - int width) { - int shift = 16 - depth; - asm volatile( - "dup v2.8h, %w4 \n" - "1: \n" - "ld1 {v0.8h}, [%0], #16 \n" // load 8 U - "subs %w3, %w3, #8 \n" // 8 src pixels per loop - "ld1 {v1.8h}, [%1], #16 \n" // load 8 V - "ushl v0.8h, v0.8h, v2.8h \n" - "prfm pldl1keep, [%0, 448] \n" - "ushl v1.8h, v1.8h, v2.8h \n" - "prfm pldl1keep, [%1, 448] \n" - "st2 {v0.8h, v1.8h}, [%2], #32 \n" // store 8 UV pixels - "b.gt 1b \n" - : "+r"(src_u), // %0 - "+r"(src_v), // %1 - "+r"(dst_uv), // %2 - "+r"(width) // %3 - : "r"(shift) // %4 - : "cc", "memory", "v0", "v1", "v2"); -} - void MultiplyRow_16_NEON(const uint16_t* src_y, uint16_t* dst_y, int scale, int width) { asm volatile( - "dup v2.8h, %w2 \n" + "dup v2.8h, %w3 \n" "1: \n" "ldp q0, q1, [%0], #32 \n" "mul v0.8h, v0.8h, v2.8h \n" "prfm pldl1keep, [%0, 448] \n" "mul v1.8h, v1.8h, v2.8h \n" - "stp q0, q1, [%1] \n" // store 16 pixels - "add %1, %1, #32 \n" - "subs %w3, %w3, #16 \n" // 16 src pixels per loop + "stp q0, q1, [%1], #32 \n" // store 16 pixels + "subs %w2, %w2, #16 \n" // 16 src pixels per loop "b.gt 1b \n" : "+r"(src_y), // %0 "+r"(dst_y), // %1 - "+r"(scale), // %2 - "+r"(width) // %3 - : + "+r"(width) // %2 + : "r"(scale) // %3 : "cc", "memory", "v0", "v1", "v2"); } @@ -3819,7 +4413,7 @@ void DivideRow_16_NEON(const uint16_t* src_y, int scale, int width) { asm volatile( - "dup v0.8h, %w2 \n" + "dup v0.8h, %w3 \n" "1: \n" "ldp q1, q2, [%0], #32 \n" "ushll v3.4s, v1.4h, #0 \n" @@ -3835,18 +4429,45 @@ void DivideRow_16_NEON(const uint16_t* src_y, "shrn v4.4h, v4.4s, #16 \n" "shrn2 v3.8h, v1.4s, #16 \n" "shrn2 v4.8h, v2.4s, #16 \n" - "stp q3, q3, [%1] \n" // store 16 pixels - "add %1, %1, #32 \n" - "subs %w3, %w3, #16 \n" // 16 src pixels per loop + "stp q3, q3, [%1], #32 \n" // store 16 pixels + "subs %w2, %w2, #16 \n" // 16 src pixels per loop "b.gt 1b \n" : "+r"(src_y), // %0 "+r"(dst_y), // %1 - "+r"(scale), // %2 - "+r"(width) // %3 - : + "+r"(width) // %2 + : "r"(scale) // %3 : "cc", "memory", "v0", "v1", "v2", "v3", "v4"); } +// Use scale to convert lsb formats to msb, depending how many bits there are: +// 32768 = 9 bits = shr 1 +// 16384 = 10 bits = shr 2 +// 4096 = 12 bits = shr 4 +// 256 = 16 bits = shr 8 +void Convert16To8Row_NEON(const uint16_t* src_y, + uint8_t* dst_y, + int scale, + int width) { + int shift = 15 - __builtin_clz((int32_t)scale); // Negative shl is shr + asm volatile( + "dup v2.8h, %w3 \n" + "1: \n" + "ldp q0, q1, [%0], #32 \n" + "ushl v0.8h, v0.8h, v2.8h \n" // shr = v2 is negative + "ushl v1.8h, v1.8h, v2.8h \n" + "prfm pldl1keep, [%0, 448] \n" + "uqxtn v0.8b, v0.8h \n" + "uqxtn2 v0.16b, v1.8h \n" + "subs %w2, %w2, #16 \n" // 16 src pixels per loop + "str q0, [%1], #16 \n" // store 16 pixels + "b.gt 1b \n" + : "+r"(src_y), // %0 + "+r"(dst_y), // %1 + "+r"(width) // %2 + : "r"(shift) // %3 + : "cc", "memory", "v0", "v1", "v2"); +} + #endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__) #ifdef __cplusplus diff --git a/third-party/libyuv/third_party/libyuv/source/row_win.cc b/third-party/libyuv/third_party/libyuv/source/row_win.cc index 7dccacc7fd..c5a14f86fb 100644 --- a/third-party/libyuv/third_party/libyuv/source/row_win.cc +++ b/third-party/libyuv/third_party/libyuv/source/row_win.cc @@ -75,7 +75,7 @@ extern "C" { // Convert 8 pixels: 8 UV and 8 Y. 
#define YUVTORGB(yuvconstants) \ - xmm3 = _mm_sub_epi8(xmm3, _mm_set1_epi8(0x80)); \ + xmm3 = _mm_sub_epi8(xmm3, _mm_set1_epi8((char)0x80)); \ xmm4 = _mm_mulhi_epu16(xmm4, *(__m128i*)yuvconstants->kYToRgb); \ xmm4 = _mm_add_epi16(xmm4, *(__m128i*)yuvconstants->kYBiasToRgb); \ xmm0 = _mm_maddubs_epi16(*(__m128i*)yuvconstants->kUVToB, xmm3); \ @@ -2789,6 +2789,44 @@ __declspec(naked) void I422ToRGB24Row_SSSE3( } } +// 8 pixels. +// 8 UV values, mixed with 8 Y producing 8 RGB24 (24 bytes). +__declspec(naked) void I444ToRGB24Row_SSSE3( + const uint8_t* y_buf, + const uint8_t* u_buf, + const uint8_t* v_buf, + uint8_t* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width) { + __asm { + push esi + push edi + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width + sub edi, esi + movdqa xmm5, xmmword ptr kShuffleMaskARGBToRGB24_0 + movdqa xmm6, xmmword ptr kShuffleMaskARGBToRGB24 + + convertloop: + READYUV444 + YUVTORGB(ebx) + STORERGB24 + + sub ecx, 8 + jg convertloop + + pop ebx + pop edi + pop esi + ret + } +} + // 8 pixels // 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes). __declspec(naked) void I422ToRGB565Row_SSSE3( diff --git a/third-party/libyuv/third_party/libyuv/source/scale.cc b/third-party/libyuv/third_party/libyuv/source/scale.cc index 03b0486f76..16854c45c4 100644 --- a/third-party/libyuv/third_party/libyuv/source/scale.cc +++ b/third-party/libyuv/third_party/libyuv/source/scale.cc @@ -29,6 +29,7 @@ static __inline int Abs(int v) { } #define SUBSAMPLE(v, a, s) (v < 0) ? (-((-v + a) >> s)) : ((v + a) >> s) +#define CENTERSTART(dx, s) (dx < 0) ? -((-dx >> 1) + s) : ((dx >> 1) + s) // Scale plane, 1/2 // This is an optimized version for scaling down a plane to 1/2 of @@ -50,7 +51,7 @@ static void ScalePlaneDown2(int src_width, ? ScaleRowDown2_C : (filtering == kFilterLinear ? ScaleRowDown2Linear_C : ScaleRowDown2Box_C); - int row_stride = src_stride << 1; + int row_stride = src_stride * 2; (void)src_width; (void)src_height; if (!filtering) { @@ -104,21 +105,6 @@ static void ScalePlaneDown2(int src_width, } } #endif -#if defined(HAS_SCALEROWDOWN2_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleRowDown2 = - filtering == kFilterNone - ? ScaleRowDown2_Any_MMI - : (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_MMI - : ScaleRowDown2Box_Any_MMI); - if (IS_ALIGNED(dst_width, 8)) { - ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_MMI - : (filtering == kFilterLinear - ? ScaleRowDown2Linear_MMI - : ScaleRowDown2Box_MMI); - } - } -#endif #if defined(HAS_SCALEROWDOWN2_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleRowDown2 = @@ -134,6 +120,21 @@ static void ScalePlaneDown2(int src_width, } } #endif +#if defined(HAS_SCALEROWDOWN2_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleRowDown2 = + filtering == kFilterNone + ? ScaleRowDown2_Any_LSX + : (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_LSX + : ScaleRowDown2Box_Any_LSX); + if (IS_ALIGNED(dst_width, 32)) { + ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_LSX + : (filtering == kFilterLinear + ? ScaleRowDown2Linear_LSX + : ScaleRowDown2Box_LSX); + } + } +#endif if (filtering == kFilterLinear) { src_stride = 0; @@ -162,7 +163,7 @@ static void ScalePlaneDown2_16(int src_width, ? ScaleRowDown2_16_C : (filtering == kFilterLinear ? 
ScaleRowDown2Linear_16_C : ScaleRowDown2Box_16_C); - int row_stride = src_stride << 1; + int row_stride = src_stride * 2; (void)src_width; (void)src_height; if (!filtering) { @@ -185,14 +186,6 @@ static void ScalePlaneDown2_16(int src_width, : ScaleRowDown2Box_16_SSE2); } #endif -#if defined(HAS_SCALEROWDOWN2_16_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) { - ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_16_MMI - : (filtering == kFilterLinear - ? ScaleRowDown2Linear_16_MMI - : ScaleRowDown2Box_16_MMI); - } -#endif if (filtering == kFilterLinear) { src_stride = 0; @@ -222,7 +215,7 @@ static void ScalePlaneDown4(int src_width, void (*ScaleRowDown4)(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, int dst_width) = filtering ? ScaleRowDown4Box_C : ScaleRowDown4_C; - int row_stride = src_stride << 2; + int row_stride = src_stride * 4; (void)src_width; (void)src_height; if (!filtering) { @@ -256,15 +249,6 @@ static void ScalePlaneDown4(int src_width, } } #endif -#if defined(HAS_SCALEROWDOWN4_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleRowDown4 = - filtering ? ScaleRowDown4Box_Any_MMI : ScaleRowDown4_Any_MMI; - if (IS_ALIGNED(dst_width, 8)) { - ScaleRowDown4 = filtering ? ScaleRowDown4Box_MMI : ScaleRowDown4_MMI; - } - } -#endif #if defined(HAS_SCALEROWDOWN4_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleRowDown4 = @@ -274,6 +258,15 @@ static void ScalePlaneDown4(int src_width, } } #endif +#if defined(HAS_SCALEROWDOWN4_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleRowDown4 = + filtering ? ScaleRowDown4Box_Any_LSX : ScaleRowDown4_Any_LSX; + if (IS_ALIGNED(dst_width, 16)) { + ScaleRowDown4 = filtering ? ScaleRowDown4Box_LSX : ScaleRowDown4_LSX; + } + } +#endif if (filtering == kFilterLinear) { src_stride = 0; @@ -298,7 +291,7 @@ static void ScalePlaneDown4_16(int src_width, void (*ScaleRowDown4)(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, int dst_width) = filtering ? ScaleRowDown4Box_16_C : ScaleRowDown4_16_C; - int row_stride = src_stride << 2; + int row_stride = src_stride * 4; (void)src_width; (void)src_height; if (!filtering) { @@ -317,11 +310,6 @@ static void ScalePlaneDown4_16(int src_width, filtering ? ScaleRowDown4Box_16_SSE2 : ScaleRowDown4_16_SSE2; } #endif -#if defined(HAS_SCALEROWDOWN4_16_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 8)) { - ScaleRowDown4 = filtering ? 
ScaleRowDown4Box_16_MMI : ScaleRowDown4_16_MMI; - } -#endif if (filtering == kFilterLinear) { src_stride = 0; @@ -379,18 +367,6 @@ static void ScalePlaneDown34(int src_width, } } #endif -#if defined(HAS_SCALEROWDOWN34_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - if (!filtering) { - ScaleRowDown34_0 = ScaleRowDown34_Any_MMI; - ScaleRowDown34_1 = ScaleRowDown34_Any_MMI; - if (dst_width % 24 == 0) { - ScaleRowDown34_0 = ScaleRowDown34_MMI; - ScaleRowDown34_1 = ScaleRowDown34_MMI; - } - } - } -#endif #if defined(HAS_SCALEROWDOWN34_MSA) if (TestCpuFlag(kCpuHasMSA)) { if (!filtering) { @@ -411,6 +387,26 @@ static void ScalePlaneDown34(int src_width, } } #endif +#if defined(HAS_SCALEROWDOWN34_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + if (!filtering) { + ScaleRowDown34_0 = ScaleRowDown34_Any_LSX; + ScaleRowDown34_1 = ScaleRowDown34_Any_LSX; + } else { + ScaleRowDown34_0 = ScaleRowDown34_0_Box_Any_LSX; + ScaleRowDown34_1 = ScaleRowDown34_1_Box_Any_LSX; + } + if (dst_width % 48 == 0) { + if (!filtering) { + ScaleRowDown34_0 = ScaleRowDown34_LSX; + ScaleRowDown34_1 = ScaleRowDown34_LSX; + } else { + ScaleRowDown34_0 = ScaleRowDown34_0_Box_LSX; + ScaleRowDown34_1 = ScaleRowDown34_1_Box_LSX; + } + } + } +#endif #if defined(HAS_SCALEROWDOWN34_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { if (!filtering) { @@ -626,6 +622,26 @@ static void ScalePlaneDown38(int src_width, } } #endif +#if defined(HAS_SCALEROWDOWN38_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + if (!filtering) { + ScaleRowDown38_3 = ScaleRowDown38_Any_LSX; + ScaleRowDown38_2 = ScaleRowDown38_Any_LSX; + } else { + ScaleRowDown38_3 = ScaleRowDown38_3_Box_Any_LSX; + ScaleRowDown38_2 = ScaleRowDown38_2_Box_Any_LSX; + } + if (dst_width % 12 == 0) { + if (!filtering) { + ScaleRowDown38_3 = ScaleRowDown38_LSX; + ScaleRowDown38_2 = ScaleRowDown38_LSX; + } else { + ScaleRowDown38_3 = ScaleRowDown38_3_Box_LSX; + ScaleRowDown38_2 = ScaleRowDown38_2_Box_LSX; + } + } + } +#endif for (y = 0; y < dst_height - 2; y += 3) { ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width); @@ -891,14 +907,6 @@ static void ScalePlaneBox(int src_width, } } #endif -#if defined(HAS_SCALEADDROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleAddRow = ScaleAddRow_Any_MMI; - if (IS_ALIGNED(src_width, 8)) { - ScaleAddRow = ScaleAddRow_MMI; - } - } -#endif #if defined(HAS_SCALEADDROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleAddRow = ScaleAddRow_Any_MSA; @@ -907,11 +915,19 @@ static void ScalePlaneBox(int src_width, } } #endif +#if defined(HAS_SCALEADDROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleAddRow = ScaleAddRow_Any_LSX; + if (IS_ALIGNED(src_width, 16)) { + ScaleAddRow = ScaleAddRow_LSX; + } + } +#endif for (j = 0; j < dst_height; ++j) { int boxheight; int iy = y >> 16; - const uint8_t* src = src_ptr + iy * src_stride; + const uint8_t* src = src_ptr + iy * (int64_t)src_stride; y += dy; if (y > max_y) { y = max_y; @@ -962,15 +978,10 @@ static void ScalePlaneBox_16(int src_width, } #endif -#if defined(HAS_SCALEADDROW_16_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(src_width, 4)) { - ScaleAddRow = ScaleAddRow_16_MMI; - } -#endif for (j = 0; j < dst_height; ++j) { int boxheight; int iy = y >> 16; - const uint16_t* src = src_ptr + iy * src_stride; + const uint16_t* src = src_ptr + iy * (int64_t)src_stride; y += dy; if (y > max_y) { y = max_y; @@ -1043,14 +1054,6 @@ void ScalePlaneBilinearDown(int src_width, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(src_width, 16)) { - InterpolateRow = 
InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -1059,6 +1062,14 @@ void ScalePlaneBilinearDown(int src_width, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(src_width, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif #if defined(HAS_SCALEFILTERCOLS_SSSE3) if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) { @@ -1080,6 +1091,14 @@ void ScalePlaneBilinearDown(int src_width, ScaleFilterCols = ScaleFilterCols_MSA; } } +#endif +#if defined(HAS_SCALEFILTERCOLS_LSX) + if (TestCpuFlag(kCpuHasLSX) && src_width < 32768) { + ScaleFilterCols = ScaleFilterCols_Any_LSX; + if (IS_ALIGNED(dst_width, 16)) { + ScaleFilterCols = ScaleFilterCols_LSX; + } + } #endif if (y > max_y) { y = max_y; @@ -1087,7 +1106,7 @@ void ScalePlaneBilinearDown(int src_width, for (j = 0; j < dst_height; ++j) { int yi = y >> 16; - const uint8_t* src = src_ptr + yi * src_stride; + const uint8_t* src = src_ptr + yi * (int64_t)src_stride; if (filtering == kFilterLinear) { ScaleFilterCols(dst_ptr, src, dst_width, x, dx); } else { @@ -1136,7 +1155,7 @@ void ScalePlaneBilinearDown_16(int src_width, #if defined(HAS_INTERPOLATEROW_16_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_16_SSE2; + InterpolateRow = InterpolateRow_16_Any_SSE2; if (IS_ALIGNED(src_width, 16)) { InterpolateRow = InterpolateRow_16_SSE2; } @@ -1144,7 +1163,7 @@ void ScalePlaneBilinearDown_16(int src_width, #endif #if defined(HAS_INTERPOLATEROW_16_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - InterpolateRow = InterpolateRow_Any_16_SSSE3; + InterpolateRow = InterpolateRow_16_Any_SSSE3; if (IS_ALIGNED(src_width, 16)) { InterpolateRow = InterpolateRow_16_SSSE3; } @@ -1152,7 +1171,7 @@ void ScalePlaneBilinearDown_16(int src_width, #endif #if defined(HAS_INTERPOLATEROW_16_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - InterpolateRow = InterpolateRow_Any_16_AVX2; + InterpolateRow = InterpolateRow_16_Any_AVX2; if (IS_ALIGNED(src_width, 32)) { InterpolateRow = InterpolateRow_16_AVX2; } @@ -1160,7 +1179,7 @@ void ScalePlaneBilinearDown_16(int src_width, #endif #if defined(HAS_INTERPOLATEROW_16_NEON) if (TestCpuFlag(kCpuHasNEON)) { - InterpolateRow = InterpolateRow_Any_16_NEON; + InterpolateRow = InterpolateRow_16_Any_NEON; if (IS_ALIGNED(src_width, 16)) { InterpolateRow = InterpolateRow_16_NEON; } @@ -1178,7 +1197,7 @@ void ScalePlaneBilinearDown_16(int src_width, for (j = 0; j < dst_height; ++j) { int yi = y >> 16; - const uint16_t* src = src_ptr + yi * src_stride; + const uint16_t* src = src_ptr + yi * (int64_t)src_stride; if (filtering == kFilterLinear) { ScaleFilterCols(dst_ptr, src, dst_width, x, dx); } else { @@ -1270,6 +1289,14 @@ void ScalePlaneBilinearUp(int src_width, ScaleFilterCols = ScaleFilterCols_MSA; } } +#endif +#if defined(HAS_SCALEFILTERCOLS_LSX) + if (filtering && TestCpuFlag(kCpuHasLSX) && src_width < 32768) { + ScaleFilterCols = ScaleFilterCols_Any_LSX; + if (IS_ALIGNED(dst_width, 16)) { + ScaleFilterCols = ScaleFilterCols_LSX; + } + } #endif if (!filtering && src_width * 2 == dst_width && x < 0x8000) { ScaleFilterCols = ScaleColsUp2_C; @@ -1277,11 +1304,6 @@ void ScalePlaneBilinearUp(int src_width, if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) { ScaleFilterCols = ScaleColsUp2_SSE2; } -#endif -#if defined(HAS_SCALECOLS_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 8)) { - ScaleFilterCols = ScaleColsUp2_MMI; - 
} #endif } @@ -1290,14 +1312,14 @@ void ScalePlaneBilinearUp(int src_width, } { int yi = y >> 16; - const uint8_t* src = src_ptr + yi * src_stride; + const uint8_t* src = src_ptr + yi * (int64_t)src_stride; // Allocate 2 row buffers. - const int kRowSize = (dst_width + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (dst_width + 31) & ~31; + align_buffer_64(row, row_size * 2); uint8_t* rowptr = row; - int rowstride = kRowSize; + int rowstride = row_size; int lasty = yi; ScaleFilterCols(rowptr, src, dst_width, x, dx); @@ -1305,7 +1327,9 @@ void ScalePlaneBilinearUp(int src_width, src += src_stride; } ScaleFilterCols(rowptr + rowstride, src, dst_width, x, dx); - src += src_stride; + if (src_height > 2) { + src += src_stride; + } for (j = 0; j < dst_height; ++j) { yi = y >> 16; @@ -1313,14 +1337,16 @@ void ScalePlaneBilinearUp(int src_width, if (y > max_y) { y = max_y; yi = y >> 16; - src = src_ptr + yi * src_stride; + src = src_ptr + yi * (int64_t)src_stride; } if (yi != lasty) { ScaleFilterCols(rowptr, src, dst_width, x, dx); rowptr += rowstride; rowstride = -rowstride; lasty = yi; - src += src_stride; + if ((y + 65536) < max_y) { + src += src_stride; + } } } if (filtering == kFilterLinear) { @@ -1358,38 +1384,38 @@ void ScalePlaneUp2_Linear(int src_width, // This function can only scale up by 2 times horizontally. assert(src_width == ((dst_width + 1) / 2)); -#ifdef HAS_SCALEROWUP2LINEAR_SSE2 +#ifdef HAS_SCALEROWUP2_LINEAR_SSE2 if (TestCpuFlag(kCpuHasSSE2)) { ScaleRowUp = ScaleRowUp2_Linear_Any_SSE2; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_SSSE3 +#ifdef HAS_SCALEROWUP2_LINEAR_SSSE3 if (TestCpuFlag(kCpuHasSSSE3)) { ScaleRowUp = ScaleRowUp2_Linear_Any_SSSE3; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_AVX2 if (TestCpuFlag(kCpuHasAVX2)) { ScaleRowUp = ScaleRowUp2_Linear_Any_AVX2; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_NEON +#ifdef HAS_SCALEROWUP2_LINEAR_NEON if (TestCpuFlag(kCpuHasNEON)) { ScaleRowUp = ScaleRowUp2_Linear_Any_NEON; } #endif if (dst_height == 1) { - ScaleRowUp(src_ptr + ((src_height - 1) / 2) * src_stride, dst_ptr, + ScaleRowUp(src_ptr + ((src_height - 1) / 2) * (int64_t)src_stride, dst_ptr, dst_width); } else { dy = FixedDiv(src_height - 1, dst_height - 1); y = (1 << 15) - 1; for (i = 0; i < dst_height; ++i) { - ScaleRowUp(src_ptr + (y >> 16) * src_stride, dst_ptr, dst_width); + ScaleRowUp(src_ptr + (y >> 16) * (int64_t)src_stride, dst_ptr, dst_width); dst_ptr += dst_stride; y += dy; } @@ -1417,25 +1443,25 @@ void ScalePlaneUp2_Bilinear(int src_width, assert(src_width == ((dst_width + 1) / 2)); assert(src_height == ((dst_height + 1) / 2)); -#ifdef HAS_SCALEROWUP2BILINEAR_SSE2 +#ifdef HAS_SCALEROWUP2_BILINEAR_SSE2 if (TestCpuFlag(kCpuHasSSE2)) { Scale2RowUp = ScaleRowUp2_Bilinear_Any_SSE2; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_SSSE3 +#ifdef HAS_SCALEROWUP2_BILINEAR_SSSE3 if (TestCpuFlag(kCpuHasSSSE3)) { Scale2RowUp = ScaleRowUp2_Bilinear_Any_SSSE3; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_AVX2 if (TestCpuFlag(kCpuHasAVX2)) { Scale2RowUp = ScaleRowUp2_Bilinear_Any_AVX2; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_NEON +#ifdef HAS_SCALEROWUP2_BILINEAR_NEON if (TestCpuFlag(kCpuHasNEON)) { Scale2RowUp = ScaleRowUp2_Bilinear_Any_NEON; } @@ -1477,32 +1503,32 @@ void ScalePlaneUp2_12_Linear(int src_width, // This function can only scale up by 2 times horizontally. 
assert(src_width == ((dst_width + 1) / 2)); -#ifdef HAS_SCALEROWUP2LINEAR_12_SSSE3 +#ifdef HAS_SCALEROWUP2_LINEAR_12_SSSE3 if (TestCpuFlag(kCpuHasSSSE3)) { ScaleRowUp = ScaleRowUp2_Linear_12_Any_SSSE3; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_12_AVX2 if (TestCpuFlag(kCpuHasAVX2)) { ScaleRowUp = ScaleRowUp2_Linear_12_Any_AVX2; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_NEON +#ifdef HAS_SCALEROWUP2_LINEAR_12_NEON if (TestCpuFlag(kCpuHasNEON)) { ScaleRowUp = ScaleRowUp2_Linear_12_Any_NEON; } #endif if (dst_height == 1) { - ScaleRowUp(src_ptr + ((src_height - 1) / 2) * src_stride, dst_ptr, + ScaleRowUp(src_ptr + ((src_height - 1) / 2) * (int64_t)src_stride, dst_ptr, dst_width); } else { dy = FixedDiv(src_height - 1, dst_height - 1); y = (1 << 15) - 1; for (i = 0; i < dst_height; ++i) { - ScaleRowUp(src_ptr + (y >> 16) * src_stride, dst_ptr, dst_width); + ScaleRowUp(src_ptr + (y >> 16) * (int64_t)src_stride, dst_ptr, dst_width); dst_ptr += dst_stride; y += dy; } @@ -1531,19 +1557,19 @@ void ScalePlaneUp2_12_Bilinear(int src_width, assert(src_width == ((dst_width + 1) / 2)); assert(src_height == ((dst_height + 1) / 2)); -#ifdef HAS_SCALEROWUP2BILINEAR_12_SSSE3 +#ifdef HAS_SCALEROWUP2_BILINEAR_12_SSSE3 if (TestCpuFlag(kCpuHasSSSE3)) { Scale2RowUp = ScaleRowUp2_Bilinear_12_Any_SSSE3; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_12_AVX2 if (TestCpuFlag(kCpuHasAVX2)) { Scale2RowUp = ScaleRowUp2_Bilinear_12_Any_AVX2; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_NEON +#ifdef HAS_SCALEROWUP2_BILINEAR_12_NEON if (TestCpuFlag(kCpuHasNEON)) { Scale2RowUp = ScaleRowUp2_Bilinear_12_Any_NEON; } @@ -1578,32 +1604,32 @@ void ScalePlaneUp2_16_Linear(int src_width, // This function can only scale up by 2 times horizontally. 
assert(src_width == ((dst_width + 1) / 2)); -#ifdef HAS_SCALEROWUP2LINEAR_16_SSE2 +#ifdef HAS_SCALEROWUP2_LINEAR_16_SSE2 if (TestCpuFlag(kCpuHasSSE2)) { ScaleRowUp = ScaleRowUp2_Linear_16_Any_SSE2; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_16_AVX2 if (TestCpuFlag(kCpuHasAVX2)) { ScaleRowUp = ScaleRowUp2_Linear_16_Any_AVX2; } #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_NEON +#ifdef HAS_SCALEROWUP2_LINEAR_16_NEON if (TestCpuFlag(kCpuHasNEON)) { ScaleRowUp = ScaleRowUp2_Linear_16_Any_NEON; } #endif if (dst_height == 1) { - ScaleRowUp(src_ptr + ((src_height - 1) / 2) * src_stride, dst_ptr, + ScaleRowUp(src_ptr + ((src_height - 1) / 2) * (int64_t)src_stride, dst_ptr, dst_width); } else { dy = FixedDiv(src_height - 1, dst_height - 1); y = (1 << 15) - 1; for (i = 0; i < dst_height; ++i) { - ScaleRowUp(src_ptr + (y >> 16) * src_stride, dst_ptr, dst_width); + ScaleRowUp(src_ptr + (y >> 16) * (int64_t)src_stride, dst_ptr, dst_width); dst_ptr += dst_stride; y += dy; } @@ -1627,19 +1653,19 @@ void ScalePlaneUp2_16_Bilinear(int src_width, assert(src_width == ((dst_width + 1) / 2)); assert(src_height == ((dst_height + 1) / 2)); -#ifdef HAS_SCALEROWUP2BILINEAR_16_SSE2 - if (TestCpuFlag(kCpuHasSSSE3)) { - Scale2RowUp = ScaleRowUp2_Bilinear_16_Any_SSSE3; +#ifdef HAS_SCALEROWUP2_BILINEAR_16_SSE2 + if (TestCpuFlag(kCpuHasSSE2)) { + Scale2RowUp = ScaleRowUp2_Bilinear_16_Any_SSE2; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_16_AVX2 if (TestCpuFlag(kCpuHasAVX2)) { Scale2RowUp = ScaleRowUp2_Bilinear_16_Any_AVX2; } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_NEON +#ifdef HAS_SCALEROWUP2_BILINEAR_16_NEON if (TestCpuFlag(kCpuHasNEON)) { Scale2RowUp = ScaleRowUp2_Bilinear_16_Any_NEON; } @@ -1685,7 +1711,7 @@ void ScalePlaneBilinearUp_16(int src_width, #if defined(HAS_INTERPOLATEROW_16_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_16_SSE2; + InterpolateRow = InterpolateRow_16_Any_SSE2; if (IS_ALIGNED(dst_width, 16)) { InterpolateRow = InterpolateRow_16_SSE2; } @@ -1693,7 +1719,7 @@ void ScalePlaneBilinearUp_16(int src_width, #endif #if defined(HAS_INTERPOLATEROW_16_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - InterpolateRow = InterpolateRow_Any_16_SSSE3; + InterpolateRow = InterpolateRow_16_Any_SSSE3; if (IS_ALIGNED(dst_width, 16)) { InterpolateRow = InterpolateRow_16_SSSE3; } @@ -1701,7 +1727,7 @@ void ScalePlaneBilinearUp_16(int src_width, #endif #if defined(HAS_INTERPOLATEROW_16_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - InterpolateRow = InterpolateRow_Any_16_AVX2; + InterpolateRow = InterpolateRow_16_Any_AVX2; if (IS_ALIGNED(dst_width, 32)) { InterpolateRow = InterpolateRow_16_AVX2; } @@ -1709,7 +1735,7 @@ void ScalePlaneBilinearUp_16(int src_width, #endif #if defined(HAS_INTERPOLATEROW_16_NEON) if (TestCpuFlag(kCpuHasNEON)) { - InterpolateRow = InterpolateRow_Any_16_NEON; + InterpolateRow = InterpolateRow_16_Any_NEON; if (IS_ALIGNED(dst_width, 16)) { InterpolateRow = InterpolateRow_16_NEON; } @@ -1730,27 +1756,21 @@ void ScalePlaneBilinearUp_16(int src_width, if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) { ScaleFilterCols = ScaleColsUp2_16_SSE2; } -#endif -#if defined(HAS_SCALECOLS_16_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 8)) { - ScaleFilterCols = ScaleColsUp2_16_MMI; - } #endif } - if (y > max_y) { y = max_y; } { int yi = y >> 16; - const uint16_t* src = src_ptr + yi * src_stride; + const uint16_t* src = src_ptr + yi * (int64_t)src_stride; // Allocate 2 row buffers. 
- const int kRowSize = (dst_width + 31) & ~31; - align_buffer_64(row, kRowSize * 4); + const int row_size = (dst_width + 31) & ~31; + align_buffer_64(row, row_size * 4); uint16_t* rowptr = (uint16_t*)row; - int rowstride = kRowSize; + int rowstride = row_size; int lasty = yi; ScaleFilterCols(rowptr, src, dst_width, x, dx); @@ -1758,7 +1778,9 @@ void ScalePlaneBilinearUp_16(int src_width, src += src_stride; } ScaleFilterCols(rowptr + rowstride, src, dst_width, x, dx); - src += src_stride; + if (src_height > 2) { + src += src_stride; + } for (j = 0; j < dst_height; ++j) { yi = y >> 16; @@ -1766,14 +1788,16 @@ void ScalePlaneBilinearUp_16(int src_width, if (y > max_y) { y = max_y; yi = y >> 16; - src = src_ptr + yi * src_stride; + src = src_ptr + yi * (int64_t)src_stride; } if (yi != lasty) { ScaleFilterCols(rowptr, src, dst_width, x, dx); rowptr += rowstride; rowstride = -rowstride; lasty = yi; - src += src_stride; + if ((y + 65536) < max_y) { + src += src_stride; + } } } if (filtering == kFilterLinear) { @@ -1820,16 +1844,12 @@ static void ScalePlaneSimple(int src_width, if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) { ScaleCols = ScaleColsUp2_SSE2; } -#endif -#if defined(HAS_SCALECOLS_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 8)) { - ScaleCols = ScaleColsUp2_MMI; - } #endif } for (i = 0; i < dst_height; ++i) { - ScaleCols(dst_ptr, src_ptr + (y >> 16) * src_stride, dst_width, x, dx); + ScaleCols(dst_ptr, src_ptr + (y >> 16) * (int64_t)src_stride, dst_width, x, + dx); dst_ptr += dst_stride; y += dy; } @@ -1861,16 +1881,12 @@ static void ScalePlaneSimple_16(int src_width, if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) { ScaleCols = ScaleColsUp2_16_SSE2; } -#endif -#if defined(HAS_SCALECOLS_16_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 8)) { - ScaleCols = ScaleColsUp2_16_MMI; - } #endif } for (i = 0; i < dst_height; ++i) { - ScaleCols(dst_ptr, src_ptr + (y >> 16) * src_stride, dst_width, x, dx); + ScaleCols(dst_ptr, src_ptr + (y >> 16) * (int64_t)src_stride, dst_width, x, + dx); dst_ptr += dst_stride; y += dy; } @@ -1878,7 +1894,6 @@ static void ScalePlaneSimple_16(int src_width, // Scale a plane. // This function dispatches to a specialized scaler based on scale factor. - LIBYUV_API void ScalePlane(const uint8_t* src, int src_stride, @@ -1896,10 +1911,9 @@ void ScalePlane(const uint8_t* src, // Negative height means invert the image. if (src_height < 0) { src_height = -src_height; - src = src + (src_height - 1) * src_stride; + src = src + (src_height - 1) * (int64_t)src_stride; src_stride = -src_stride; } - // Use specialized scales to improve performance for common resolutions. // For example, all the 1/2 scalings will use ScalePlaneDown2() if (dst_width == src_width && dst_height == src_height) { @@ -1908,10 +1922,19 @@ void ScalePlane(const uint8_t* src, return; } if (dst_width == src_width && filtering != kFilterBox) { - int dy = FixedDiv(src_height, dst_height); + int dy = 0; + int y = 0; + // When scaling down, use the center 2 rows to filter. + // When scaling up, last row of destination uses the last 2 source rows. + if (dst_height <= src_height) { + dy = FixedDiv(src_height, dst_height); + y = CENTERSTART(dy, -32768); // Subtract 0.5 (32768) to center filter. + } else if (src_height > 1 && dst_height > 1) { + dy = FixedDiv1(src_height, dst_height); + } // Arbitrary scale vertically, but unscaled horizontally. 
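The kRowSize to row_size rename above keeps the same rounding expression. A small standalone check of what (bytes + 31) & ~31 does, with hypothetical widths: it rounds the per-row byte count up to the next multiple of 32, presumably so the second of the two ping-pong rows handed out by align_buffer_64 stays 32-byte aligned for the SIMD row functions.

#include <stdio.h>

int main(void) {
  int widths[2] = {1280, 1278}; /* hypothetical ARGB destination widths */
  for (int i = 0; i < 2; ++i) {
    int bytes = widths[i] * 4;
    int row_size = (bytes + 31) & ~31; /* round up to a multiple of 32 */
    printf("width %d: %d bytes -> row_size %d\n", widths[i], bytes, row_size);
  }
  return 0; /* prints 5120 -> 5120 and 5112 -> 5120 */
}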
ScalePlaneVertical(src_height, dst_width, dst_height, src_stride, - dst_stride, src, dst, 0, 0, dy, 1, filtering); + dst_stride, src, dst, 0, y, dy, /*bpp=*/1, filtering); return; } if (dst_width <= Abs(src_width) && dst_height <= src_height) { @@ -1990,10 +2013,9 @@ void ScalePlane_16(const uint16_t* src, // Negative height means invert the image. if (src_height < 0) { src_height = -src_height; - src = src + (src_height - 1) * src_stride; + src = src + (src_height - 1) * (int64_t)src_stride; src_stride = -src_stride; } - // Use specialized scales to improve performance for common resolutions. // For example, all the 1/2 scalings will use ScalePlaneDown2() if (dst_width == src_width && dst_height == src_height) { @@ -2002,10 +2024,22 @@ void ScalePlane_16(const uint16_t* src, return; } if (dst_width == src_width && filtering != kFilterBox) { - int dy = FixedDiv(src_height, dst_height); + int dy = 0; + int y = 0; + // When scaling down, use the center 2 rows to filter. + // When scaling up, last row of destination uses the last 2 source rows. + if (dst_height <= src_height) { + dy = FixedDiv(src_height, dst_height); + y = CENTERSTART(dy, -32768); // Subtract 0.5 (32768) to center filter. + // When scaling up, ensure the last row of destination uses the last + // source. Avoid divide by zero for dst_height but will do no scaling + // later. + } else if (src_height > 1 && dst_height > 1) { + dy = FixedDiv1(src_height, dst_height); + } // Arbitrary scale vertically, but unscaled horizontally. ScalePlaneVertical_16(src_height, dst_width, dst_height, src_stride, - dst_stride, src, dst, 0, 0, dy, 1, filtering); + dst_stride, src, dst, 0, y, dy, /*bpp=*/1, filtering); return; } if (dst_width <= Abs(src_width) && dst_height <= src_height) { @@ -2084,7 +2118,7 @@ void ScalePlane_12(const uint16_t* src, // Negative height means invert the image. if (src_height < 0) { src_height = -src_height; - src = src + (src_height - 1) * src_stride; + src = src + (src_height - 1) * (int64_t)src_stride; src_stride = -src_stride; } @@ -2129,6 +2163,7 @@ int I420Scale(const uint8_t* src_y, int src_halfheight = SUBSAMPLE(src_height, 1, 1); int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); int dst_halfheight = SUBSAMPLE(dst_height, 1, 1); + if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 || src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v || dst_width <= 0 || dst_height <= 0) { @@ -2166,6 +2201,7 @@ int I420Scale_16(const uint16_t* src_y, int src_halfheight = SUBSAMPLE(src_height, 1, 1); int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); int dst_halfheight = SUBSAMPLE(dst_height, 1, 1); + if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 || src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v || dst_width <= 0 || dst_height <= 0) { @@ -2203,6 +2239,7 @@ int I420Scale_12(const uint16_t* src_y, int src_halfheight = SUBSAMPLE(src_height, 1, 1); int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); int dst_halfheight = SUBSAMPLE(dst_height, 1, 1); + if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 || src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v || dst_width <= 0 || dst_height <= 0) { @@ -2320,6 +2357,117 @@ int I444Scale_12(const uint16_t* src_y, return 0; } +// Scale an I422 image. +// This function in turn calls a scaling function for each plane. 
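ScalePlane and ScalePlane_16 above now seed both dy and y for the width-preserving vertical path. A worked sketch of the 16.16 fixed-point stepping, assuming CENTERSTART(dy, -32768) reduces to (dy >> 1) - 32768 for positive dy (that is what the "subtract 0.5 to center filter" comment describes); the FixedDiv expression matches FixedDiv_C shown later in the scale_common.cc hunks.

#include <stdio.h>

int main(void) {
  int src_height = 720, dst_height = 480; /* hypothetical 1.5x vertical downscale */
  int dy = (int)(((long long)src_height << 16) / dst_height); /* FixedDiv: 98304 = 1.5 */
  int y = (dy >> 1) - 32768; /* assumed CENTERSTART(dy, -32768): 16384 = 0.25 */
  for (int j = 0; j < 3; ++j) {
    printf("dst row %d: src row %d, fraction %d/256\n", j, y >> 16, (y >> 8) & 255);
    y += dy;
  }
  return 0; /* src rows 0, 1, 3 with interpolation fractions 64, 192, 64 */
}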
+ +LIBYUV_API +int I422Scale(const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_u, + int src_stride_u, + const uint8_t* src_v, + int src_stride_v, + int src_width, + int src_height, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_u, + int dst_stride_u, + uint8_t* dst_v, + int dst_stride_v, + int dst_width, + int dst_height, + enum FilterMode filtering) { + int src_halfwidth = SUBSAMPLE(src_width, 1, 1); + int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); + + if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 || + src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v || + dst_width <= 0 || dst_height <= 0) { + return -1; + } + + ScalePlane(src_y, src_stride_y, src_width, src_height, dst_y, dst_stride_y, + dst_width, dst_height, filtering); + ScalePlane(src_u, src_stride_u, src_halfwidth, src_height, dst_u, + dst_stride_u, dst_halfwidth, dst_height, filtering); + ScalePlane(src_v, src_stride_v, src_halfwidth, src_height, dst_v, + dst_stride_v, dst_halfwidth, dst_height, filtering); + return 0; +} + +LIBYUV_API +int I422Scale_16(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + int src_width, + int src_height, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int dst_width, + int dst_height, + enum FilterMode filtering) { + int src_halfwidth = SUBSAMPLE(src_width, 1, 1); + int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); + + if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 || + src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v || + dst_width <= 0 || dst_height <= 0) { + return -1; + } + + ScalePlane_16(src_y, src_stride_y, src_width, src_height, dst_y, dst_stride_y, + dst_width, dst_height, filtering); + ScalePlane_16(src_u, src_stride_u, src_halfwidth, src_height, dst_u, + dst_stride_u, dst_halfwidth, dst_height, filtering); + ScalePlane_16(src_v, src_stride_v, src_halfwidth, src_height, dst_v, + dst_stride_v, dst_halfwidth, dst_height, filtering); + return 0; +} + +LIBYUV_API +int I422Scale_12(const uint16_t* src_y, + int src_stride_y, + const uint16_t* src_u, + int src_stride_u, + const uint16_t* src_v, + int src_stride_v, + int src_width, + int src_height, + uint16_t* dst_y, + int dst_stride_y, + uint16_t* dst_u, + int dst_stride_u, + uint16_t* dst_v, + int dst_stride_v, + int dst_width, + int dst_height, + enum FilterMode filtering) { + int src_halfwidth = SUBSAMPLE(src_width, 1, 1); + int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); + + if (!src_y || !src_u || !src_v || src_width <= 0 || src_height == 0 || + src_width > 32768 || src_height > 32768 || !dst_y || !dst_u || !dst_v || + dst_width <= 0 || dst_height <= 0) { + return -1; + } + + ScalePlane_12(src_y, src_stride_y, src_width, src_height, dst_y, dst_stride_y, + dst_width, dst_height, filtering); + ScalePlane_12(src_u, src_stride_u, src_halfwidth, src_height, dst_u, + dst_stride_u, dst_halfwidth, dst_height, filtering); + ScalePlane_12(src_v, src_stride_v, src_halfwidth, src_height, dst_v, + dst_stride_v, dst_halfwidth, dst_height, filtering); + return 0; +} + // Scale an NV12 image. // This function in turn calls a scaling function for each plane. 
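The three I422Scale entry points added above scale the half-width, full-height chroma planes through the same ScalePlane workhorse. A minimal usage sketch; the header name and namespace are assumptions, while the prototype and argument order are taken from the definition above.

#include <cstdint>
#include <vector>
#include "libyuv/scale.h" /* assumed to declare I422Scale next to I420Scale */

int ScaleI422Example() {
  const int sw = 640, sh = 480, dw = 320, dh = 240; /* hypothetical sizes */
  std::vector<uint8_t> sy(sw * sh), su((sw / 2) * sh), sv((sw / 2) * sh);
  std::vector<uint8_t> dy(dw * dh), du((dw / 2) * dh), dv((dw / 2) * dh);
  /* I422 chroma is half width but full height, so the U/V planes keep sh/dh rows. */
  return libyuv::I422Scale(sy.data(), sw, su.data(), sw / 2, sv.data(), sw / 2,
                           sw, sh, dy.data(), dw, du.data(), dw / 2, dv.data(),
                           dw / 2, dw, dh, libyuv::kFilterBilinear);
}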
@@ -2341,6 +2489,7 @@ int NV12Scale(const uint8_t* src_y, int src_halfheight = SUBSAMPLE(src_height, 1, 1); int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1); int dst_halfheight = SUBSAMPLE(dst_height, 1, 1); + if (!src_y || !src_uv || src_width <= 0 || src_height == 0 || src_width > 32768 || src_height > 32768 || !dst_y || !dst_uv || dst_width <= 0 || dst_height <= 0) { diff --git a/third-party/libyuv/third_party/libyuv/source/scale_any.cc b/third-party/libyuv/third_party/libyuv/source/scale_any.cc index 965749c415..317041f806 100644 --- a/third-party/libyuv/third_party/libyuv/source/scale_any.cc +++ b/third-party/libyuv/third_party/libyuv/source/scale_any.cc @@ -76,7 +76,7 @@ SDANY(ScaleUVRowDown2Box_Any_SSSE3, ScaleUVRowDown2Box_C, 2, 2, - 4) + 3) #endif #ifdef HAS_SCALEUVROWDOWN2BOX_AVX2 SDANY(ScaleUVRowDown2Box_Any_AVX2, @@ -84,7 +84,7 @@ SDANY(ScaleUVRowDown2Box_Any_AVX2, ScaleUVRowDown2Box_C, 2, 2, - 8) + 7) #endif #ifdef HAS_SCALEROWDOWN2_AVX2 SDANY(ScaleRowDown2_Any_AVX2, ScaleRowDown2_AVX2, ScaleRowDown2_C, 2, 1, 31) @@ -134,7 +134,7 @@ SDANY(ScaleUVRowDown2Box_Any_NEON, ScaleUVRowDown2Box_C, 2, 2, - 8) + 7) #endif #ifdef HAS_SCALEROWDOWN2_MSA @@ -152,26 +152,20 @@ SDANY(ScaleRowDown2Box_Any_MSA, 1, 31) #endif -#ifdef HAS_SCALEROWDOWN2_MMI -SDANY(ScaleRowDown2_Any_MMI, ScaleRowDown2_MMI, ScaleRowDown2_C, 2, 1, 7) -SDANY(ScaleRowDown2Linear_Any_MMI, - ScaleRowDown2Linear_MMI, +#ifdef HAS_SCALEROWDOWN2_LSX +SDANY(ScaleRowDown2_Any_LSX, ScaleRowDown2_LSX, ScaleRowDown2_C, 2, 1, 31) +SDANY(ScaleRowDown2Linear_Any_LSX, + ScaleRowDown2Linear_LSX, ScaleRowDown2Linear_C, 2, 1, - 7) -SDANY(ScaleRowDown2Box_Any_MMI, - ScaleRowDown2Box_MMI, + 31) +SDANY(ScaleRowDown2Box_Any_LSX, + ScaleRowDown2Box_LSX, ScaleRowDown2Box_C, 2, 1, - 7) -SDODD(ScaleRowDown2Box_Odd_MMI, - ScaleRowDown2Box_MMI, - ScaleRowDown2Box_Odd_C, - 2, - 1, - 7) + 31) #endif #ifdef HAS_SCALEROWDOWN4_SSSE3 SDANY(ScaleRowDown4_Any_SSSE3, ScaleRowDown4_SSSE3, ScaleRowDown4_C, 4, 1, 7) @@ -209,14 +203,14 @@ SDANY(ScaleRowDown4Box_Any_MSA, 1, 15) #endif -#ifdef HAS_SCALEROWDOWN4_MMI -SDANY(ScaleRowDown4_Any_MMI, ScaleRowDown4_MMI, ScaleRowDown4_C, 4, 1, 7) -SDANY(ScaleRowDown4Box_Any_MMI, - ScaleRowDown4Box_MMI, +#ifdef HAS_SCALEROWDOWN4_LSX +SDANY(ScaleRowDown4_Any_LSX, ScaleRowDown4_LSX, ScaleRowDown4_C, 4, 1, 15) +SDANY(ScaleRowDown4Box_Any_LSX, + ScaleRowDown4Box_LSX, ScaleRowDown4Box_C, 4, 1, - 7) + 15) #endif #ifdef HAS_SCALEROWDOWN34_SSSE3 SDANY(ScaleRowDown34_Any_SSSE3, @@ -278,13 +272,25 @@ SDANY(ScaleRowDown34_1_Box_Any_MSA, 1, 47) #endif -#ifdef HAS_SCALEROWDOWN34_MMI -SDANY(ScaleRowDown34_Any_MMI, - ScaleRowDown34_MMI, +#ifdef HAS_SCALEROWDOWN34_LSX +SDANY(ScaleRowDown34_Any_LSX, + ScaleRowDown34_LSX, ScaleRowDown34_C, 4 / 3, 1, - 23) + 47) +SDANY(ScaleRowDown34_0_Box_Any_LSX, + ScaleRowDown34_0_Box_LSX, + ScaleRowDown34_0_Box_C, + 4 / 3, + 1, + 47) +SDANY(ScaleRowDown34_1_Box_Any_LSX, + ScaleRowDown34_1_Box_LSX, + ScaleRowDown34_1_Box_C, + 4 / 3, + 1, + 47) #endif #ifdef HAS_SCALEROWDOWN38_SSSE3 SDANY(ScaleRowDown38_Any_SSSE3, @@ -346,6 +352,26 @@ SDANY(ScaleRowDown38_2_Box_Any_MSA, 1, 11) #endif +#ifdef HAS_SCALEROWDOWN38_LSX +SDANY(ScaleRowDown38_Any_LSX, + ScaleRowDown38_LSX, + ScaleRowDown38_C, + 8 / 3, + 1, + 11) +SDANY(ScaleRowDown38_3_Box_Any_LSX, + ScaleRowDown38_3_Box_LSX, + ScaleRowDown38_3_Box_C, + 8 / 3, + 1, + 11) +SDANY(ScaleRowDown38_2_Box_Any_LSX, + ScaleRowDown38_2_Box_LSX, + ScaleRowDown38_2_Box_C, + 8 / 3, + 1, + 11) +#endif #ifdef HAS_SCALEARGBROWDOWN2_SSE2 SDANY(ScaleARGBRowDown2_Any_SSE2, @@ 
-407,25 +433,25 @@ SDANY(ScaleARGBRowDown2Box_Any_MSA, 4, 3) #endif -#ifdef HAS_SCALEARGBROWDOWN2_MMI -SDANY(ScaleARGBRowDown2_Any_MMI, - ScaleARGBRowDown2_MMI, +#ifdef HAS_SCALEARGBROWDOWN2_LSX +SDANY(ScaleARGBRowDown2_Any_LSX, + ScaleARGBRowDown2_LSX, ScaleARGBRowDown2_C, 2, 4, - 1) -SDANY(ScaleARGBRowDown2Linear_Any_MMI, - ScaleARGBRowDown2Linear_MMI, + 3) +SDANY(ScaleARGBRowDown2Linear_Any_LSX, + ScaleARGBRowDown2Linear_LSX, ScaleARGBRowDown2Linear_C, 2, 4, - 1) -SDANY(ScaleARGBRowDown2Box_Any_MMI, - ScaleARGBRowDown2Box_MMI, + 3) +SDANY(ScaleARGBRowDown2Box_Any_LSX, + ScaleARGBRowDown2Box_LSX, ScaleARGBRowDown2Box_C, 2, 4, - 1) + 3) #endif #undef SDANY @@ -478,17 +504,17 @@ SDAANY(ScaleARGBRowDownEvenBox_Any_MSA, 4, 3) #endif -#ifdef HAS_SCALEARGBROWDOWNEVEN_MMI -SDAANY(ScaleARGBRowDownEven_Any_MMI, - ScaleARGBRowDownEven_MMI, +#ifdef HAS_SCALEARGBROWDOWNEVEN_LSX +SDAANY(ScaleARGBRowDownEven_Any_LSX, + ScaleARGBRowDownEven_LSX, ScaleARGBRowDownEven_C, 4, - 1) -SDAANY(ScaleARGBRowDownEvenBox_Any_MMI, - ScaleARGBRowDownEvenBox_MMI, + 3) +SDAANY(ScaleARGBRowDownEvenBox_Any_LSX, + ScaleARGBRowDownEvenBox_LSX, ScaleARGBRowDownEvenBox_C, 4, - 1) + 3) #endif #ifdef HAS_SCALEUVROWDOWNEVEN_NEON SDAANY(ScaleUVRowDownEven_Any_NEON, @@ -530,8 +556,8 @@ SAROW(ScaleAddRow_Any_NEON, ScaleAddRow_NEON, 1, 2, 15) #ifdef HAS_SCALEADDROW_MSA SAROW(ScaleAddRow_Any_MSA, ScaleAddRow_MSA, 1, 2, 15) #endif -#ifdef HAS_SCALEADDROW_MMI -SAROW(ScaleAddRow_Any_MMI, ScaleAddRow_MMI, 1, 2, 7) +#ifdef HAS_SCALEADDROW_LSX +SAROW(ScaleAddRow_Any_LSX, ScaleAddRow_LSX, 1, 2, 15) #endif #undef SAANY @@ -559,8 +585,8 @@ SAANY(ScaleAddRow_Any_NEON, ScaleAddRow_NEON, ScaleAddRow_C, 15) #ifdef HAS_SCALEADDROW_MSA SAANY(ScaleAddRow_Any_MSA, ScaleAddRow_MSA, ScaleAddRow_C, 15) #endif -#ifdef HAS_SCALEADDROW_MMI -SAANY(ScaleAddRow_Any_MMI, ScaleAddRow_MMI, ScaleAddRow_C, 7) +#ifdef HAS_SCALEADDROW_LSX +SAANY(ScaleAddRow_Any_LSX, ScaleAddRow_LSX, ScaleAddRow_C, 15) #endif #undef SAANY @@ -584,14 +610,17 @@ CANY(ScaleFilterCols_Any_NEON, ScaleFilterCols_NEON, ScaleFilterCols_C, 1, 7) #ifdef HAS_SCALEFILTERCOLS_MSA CANY(ScaleFilterCols_Any_MSA, ScaleFilterCols_MSA, ScaleFilterCols_C, 1, 15) #endif +#ifdef HAS_SCALEFILTERCOLS_LSX +CANY(ScaleFilterCols_Any_LSX, ScaleFilterCols_LSX, ScaleFilterCols_C, 1, 15) +#endif #ifdef HAS_SCALEARGBCOLS_NEON CANY(ScaleARGBCols_Any_NEON, ScaleARGBCols_NEON, ScaleARGBCols_C, 4, 7) #endif #ifdef HAS_SCALEARGBCOLS_MSA CANY(ScaleARGBCols_Any_MSA, ScaleARGBCols_MSA, ScaleARGBCols_C, 4, 3) #endif -#ifdef HAS_SCALEARGBCOLS_MMI -CANY(ScaleARGBCols_Any_MMI, ScaleARGBCols_MMI, ScaleARGBCols_C, 4, 0) +#ifdef HAS_SCALEARGBCOLS_LSX +CANY(ScaleARGBCols_Any_LSX, ScaleARGBCols_LSX, ScaleARGBCols_C, 4, 3) #endif #ifdef HAS_SCALEARGBFILTERCOLS_NEON CANY(ScaleARGBFilterCols_Any_NEON, @@ -607,6 +636,13 @@ CANY(ScaleARGBFilterCols_Any_MSA, 4, 7) #endif +#ifdef HAS_SCALEARGBFILTERCOLS_LSX +CANY(ScaleARGBFilterCols_Any_LSX, + ScaleARGBFilterCols_LSX, + ScaleARGBFilterCols_C, + 4, + 7) +#endif #undef CANY // Scale up horizontally 2 times using linear filter. 
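Several of the *_Any row wrappers above change their trailing mask argument (for example the new ScaleARGBRowDown2_Any_LSX uses 3 where the removed MMI variant used 1). A rough sketch, not the actual SDANY macro, of how the mask is used: mask + 1 is the output-width granularity the SIMD row function supports, and the remainder is handed to the C row function.

#include <stddef.h>
#include <stdint.h>

typedef void (*ScaleRowDown2Fn)(const uint8_t* src, ptrdiff_t stride,
                                uint8_t* dst, int dst_width);

/* Illustration only: the real SDANY macro also parameterizes the source step
   factor and the bytes per pixel; mask is its last argument. */
static void RowDown2_Any(const uint8_t* src, ptrdiff_t stride, uint8_t* dst,
                         int dst_width, ScaleRowDown2Fn simd,
                         ScaleRowDown2Fn c_ref, int mask) {
  int r = dst_width % (mask + 1); /* leftover output pixels */
  int n = dst_width - r;          /* portion the SIMD row function can take */
  if (n > 0) {
    simd(src, stride, dst, n);
  }
  c_ref(src + n * 2, stride, dst + n, r); /* 2:1 scale: 2 source pixels per output */
}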
@@ -622,7 +658,7 @@ CANY(ScaleARGBFilterCols_Any_MSA, } \ C(src_ptr + (n / 2), dst_ptr + n + 1, r); \ } \ - dst_ptr[dst_width - 1] = src_ptr[(dst_width / 2) - 1]; \ + dst_ptr[dst_width - 1] = src_ptr[(dst_width - 1) / 2]; \ } // Even the C versions need to be wrapped, because boundary pixels have to @@ -640,7 +676,7 @@ SUH2LANY(ScaleRowUp2_Linear_16_Any_C, 0, uint16_t) -#ifdef HAS_SCALEROWUP2LINEAR_SSE2 +#ifdef HAS_SCALEROWUP2_LINEAR_SSE2 SUH2LANY(ScaleRowUp2_Linear_Any_SSE2, ScaleRowUp2_Linear_SSE2, ScaleRowUp2_Linear_C, @@ -648,7 +684,7 @@ SUH2LANY(ScaleRowUp2_Linear_Any_SSE2, uint8_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_SSSE3 +#ifdef HAS_SCALEROWUP2_LINEAR_SSSE3 SUH2LANY(ScaleRowUp2_Linear_Any_SSSE3, ScaleRowUp2_Linear_SSSE3, ScaleRowUp2_Linear_C, @@ -656,7 +692,7 @@ SUH2LANY(ScaleRowUp2_Linear_Any_SSSE3, uint8_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_SSSE3 +#ifdef HAS_SCALEROWUP2_LINEAR_12_SSSE3 SUH2LANY(ScaleRowUp2_Linear_12_Any_SSSE3, ScaleRowUp2_Linear_12_SSSE3, ScaleRowUp2_Linear_16_C, @@ -664,7 +700,7 @@ SUH2LANY(ScaleRowUp2_Linear_12_Any_SSSE3, uint16_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_SSE2 +#ifdef HAS_SCALEROWUP2_LINEAR_16_SSE2 SUH2LANY(ScaleRowUp2_Linear_16_Any_SSE2, ScaleRowUp2_Linear_16_SSE2, ScaleRowUp2_Linear_16_C, @@ -672,7 +708,7 @@ SUH2LANY(ScaleRowUp2_Linear_16_Any_SSE2, uint16_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_AVX2 SUH2LANY(ScaleRowUp2_Linear_Any_AVX2, ScaleRowUp2_Linear_AVX2, ScaleRowUp2_Linear_C, @@ -680,7 +716,7 @@ SUH2LANY(ScaleRowUp2_Linear_Any_AVX2, uint8_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_12_AVX2 SUH2LANY(ScaleRowUp2_Linear_12_Any_AVX2, ScaleRowUp2_Linear_12_AVX2, ScaleRowUp2_Linear_16_C, @@ -688,7 +724,7 @@ SUH2LANY(ScaleRowUp2_Linear_12_Any_AVX2, uint16_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_16_AVX2 SUH2LANY(ScaleRowUp2_Linear_16_Any_AVX2, ScaleRowUp2_Linear_16_AVX2, ScaleRowUp2_Linear_16_C, @@ -696,7 +732,7 @@ SUH2LANY(ScaleRowUp2_Linear_16_Any_AVX2, uint16_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_NEON +#ifdef HAS_SCALEROWUP2_LINEAR_NEON SUH2LANY(ScaleRowUp2_Linear_Any_NEON, ScaleRowUp2_Linear_NEON, ScaleRowUp2_Linear_C, @@ -704,7 +740,7 @@ SUH2LANY(ScaleRowUp2_Linear_Any_NEON, uint8_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_NEON +#ifdef HAS_SCALEROWUP2_LINEAR_12_NEON SUH2LANY(ScaleRowUp2_Linear_12_Any_NEON, ScaleRowUp2_Linear_12_NEON, ScaleRowUp2_Linear_16_C, @@ -712,7 +748,7 @@ SUH2LANY(ScaleRowUp2_Linear_12_Any_NEON, uint16_t) #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_NEON +#ifdef HAS_SCALEROWUP2_LINEAR_16_NEON SUH2LANY(ScaleRowUp2_Linear_16_Any_NEON, ScaleRowUp2_Linear_16_NEON, ScaleRowUp2_Linear_16_C, @@ -760,7 +796,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_16_Any_C, 0, uint16_t) -#ifdef HAS_SCALEROWUP2BILINEAR_SSE2 +#ifdef HAS_SCALEROWUP2_BILINEAR_SSE2 SU2BLANY(ScaleRowUp2_Bilinear_Any_SSE2, ScaleRowUp2_Bilinear_SSE2, ScaleRowUp2_Bilinear_C, @@ -768,7 +804,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_Any_SSE2, uint8_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_SSSE3 +#ifdef HAS_SCALEROWUP2_BILINEAR_12_SSSE3 SU2BLANY(ScaleRowUp2_Bilinear_12_Any_SSSE3, ScaleRowUp2_Bilinear_12_SSSE3, ScaleRowUp2_Bilinear_16_C, @@ -776,15 +812,15 @@ SU2BLANY(ScaleRowUp2_Bilinear_12_Any_SSSE3, uint16_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_SSE2 -SU2BLANY(ScaleRowUp2_Bilinear_16_Any_SSSE3, +#ifdef HAS_SCALEROWUP2_BILINEAR_16_SSE2 +SU2BLANY(ScaleRowUp2_Bilinear_16_Any_SSE2, ScaleRowUp2_Bilinear_16_SSE2, ScaleRowUp2_Bilinear_16_C, 7, uint16_t) #endif 
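Near the top of the hunk above, the last-pixel index in the SUH2LANY wrapper changes from (dst_width / 2) - 1 to (dst_width - 1) / 2. A quick arithmetic check for an odd output width, where src_width == (dst_width + 1) / 2, so the final source sample sits at index (dst_width - 1) / 2:

#include <stdio.h>

int main(void) {
  for (int dst_width = 8; dst_width <= 9; ++dst_width) {
    int old_idx = (dst_width / 2) - 1; /* previous expression */
    int new_idx = (dst_width - 1) / 2; /* patched expression */
    printf("dst_width %d: last dst pixel copies src[%d] (was src[%d])\n",
           dst_width, new_idx, old_idx);
  }
  return 0; /* identical for even widths; for dst_width 9 the fix reads src[4], not src[3] */
}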
-#ifdef HAS_SCALEROWUP2BILINEAR_SSSE3 +#ifdef HAS_SCALEROWUP2_BILINEAR_SSSE3 SU2BLANY(ScaleRowUp2_Bilinear_Any_SSSE3, ScaleRowUp2_Bilinear_SSSE3, ScaleRowUp2_Bilinear_C, @@ -792,7 +828,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_Any_SSSE3, uint8_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_AVX2 SU2BLANY(ScaleRowUp2_Bilinear_Any_AVX2, ScaleRowUp2_Bilinear_AVX2, ScaleRowUp2_Bilinear_C, @@ -800,7 +836,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_Any_AVX2, uint8_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_12_AVX2 SU2BLANY(ScaleRowUp2_Bilinear_12_Any_AVX2, ScaleRowUp2_Bilinear_12_AVX2, ScaleRowUp2_Bilinear_16_C, @@ -808,7 +844,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_12_Any_AVX2, uint16_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_16_AVX2 SU2BLANY(ScaleRowUp2_Bilinear_16_Any_AVX2, ScaleRowUp2_Bilinear_16_AVX2, ScaleRowUp2_Bilinear_16_C, @@ -816,7 +852,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_16_Any_AVX2, uint16_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_NEON +#ifdef HAS_SCALEROWUP2_BILINEAR_NEON SU2BLANY(ScaleRowUp2_Bilinear_Any_NEON, ScaleRowUp2_Bilinear_NEON, ScaleRowUp2_Bilinear_C, @@ -824,7 +860,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_Any_NEON, uint8_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_NEON +#ifdef HAS_SCALEROWUP2_BILINEAR_12_NEON SU2BLANY(ScaleRowUp2_Bilinear_12_Any_NEON, ScaleRowUp2_Bilinear_12_NEON, ScaleRowUp2_Bilinear_16_C, @@ -832,7 +868,7 @@ SU2BLANY(ScaleRowUp2_Bilinear_12_Any_NEON, uint16_t) #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_NEON +#ifdef HAS_SCALEROWUP2_BILINEAR_16_NEON SU2BLANY(ScaleRowUp2_Bilinear_16_Any_NEON, ScaleRowUp2_Bilinear_16_NEON, ScaleRowUp2_Bilinear_16_C, @@ -872,7 +908,7 @@ SBUH2LANY(ScaleUVRowUp2_Linear_16_Any_C, 0, uint16_t) -#ifdef HAS_SCALEUVROWUP2LINEAR_SSSE3 +#ifdef HAS_SCALEUVROWUP2_LINEAR_SSSE3 SBUH2LANY(ScaleUVRowUp2_Linear_Any_SSSE3, ScaleUVRowUp2_Linear_SSSE3, ScaleUVRowUp2_Linear_C, @@ -880,7 +916,7 @@ SBUH2LANY(ScaleUVRowUp2_Linear_Any_SSSE3, uint8_t) #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_AVX2 +#ifdef HAS_SCALEUVROWUP2_LINEAR_AVX2 SBUH2LANY(ScaleUVRowUp2_Linear_Any_AVX2, ScaleUVRowUp2_Linear_AVX2, ScaleUVRowUp2_Linear_C, @@ -888,15 +924,15 @@ SBUH2LANY(ScaleUVRowUp2_Linear_Any_AVX2, uint8_t) #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_16_SSE2 -SBUH2LANY(ScaleUVRowUp2_Linear_16_Any_SSE2, - ScaleUVRowUp2_Linear_16_SSE2, +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_SSE41 +SBUH2LANY(ScaleUVRowUp2_Linear_16_Any_SSE41, + ScaleUVRowUp2_Linear_16_SSE41, ScaleUVRowUp2_Linear_16_C, 3, uint16_t) #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_16_AVX2 +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_AVX2 SBUH2LANY(ScaleUVRowUp2_Linear_16_Any_AVX2, ScaleUVRowUp2_Linear_16_AVX2, ScaleUVRowUp2_Linear_16_C, @@ -904,7 +940,7 @@ SBUH2LANY(ScaleUVRowUp2_Linear_16_Any_AVX2, uint16_t) #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_NEON +#ifdef HAS_SCALEUVROWUP2_LINEAR_NEON SBUH2LANY(ScaleUVRowUp2_Linear_Any_NEON, ScaleUVRowUp2_Linear_NEON, ScaleUVRowUp2_Linear_C, @@ -912,7 +948,7 @@ SBUH2LANY(ScaleUVRowUp2_Linear_Any_NEON, uint8_t) #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_16_NEON +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_NEON SBUH2LANY(ScaleUVRowUp2_Linear_16_Any_NEON, ScaleUVRowUp2_Linear_16_NEON, ScaleUVRowUp2_Linear_16_C, @@ -970,7 +1006,7 @@ SBU2BLANY(ScaleUVRowUp2_Bilinear_16_Any_C, 0, uint16_t) -#ifdef HAS_SCALEUVROWUP2BILINEAR_SSSE3 +#ifdef HAS_SCALEUVROWUP2_BILINEAR_SSSE3 SBU2BLANY(ScaleUVRowUp2_Bilinear_Any_SSSE3, ScaleUVRowUp2_Bilinear_SSSE3, ScaleUVRowUp2_Bilinear_C, @@ -978,7 +1014,7 @@ 
SBU2BLANY(ScaleUVRowUp2_Bilinear_Any_SSSE3, uint8_t) #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_AVX2 +#ifdef HAS_SCALEUVROWUP2_BILINEAR_AVX2 SBU2BLANY(ScaleUVRowUp2_Bilinear_Any_AVX2, ScaleUVRowUp2_Bilinear_AVX2, ScaleUVRowUp2_Bilinear_C, @@ -986,15 +1022,15 @@ SBU2BLANY(ScaleUVRowUp2_Bilinear_Any_AVX2, uint8_t) #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_16_SSE2 -SBU2BLANY(ScaleUVRowUp2_Bilinear_16_Any_SSE2, - ScaleUVRowUp2_Bilinear_16_SSE2, +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_SSE41 +SBU2BLANY(ScaleUVRowUp2_Bilinear_16_Any_SSE41, + ScaleUVRowUp2_Bilinear_16_SSE41, ScaleUVRowUp2_Bilinear_16_C, 7, uint16_t) #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_16_AVX2 +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_AVX2 SBU2BLANY(ScaleUVRowUp2_Bilinear_16_Any_AVX2, ScaleUVRowUp2_Bilinear_16_AVX2, ScaleUVRowUp2_Bilinear_16_C, @@ -1002,7 +1038,7 @@ SBU2BLANY(ScaleUVRowUp2_Bilinear_16_Any_AVX2, uint16_t) #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_NEON +#ifdef HAS_SCALEUVROWUP2_BILINEAR_NEON SBU2BLANY(ScaleUVRowUp2_Bilinear_Any_NEON, ScaleUVRowUp2_Bilinear_NEON, ScaleUVRowUp2_Bilinear_C, @@ -1010,7 +1046,7 @@ SBU2BLANY(ScaleUVRowUp2_Bilinear_Any_NEON, uint8_t) #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_16_NEON +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_NEON SBU2BLANY(ScaleUVRowUp2_Bilinear_16_Any_NEON, ScaleUVRowUp2_Bilinear_16_NEON, ScaleUVRowUp2_Bilinear_16_C, diff --git a/third-party/libyuv/third_party/libyuv/source/scale_argb.cc b/third-party/libyuv/third_party/libyuv/source/scale_argb.cc index 451d4ec4d1..07840d6511 100644 --- a/third-party/libyuv/third_party/libyuv/source/scale_argb.cc +++ b/third-party/libyuv/third_party/libyuv/source/scale_argb.cc @@ -58,9 +58,9 @@ static void ScaleARGBDown2(int src_width, assert((dy & 0x1ffff) == 0); // Test vertical scale is multiple of 2. // Advance to odd row, even column. if (filtering == kFilterBilinear) { - src_argb += (y >> 16) * src_stride + (x >> 16) * 4; + src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4; } else { - src_argb += (y >> 16) * src_stride + ((x >> 16) - 1) * 4; + src_argb += (y >> 16) * (int64_t)src_stride + ((x >> 16) - 1) * 4; } #if defined(HAS_SCALEARGBROWDOWN2_SSE2) @@ -95,22 +95,6 @@ static void ScaleARGBDown2(int src_width, } } #endif -#if defined(HAS_SCALEARGBROWDOWN2_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleARGBRowDown2 = - filtering == kFilterNone - ? ScaleARGBRowDown2_Any_MMI - : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_MMI - : ScaleARGBRowDown2Box_Any_MMI); - if (IS_ALIGNED(dst_width, 2)) { - ScaleARGBRowDown2 = - filtering == kFilterNone - ? ScaleARGBRowDown2_MMI - : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_MMI - : ScaleARGBRowDown2Box_MMI); - } - } -#endif #if defined(HAS_SCALEARGBROWDOWN2_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleARGBRowDown2 = @@ -127,6 +111,22 @@ static void ScaleARGBDown2(int src_width, } } #endif +#if defined(HAS_SCALEARGBROWDOWN2_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleARGBRowDown2 = + filtering == kFilterNone + ? ScaleARGBRowDown2_Any_LSX + : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_LSX + : ScaleARGBRowDown2Box_Any_LSX); + if (IS_ALIGNED(dst_width, 4)) { + ScaleARGBRowDown2 = + filtering == kFilterNone + ? ScaleARGBRowDown2_LSX + : (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_LSX + : ScaleARGBRowDown2Box_LSX); + } + } +#endif if (filtering == kFilterLinear) { src_stride = 0; @@ -155,14 +155,14 @@ static void ScaleARGBDown4Box(int src_width, int dy) { int j; // Allocate 2 rows of ARGB. 
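ScaleARGBDown4Box, whose body continues below, performs the 4:1 box reduction as two 2:1 box passes through a temporary row pair. A scalar, single-channel illustration of that idea, with arbitrary pixel values:

#include <stdio.h>

int main(void) {
  unsigned p[4][4]; /* one channel of a 4x4 source block */
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j) p[i][j] = (unsigned)(i * 4 + j);
  unsigned mid[2][2]; /* pass 1: 2x2 box averages, as the first two row calls below produce */
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      mid[i][j] = (p[2 * i][2 * j] + p[2 * i][2 * j + 1] + p[2 * i + 1][2 * j] +
                   p[2 * i + 1][2 * j + 1] + 2) >> 2;
  /* pass 2: one more 2x2 box over the intermediate pair gives the 4:1 result */
  unsigned out = (mid[0][0] + mid[0][1] + mid[1][0] + mid[1][1] + 2) >> 2;
  printf("two-pass 4x4 box average = %u\n", out); /* 8, vs. 7.5 exact */
  return 0;
}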
- const int kRowSize = (dst_width * 2 * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (dst_width * 2 * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); int row_stride = src_stride * (dy >> 16); void (*ScaleARGBRowDown2)(const uint8_t* src_argb, ptrdiff_t src_stride, uint8_t* dst_argb, int dst_width) = ScaleARGBRowDown2Box_C; // Advance to odd row, even column. - src_argb += (y >> 16) * src_stride + (x >> 16) * 4; + src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4; (void)src_width; (void)src_height; (void)dx; @@ -187,9 +187,9 @@ static void ScaleARGBDown4Box(int src_width, for (j = 0; j < dst_height; ++j) { ScaleARGBRowDown2(src_argb, src_stride, row, dst_width * 2); - ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride, row + kRowSize, + ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride, row + row_size, dst_width * 2); - ScaleARGBRowDown2(row, kRowSize, dst_argb, dst_width); + ScaleARGBRowDown2(row, row_size, dst_argb, dst_width); src_argb += row_stride; dst_argb += dst_stride; } @@ -214,7 +214,7 @@ static void ScaleARGBDownEven(int src_width, enum FilterMode filtering) { int j; int col_step = dx >> 16; - int row_stride = (dy >> 16) * src_stride; + int row_stride = (dy >> 16) * (int64_t)src_stride; void (*ScaleARGBRowDownEven)(const uint8_t* src_argb, ptrdiff_t src_stride, int src_step, uint8_t* dst_argb, int dst_width) = filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C; @@ -222,7 +222,7 @@ static void ScaleARGBDownEven(int src_width, (void)src_height; assert(IS_ALIGNED(src_width, 2)); assert(IS_ALIGNED(src_height, 2)); - src_argb += (y >> 16) * src_stride + (x >> 16) * 4; + src_argb += (y >> 16) * (int64_t)src_stride + (x >> 16) * 4; #if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_SSE2 @@ -243,16 +243,6 @@ static void ScaleARGBDownEven(int src_width, } } #endif -#if defined(HAS_SCALEARGBROWDOWNEVEN_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MMI - : ScaleARGBRowDownEven_Any_MMI; - if (IS_ALIGNED(dst_width, 2)) { - ScaleARGBRowDownEven = - filtering ? ScaleARGBRowDownEvenBox_MMI : ScaleARGBRowDownEven_MMI; - } - } -#endif #if defined(HAS_SCALEARGBROWDOWNEVEN_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_MSA @@ -263,6 +253,16 @@ static void ScaleARGBDownEven(int src_width, } } #endif +#if defined(HAS_SCALEARGBROWDOWNEVEN_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_LSX + : ScaleARGBRowDownEven_Any_LSX; + if (IS_ALIGNED(dst_width, 4)) { + ScaleARGBRowDownEven = + filtering ? 
ScaleARGBRowDownEvenBox_LSX : ScaleARGBRowDownEven_LSX; + } + } +#endif if (filtering == kFilterLinear) { src_stride = 0; @@ -340,6 +340,14 @@ static void ScaleARGBBilinearDown(int src_width, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(clip_src_width, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif #if defined(HAS_SCALEARGBFILTERCOLS_SSSE3) if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) { ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3; @@ -360,6 +368,14 @@ static void ScaleARGBBilinearDown(int src_width, ScaleARGBFilterCols = ScaleARGBFilterCols_MSA; } } +#endif +#if defined(HAS_SCALEARGBFILTERCOLS_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleARGBFilterCols = ScaleARGBFilterCols_Any_LSX; + if (IS_ALIGNED(dst_width, 8)) { + ScaleARGBFilterCols = ScaleARGBFilterCols_LSX; + } + } #endif // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear. // Allocate a row of ARGB. @@ -372,7 +388,7 @@ static void ScaleARGBBilinearDown(int src_width, } for (j = 0; j < dst_height; ++j) { int yi = y >> 16; - const uint8_t* src = src_argb + yi * src_stride; + const uint8_t* src = src_argb + yi * (int64_t)src_stride; if (filtering == kFilterLinear) { ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx); } else { @@ -436,14 +452,6 @@ static void ScaleARGBBilinearUp(int src_width, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(dst_width, 2)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -451,6 +459,14 @@ static void ScaleARGBBilinearUp(int src_width, InterpolateRow = InterpolateRow_MSA; } } +#endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(dst_width, 8)) { + InterpolateRow = InterpolateRow_LSX; + } + } #endif if (src_width >= 32768) { ScaleARGBFilterCols = @@ -477,6 +493,14 @@ static void ScaleARGBBilinearUp(int src_width, } } #endif +#if defined(HAS_SCALEARGBFILTERCOLS_LSX) + if (filtering && TestCpuFlag(kCpuHasLSX)) { + ScaleARGBFilterCols = ScaleARGBFilterCols_Any_LSX; + if (IS_ALIGNED(dst_width, 8)) { + ScaleARGBFilterCols = ScaleARGBFilterCols_LSX; + } + } +#endif #if defined(HAS_SCALEARGBCOLS_SSE2) if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) { ScaleARGBFilterCols = ScaleARGBCols_SSE2; @@ -490,14 +514,6 @@ static void ScaleARGBBilinearUp(int src_width, } } #endif -#if defined(HAS_SCALEARGBCOLS_MMI) - if (!filtering && TestCpuFlag(kCpuHasMMI)) { - ScaleARGBFilterCols = ScaleARGBCols_Any_MMI; - if (IS_ALIGNED(dst_width, 1)) { - ScaleARGBFilterCols = ScaleARGBCols_MMI; - } - } -#endif #if defined(HAS_SCALEARGBCOLS_MSA) if (!filtering && TestCpuFlag(kCpuHasMSA)) { ScaleARGBFilterCols = ScaleARGBCols_Any_MSA; @@ -505,6 +521,14 @@ static void ScaleARGBBilinearUp(int src_width, ScaleARGBFilterCols = ScaleARGBCols_MSA; } } +#endif +#if defined(HAS_SCALEARGBCOLS_LSX) + if (!filtering && TestCpuFlag(kCpuHasLSX)) { + ScaleARGBFilterCols = ScaleARGBCols_Any_LSX; + if (IS_ALIGNED(dst_width, 4)) { + ScaleARGBFilterCols = ScaleARGBCols_LSX; + } + } #endif if (!filtering && src_width * 2 == dst_width && x < 0x8000) { ScaleARGBFilterCols = ScaleARGBColsUp2_C; @@ -512,11 +536,6 @@ static void ScaleARGBBilinearUp(int src_width, if (TestCpuFlag(kCpuHasSSE2) && 
IS_ALIGNED(dst_width, 8)) { ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2; } -#endif -#if defined(HAS_SCALEARGBCOLSUP2_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) { - ScaleARGBFilterCols = ScaleARGBColsUp2_MMI; - } #endif } @@ -526,14 +545,14 @@ static void ScaleARGBBilinearUp(int src_width, { int yi = y >> 16; - const uint8_t* src = src_argb + yi * src_stride; + const uint8_t* src = src_argb + yi * (int64_t)src_stride; // Allocate 2 rows of ARGB. - const int kRowSize = (dst_width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (dst_width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); uint8_t* rowptr = row; - int rowstride = kRowSize; + int rowstride = row_size; int lasty = yi; ScaleARGBFilterCols(rowptr, src, dst_width, x, dx); @@ -541,7 +560,9 @@ static void ScaleARGBBilinearUp(int src_width, src += src_stride; } ScaleARGBFilterCols(rowptr + rowstride, src, dst_width, x, dx); - src += src_stride; + if (src_height > 2) { + src += src_stride; + } for (j = 0; j < dst_height; ++j) { yi = y >> 16; @@ -549,14 +570,16 @@ static void ScaleARGBBilinearUp(int src_width, if (y > max_y) { y = max_y; yi = y >> 16; - src = src_argb + yi * src_stride; + src = src_argb + yi * (int64_t)src_stride; } if (yi != lasty) { ScaleARGBFilterCols(rowptr, src, dst_width, x, dx); rowptr += rowstride; rowstride = -rowstride; lasty = yi; - src += src_stride; + if ((y + 65536) < max_y) { + src += src_stride; + } } } if (filtering == kFilterLinear) { @@ -611,6 +634,15 @@ static void ScaleYUVToARGBBilinearUp(int src_width, } } #endif +#if defined(HAS_I422TOARGBROW_AVX512BW) + if (TestCpuFlag(kCpuHasAVX512BW | kCpuHasAVX512VL) == + (kCpuHasAVX512BW | kCpuHasAVX512VL)) { + I422ToARGBRow = I422ToARGBRow_Any_AVX512BW; + if (IS_ALIGNED(src_width, 32)) { + I422ToARGBRow = I422ToARGBRow_AVX512BW; + } + } +#endif #if defined(HAS_I422TOARGBROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { I422ToARGBRow = I422ToARGBRow_Any_NEON; @@ -619,14 +651,6 @@ static void ScaleYUVToARGBBilinearUp(int src_width, } } #endif -#if defined(HAS_I422TOARGBROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - I422ToARGBRow = I422ToARGBRow_Any_MMI; - if (IS_ALIGNED(src_width, 4)) { - I422ToARGBRow = I422ToARGBRow_MMI; - } - } -#endif #if defined(HAS_I422TOARGBROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { I422ToARGBRow = I422ToARGBRow_Any_MSA; @@ -635,6 +659,14 @@ static void ScaleYUVToARGBBilinearUp(int src_width, } } #endif +#if defined(HAS_I422TOARGBROW_LASX) + if (TestCpuFlag(kCpuHasLASX)) { + I422ToARGBRow = I422ToARGBRow_Any_LASX; + if (IS_ALIGNED(src_width, 32)) { + I422ToARGBRow = I422ToARGBRow_LASX; + } + } +#endif void (*InterpolateRow)(uint8_t * dst_argb, const uint8_t* src_argb, ptrdiff_t src_stride, int dst_width, @@ -671,6 +703,14 @@ static void ScaleYUVToARGBBilinearUp(int src_width, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(dst_width, 8)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif void (*ScaleARGBFilterCols)(uint8_t * dst_argb, const uint8_t* src_argb, int dst_width, int x, int dx) = @@ -700,6 +740,14 @@ static void ScaleYUVToARGBBilinearUp(int src_width, } } #endif +#if defined(HAS_SCALEARGBFILTERCOLS_LSX) + if (filtering && TestCpuFlag(kCpuHasLSX)) { + ScaleARGBFilterCols = ScaleARGBFilterCols_Any_LSX; + if (IS_ALIGNED(dst_width, 8)) { + ScaleARGBFilterCols = ScaleARGBFilterCols_LSX; + } + } +#endif #if defined(HAS_SCALEARGBCOLS_SSE2) if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width 
< 32768) { ScaleARGBFilterCols = ScaleARGBCols_SSE2; @@ -713,14 +761,6 @@ static void ScaleYUVToARGBBilinearUp(int src_width, } } #endif -#if defined(HAS_SCALEARGBCOLS_MMI) - if (!filtering && TestCpuFlag(kCpuHasMMI)) { - ScaleARGBFilterCols = ScaleARGBCols_Any_MMI; - if (IS_ALIGNED(dst_width, 1)) { - ScaleARGBFilterCols = ScaleARGBCols_MMI; - } - } -#endif #if defined(HAS_SCALEARGBCOLS_MSA) if (!filtering && TestCpuFlag(kCpuHasMSA)) { ScaleARGBFilterCols = ScaleARGBCols_Any_MSA; @@ -728,6 +768,14 @@ static void ScaleYUVToARGBBilinearUp(int src_width, ScaleARGBFilterCols = ScaleARGBCols_MSA; } } +#endif +#if defined(HAS_SCALEARGBCOLS_LSX) + if (!filtering && TestCpuFlag(kCpuHasLSX)) { + ScaleARGBFilterCols = ScaleARGBCols_Any_LSX; + if (IS_ALIGNED(dst_width, 4)) { + ScaleARGBFilterCols = ScaleARGBCols_LSX; + } + } #endif if (!filtering && src_width * 2 == dst_width && x < 0x8000) { ScaleARGBFilterCols = ScaleARGBColsUp2_C; @@ -735,11 +783,6 @@ static void ScaleYUVToARGBBilinearUp(int src_width, if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) { ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2; } -#endif -#if defined(HAS_SCALEARGBCOLSUP2_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) { - ScaleARGBFilterCols = ScaleARGBColsUp2_MMI; - } #endif } @@ -750,19 +793,19 @@ static void ScaleYUVToARGBBilinearUp(int src_width, const int kYShift = 1; // Shift Y by 1 to convert Y plane to UV coordinate. int yi = y >> 16; int uv_yi = yi >> kYShift; - const uint8_t* src_row_y = src_y + yi * src_stride_y; - const uint8_t* src_row_u = src_u + uv_yi * src_stride_u; - const uint8_t* src_row_v = src_v + uv_yi * src_stride_v; + const uint8_t* src_row_y = src_y + yi * (int64_t)src_stride_y; + const uint8_t* src_row_u = src_u + uv_yi * (int64_t)src_stride_u; + const uint8_t* src_row_v = src_v + uv_yi * (int64_t)src_stride_v; // Allocate 2 rows of ARGB. - const int kRowSize = (dst_width * 4 + 31) & ~31; - align_buffer_64(row, kRowSize * 2); + const int row_size = (dst_width * 4 + 31) & ~31; + align_buffer_64(row, row_size * 2); // Allocate 1 row of ARGB for source conversion. align_buffer_64(argb_row, src_width * 4); uint8_t* rowptr = row; - int rowstride = kRowSize; + int rowstride = row_size; int lasty = yi; // TODO(fbarchard): Convert first 2 rows of YUV to ARGB. @@ -790,9 +833,9 @@ static void ScaleYUVToARGBBilinearUp(int src_width, y = max_y; yi = y >> 16; uv_yi = yi >> kYShift; - src_row_y = src_y + yi * src_stride_y; - src_row_u = src_u + uv_yi * src_stride_u; - src_row_v = src_v + uv_yi * src_stride_v; + src_row_y = src_y + yi * (int64_t)src_stride_y; + src_row_u = src_u + uv_yi * (int64_t)src_stride_u; + src_row_v = src_v + uv_yi * (int64_t)src_stride_v; } if (yi != lasty) { // TODO(fbarchard): Convert the clipped region of row. 
@@ -857,14 +900,6 @@ static void ScaleARGBSimple(int src_width, } } #endif -#if defined(HAS_SCALEARGBCOLS_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleARGBCols = ScaleARGBCols_Any_MMI; - if (IS_ALIGNED(dst_width, 1)) { - ScaleARGBCols = ScaleARGBCols_MMI; - } - } -#endif #if defined(HAS_SCALEARGBCOLS_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleARGBCols = ScaleARGBCols_Any_MSA; @@ -872,6 +907,14 @@ static void ScaleARGBSimple(int src_width, ScaleARGBCols = ScaleARGBCols_MSA; } } +#endif +#if defined(HAS_SCALEARGBCOLS_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + ScaleARGBCols = ScaleARGBCols_Any_LSX; + if (IS_ALIGNED(dst_width, 4)) { + ScaleARGBCols = ScaleARGBCols_LSX; + } + } #endif if (src_width * 2 == dst_width && x < 0x8000) { ScaleARGBCols = ScaleARGBColsUp2_C; @@ -879,17 +922,12 @@ static void ScaleARGBSimple(int src_width, if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) { ScaleARGBCols = ScaleARGBColsUp2_SSE2; } -#endif -#if defined(HAS_SCALEARGBCOLSUP2_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) { - ScaleARGBCols = ScaleARGBColsUp2_MMI; - } #endif } for (j = 0; j < dst_height; ++j) { - ScaleARGBCols(dst_argb, src_argb + (y >> 16) * src_stride, dst_width, x, - dx); + ScaleARGBCols(dst_argb, src_argb + (y >> 16) * (int64_t)src_stride, + dst_width, x, dx); dst_argb += dst_stride; y += dy; } @@ -924,7 +962,7 @@ static void ScaleARGB(const uint8_t* src, // Negative src_height means invert the image. if (src_height < 0) { src_height = -src_height; - src = src + (src_height - 1) * src_stride; + src = src + (src_height - 1) * (int64_t)src_stride; src_stride = -src_stride; } ScaleSlope(src_width, src_height, dst_width, dst_height, filtering, &x, &y, @@ -939,7 +977,7 @@ static void ScaleARGB(const uint8_t* src, if (clip_y) { int64_t clipf = (int64_t)(clip_y)*dy; y += (clipf & 0xffff); - src += (clipf >> 16) * src_stride; + src += (clipf >> 16) * (int64_t)src_stride; dst += clip_y * dst_stride; } @@ -973,8 +1011,8 @@ static void ScaleARGB(const uint8_t* src, filtering = kFilterNone; if (dx == 0x10000 && dy == 0x10000) { // Straight copy. - ARGBCopy(src + (y >> 16) * src_stride + (x >> 16) * 4, src_stride, - dst, dst_stride, clip_width, clip_height); + ARGBCopy(src + (y >> 16) * (int64_t)src_stride + (x >> 16) * 4, + src_stride, dst, dst_stride, clip_width, clip_height); return; } } @@ -983,7 +1021,7 @@ static void ScaleARGB(const uint8_t* src, if (dx == 0x10000 && (x & 0xffff) == 0) { // Arbitrary scale vertically, but unscaled horizontally. 
ScalePlaneVertical(src_height, clip_width, clip_height, src_stride, - dst_stride, src, dst, x, y, dy, 4, filtering); + dst_stride, src, dst, x, y, dy, /*bpp=*/4, filtering); return; } if (filtering && dy < 65536) { diff --git a/third-party/libyuv/third_party/libyuv/source/scale_common.cc b/third-party/libyuv/third_party/libyuv/source/scale_common.cc index da96d42865..b02bdafd5b 100644 --- a/third-party/libyuv/third_party/libyuv/source/scale_common.cc +++ b/third-party/libyuv/third_party/libyuv/source/scale_common.cc @@ -766,18 +766,18 @@ void ScaleRowDown38_3_Box_16_C(const uint16_t* src_ptr, (src_ptr[0] + src_ptr[1] + src_ptr[2] + src_ptr[stride + 0] + src_ptr[stride + 1] + src_ptr[stride + 2] + src_ptr[stride * 2 + 0] + src_ptr[stride * 2 + 1] + src_ptr[stride * 2 + 2]) * - (65536 / 9) >> + (65536u / 9u) >> 16; dst_ptr[1] = (src_ptr[3] + src_ptr[4] + src_ptr[5] + src_ptr[stride + 3] + src_ptr[stride + 4] + src_ptr[stride + 5] + src_ptr[stride * 2 + 3] + src_ptr[stride * 2 + 4] + src_ptr[stride * 2 + 5]) * - (65536 / 9) >> + (65536u / 9u) >> 16; dst_ptr[2] = (src_ptr[6] + src_ptr[7] + src_ptr[stride + 6] + src_ptr[stride + 7] + src_ptr[stride * 2 + 6] + src_ptr[stride * 2 + 7]) * - (65536 / 6) >> + (65536u / 6u) >> 16; src_ptr += 8; dst_ptr += 3; @@ -820,15 +820,15 @@ void ScaleRowDown38_2_Box_16_C(const uint16_t* src_ptr, for (i = 0; i < dst_width; i += 3) { dst_ptr[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] + src_ptr[stride + 0] + src_ptr[stride + 1] + src_ptr[stride + 2]) * - (65536 / 6) >> + (65536u / 6u) >> 16; dst_ptr[1] = (src_ptr[3] + src_ptr[4] + src_ptr[5] + src_ptr[stride + 3] + src_ptr[stride + 4] + src_ptr[stride + 5]) * - (65536 / 6) >> + (65536u / 6u) >> 16; dst_ptr[2] = (src_ptr[6] + src_ptr[7] + src_ptr[stride + 6] + src_ptr[stride + 7]) * - (65536 / 4) >> + (65536u / 4u) >> 16; src_ptr += 8; dst_ptr += 3; @@ -1465,7 +1465,7 @@ void ScalePlaneVertical(int src_height, int x, int y, int dy, - int bpp, + int bpp, // bytes per pixel. 4 for ARGB. enum FilterMode filtering) { // TODO(fbarchard): Allow higher bpp. int dst_width_bytes = dst_width * bpp; @@ -1503,14 +1503,6 @@ void ScalePlaneVertical(int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(dst_width_bytes, 8)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -1518,6 +1510,14 @@ void ScalePlaneVertical(int src_height, InterpolateRow = InterpolateRow_MSA; } } +#endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(dst_width_bytes, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } #endif for (j = 0; j < dst_height; ++j) { int yi; @@ -1533,6 +1533,7 @@ void ScalePlaneVertical(int src_height, y += dy; } } + void ScalePlaneVertical_16(int src_height, int dst_width, int dst_height, @@ -1543,7 +1544,7 @@ void ScalePlaneVertical_16(int src_height, int x, int y, int dy, - int wpp, + int wpp, /* words per pixel. normally 1 */ enum FilterMode filtering) { // TODO(fbarchard): Allow higher wpp. 
int dst_width_words = dst_width * wpp; @@ -1559,32 +1560,32 @@ void ScalePlaneVertical_16(int src_height, src_argb += (x >> 16) * wpp; #if defined(HAS_INTERPOLATEROW_16_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_16_SSE2; - if (IS_ALIGNED(dst_width_bytes, 16)) { + InterpolateRow = InterpolateRow_16_Any_SSE2; + if (IS_ALIGNED(dst_width_words, 16)) { InterpolateRow = InterpolateRow_16_SSE2; } } #endif #if defined(HAS_INTERPOLATEROW_16_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - InterpolateRow = InterpolateRow_Any_16_SSSE3; - if (IS_ALIGNED(dst_width_bytes, 16)) { + InterpolateRow = InterpolateRow_16_Any_SSSE3; + if (IS_ALIGNED(dst_width_words, 16)) { InterpolateRow = InterpolateRow_16_SSSE3; } } #endif #if defined(HAS_INTERPOLATEROW_16_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { - InterpolateRow = InterpolateRow_Any_16_AVX2; - if (IS_ALIGNED(dst_width_bytes, 32)) { + InterpolateRow = InterpolateRow_16_Any_AVX2; + if (IS_ALIGNED(dst_width_words, 32)) { InterpolateRow = InterpolateRow_16_AVX2; } } #endif #if defined(HAS_INTERPOLATEROW_16_NEON) if (TestCpuFlag(kCpuHasNEON)) { - InterpolateRow = InterpolateRow_Any_16_NEON; - if (IS_ALIGNED(dst_width_bytes, 16)) { + InterpolateRow = InterpolateRow_16_Any_NEON; + if (IS_ALIGNED(dst_width_words, 8)) { InterpolateRow = InterpolateRow_16_NEON; } } @@ -1604,6 +1605,70 @@ void ScalePlaneVertical_16(int src_height, } } +// Use scale to convert lsb formats to msb, depending how many bits there are: +// 32768 = 9 bits +// 16384 = 10 bits +// 4096 = 12 bits +// 256 = 16 bits +// TODO(fbarchard): change scale to bits +void ScalePlaneVertical_16To8(int src_height, + int dst_width, + int dst_height, + int src_stride, + int dst_stride, + const uint16_t* src_argb, + uint8_t* dst_argb, + int x, + int y, + int dy, + int wpp, /* words per pixel. normally 1 */ + int scale, + enum FilterMode filtering) { + // TODO(fbarchard): Allow higher wpp. + int dst_width_words = dst_width * wpp; + // TODO(https://crbug.com/libyuv/931): Add NEON 32 bit and AVX2 versions. + void (*InterpolateRow_16To8)(uint8_t * dst_argb, const uint16_t* src_argb, + ptrdiff_t src_stride, int scale, int dst_width, + int source_y_fraction) = InterpolateRow_16To8_C; + const int max_y = (src_height > 1) ? ((src_height - 1) << 16) - 1 : 0; + int j; + assert(wpp >= 1 && wpp <= 2); + assert(src_height != 0); + assert(dst_width > 0); + assert(dst_height > 0); + src_argb += (x >> 16) * wpp; + +#if defined(HAS_INTERPOLATEROW_16TO8_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + InterpolateRow_16To8 = InterpolateRow_16To8_Any_NEON; + if (IS_ALIGNED(dst_width, 8)) { + InterpolateRow_16To8 = InterpolateRow_16To8_NEON; + } + } +#endif +#if defined(HAS_INTERPOLATEROW_16TO8_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + InterpolateRow_16To8 = InterpolateRow_16To8_Any_AVX2; + if (IS_ALIGNED(dst_width, 32)) { + InterpolateRow_16To8 = InterpolateRow_16To8_AVX2; + } + } +#endif + for (j = 0; j < dst_height; ++j) { + int yi; + int yf; + if (y > max_y) { + y = max_y; + } + yi = y >> 16; + yf = filtering ? ((y >> 8) & 255) : 0; + InterpolateRow_16To8(dst_argb, src_argb + yi * src_stride, src_stride, + scale, dst_width_words, yf); + dst_argb += dst_stride; + y += dy; + } +} + // Simplify the filtering based on scale factors. enum FilterMode ScaleFilterReduce(int src_width, int src_height, @@ -1653,7 +1718,7 @@ int FixedDiv_C(int num, int div) { return (int)(((int64_t)(num) << 16) / div); } -// Divide num by div and return as 16.16 fixed point result. 
+// Divide num - 1 by div - 1 and return as 16.16 fixed point result. int FixedDiv1_C(int num, int div) { return (int)((((int64_t)(num) << 16) - 0x00010001) / (div - 1)); } @@ -1696,14 +1761,14 @@ void ScaleSlope(int src_width, if (dst_width <= Abs(src_width)) { *dx = FixedDiv(Abs(src_width), dst_width); *x = CENTERSTART(*dx, -32768); // Subtract 0.5 (32768) to center filter. - } else if (dst_width > 1) { + } else if (src_width > 1 && dst_width > 1) { *dx = FixedDiv1(Abs(src_width), dst_width); *x = 0; } if (dst_height <= src_height) { *dy = FixedDiv(src_height, dst_height); *y = CENTERSTART(*dy, -32768); // Subtract 0.5 (32768) to center filter. - } else if (dst_height > 1) { + } else if (src_height > 1 && dst_height > 1) { *dy = FixedDiv1(src_height, dst_height); *y = 0; } @@ -1712,7 +1777,7 @@ void ScaleSlope(int src_width, if (dst_width <= Abs(src_width)) { *dx = FixedDiv(Abs(src_width), dst_width); *x = CENTERSTART(*dx, -32768); // Subtract 0.5 (32768) to center filter. - } else if (dst_width > 1) { + } else if (src_width > 1 && dst_width > 1) { *dx = FixedDiv1(Abs(src_width), dst_width); *x = 0; } diff --git a/third-party/libyuv/third_party/libyuv/source/scale_gcc.cc b/third-party/libyuv/third_party/libyuv/source/scale_gcc.cc index 279c5e4020..17eeffadfb 100644 --- a/third-party/libyuv/third_party/libyuv/source/scale_gcc.cc +++ b/third-party/libyuv/third_party/libyuv/source/scale_gcc.cc @@ -779,7 +779,7 @@ static const uvec8 kLinearShuffleFar = {2, 3, 0, 1, 6, 7, 4, 5, static const uvec8 kLinearMadd31 = {3, 1, 1, 3, 3, 1, 1, 3, 3, 1, 1, 3, 3, 1, 1, 3}; -#ifdef HAS_SCALEROWUP2LINEAR_SSE2 +#ifdef HAS_SCALEROWUP2_LINEAR_SSE2 void ScaleRowUp2_Linear_SSE2(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width) { @@ -833,7 +833,7 @@ void ScaleRowUp2_Linear_SSE2(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_SSE2 +#ifdef HAS_SCALEROWUP2_BILINEAR_SSE2 void ScaleRowUp2_Bilinear_SSE2(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -949,7 +949,7 @@ void ScaleRowUp2_Bilinear_SSE2(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_SSSE3 +#ifdef HAS_SCALEROWUP2_LINEAR_12_SSSE3 void ScaleRowUp2_Linear_12_SSSE3(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width) { @@ -999,7 +999,7 @@ void ScaleRowUp2_Linear_12_SSSE3(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_SSSE3 +#ifdef HAS_SCALEROWUP2_BILINEAR_12_SSSE3 void ScaleRowUp2_Bilinear_12_SSSE3(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, @@ -1094,11 +1094,12 @@ void ScaleRowUp2_Bilinear_12_SSSE3(const uint16_t* src_ptr, : "r"((intptr_t)(src_stride)), // %3 "r"((intptr_t)(dst_stride)), // %4 "m"(kLinearShuffleFar) // %5 - : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", + "xmm7"); } #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_SSE2 +#ifdef HAS_SCALEROWUP2_LINEAR_16_SSE2 void ScaleRowUp2_Linear_16_SSE2(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width) { @@ -1149,7 +1150,7 @@ void ScaleRowUp2_Linear_16_SSE2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_SSE2 +#ifdef HAS_SCALEROWUP2_BILINEAR_16_SSE2 void ScaleRowUp2_Bilinear_16_SSE2(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, @@ -1242,7 +1243,7 @@ void ScaleRowUp2_Bilinear_16_SSE2(const uint16_t* src_ptr, "pshufd $0b11011000,%%xmm4,%%xmm4 \n" "movdqu %%xmm4,(%1) \n" // store above "packssdw %%xmm2,%%xmm5 \n" - "pshufd $0b11011000,%%xmm4,%%xmm4 \n" + 
"pshufd $0b11011000,%%xmm5,%%xmm5 \n" "movdqu %%xmm5,(%1,%4,2) \n" // store below "lea 0x8(%0),%0 \n" @@ -1254,11 +1255,12 @@ void ScaleRowUp2_Bilinear_16_SSE2(const uint16_t* src_ptr, "+r"(dst_width) // %2 : "r"((intptr_t)(src_stride)), // %3 "r"((intptr_t)(dst_stride)) // %4 - : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", + "xmm7"); } #endif -#ifdef HAS_SCALEROWUP2LINEAR_SSSE3 +#ifdef HAS_SCALEROWUP2_LINEAR_SSSE3 void ScaleRowUp2_Linear_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width) { @@ -1283,9 +1285,8 @@ void ScaleRowUp2_Linear_SSSE3(const uint8_t* src_ptr, "paddw %%xmm4,%%xmm2 \n" // 3*near+far+2 (hi) "psrlw $2,%%xmm0 \n" // 3/4*near+1/4*far (lo) "psrlw $2,%%xmm2 \n" // 3/4*near+1/4*far (hi) - "vpackuswb %%xmm2,%%xmm0,%%xmm0 \n" - "vmovdqu %%xmm0,(%1) \n" - + "packuswb %%xmm2,%%xmm0 \n" + "movdqu %%xmm0,(%1) \n" "lea 0x8(%0),%0 \n" "lea 0x10(%1),%1 \n" // 8 sample to 16 sample "sub $0x10,%2 \n" @@ -1294,11 +1295,11 @@ void ScaleRowUp2_Linear_SSSE3(const uint8_t* src_ptr, "+r"(dst_ptr), // %1 "+r"(dst_width) // %2 : "m"(kLinearMadd31) // %3 - : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"); + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"); } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_SSSE3 +#ifdef HAS_SCALEROWUP2_BILINEAR_SSSE3 void ScaleRowUp2_Bilinear_SSSE3(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -1385,7 +1386,7 @@ void ScaleRowUp2_Bilinear_SSSE3(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2LINEAR_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_AVX2 void ScaleRowUp2_Linear_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width) { @@ -1427,7 +1428,7 @@ void ScaleRowUp2_Linear_AVX2(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_AVX2 void ScaleRowUp2_Bilinear_AVX2(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -1511,7 +1512,7 @@ void ScaleRowUp2_Bilinear_AVX2(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2LINEAR_12_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_12_AVX2 void ScaleRowUp2_Linear_12_AVX2(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width) { @@ -1561,7 +1562,7 @@ void ScaleRowUp2_Linear_12_AVX2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_12_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_12_AVX2 void ScaleRowUp2_Bilinear_12_AVX2(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, @@ -1625,7 +1626,7 @@ void ScaleRowUp2_Bilinear_12_AVX2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2LINEAR_16_AVX2 +#ifdef HAS_SCALEROWUP2_LINEAR_16_AVX2 void ScaleRowUp2_Linear_16_AVX2(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width) { @@ -1673,7 +1674,7 @@ void ScaleRowUp2_Linear_16_AVX2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEROWUP2BILINEAR_16_AVX2 +#ifdef HAS_SCALEROWUP2_BILINEAR_16_AVX2 void ScaleRowUp2_Bilinear_16_AVX2(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, @@ -2326,13 +2327,18 @@ int FixedDiv1_X86(int num, int div) { return num; } -#ifdef HAS_SCALEUVROWDOWN2BOX_SSSE3 +#if defined(HAS_SCALEUVROWDOWN2BOX_SSSE3) || \ + defined(HAS_SCALEUVROWDOWN2BOX_AVX2) + // Shuffle table for splitting UV into upper and lower part of register. 
static const uvec8 kShuffleSplitUV = {0u, 2u, 4u, 6u, 8u, 10u, 12u, 14u, 1u, 3u, 5u, 7u, 9u, 11u, 13u, 15u}; static const uvec8 kShuffleMergeUV = {0u, 8u, 2u, 10u, 4u, 12u, 6u, 14u, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80}; +#endif + +#ifdef HAS_SCALEUVROWDOWN2BOX_SSSE3 void ScaleUVRowDown2Box_SSSE3(const uint8_t* src_ptr, ptrdiff_t src_stride, @@ -2418,7 +2424,7 @@ void ScaleUVRowDown2Box_AVX2(const uint8_t* src_ptr, static const uvec8 kUVLinearMadd31 = {3, 1, 3, 1, 1, 3, 1, 3, 3, 1, 3, 1, 1, 3, 1, 3}; -#ifdef HAS_SCALEUVROWUP2LINEAR_SSSE3 +#ifdef HAS_SCALEUVROWUP2_LINEAR_SSSE3 void ScaleUVRowUp2_Linear_SSSE3(const uint8_t* src_ptr, uint8_t* dst_ptr, int dst_width) { @@ -2457,7 +2463,7 @@ void ScaleUVRowUp2_Linear_SSSE3(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_SSSE3 +#ifdef HAS_SCALEUVROWUP2_BILINEAR_SSSE3 void ScaleUVRowUp2_Bilinear_SSSE3(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -2542,7 +2548,7 @@ void ScaleUVRowUp2_Bilinear_SSSE3(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_AVX2 +#ifdef HAS_SCALEUVROWUP2_LINEAR_AVX2 void ScaleUVRowUp2_Linear_AVX2(const uint8_t* src_ptr, uint8_t* dst_ptr, @@ -2584,7 +2590,7 @@ void ScaleUVRowUp2_Linear_AVX2(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_AVX2 +#ifdef HAS_SCALEUVROWUP2_BILINEAR_AVX2 void ScaleUVRowUp2_Bilinear_AVX2(const uint8_t* src_ptr, ptrdiff_t src_stride, uint8_t* dst_ptr, @@ -2666,10 +2672,10 @@ void ScaleUVRowUp2_Bilinear_AVX2(const uint8_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_16_SSE2 -void ScaleUVRowUp2_Linear_16_SSE2(const uint16_t* src_ptr, - uint16_t* dst_ptr, - int dst_width) { +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_SSE41 +void ScaleUVRowUp2_Linear_16_SSE41(const uint16_t* src_ptr, + uint16_t* dst_ptr, + int dst_width) { asm volatile( "pxor %%xmm5,%%xmm5 \n" "pcmpeqd %%xmm4,%%xmm4 \n" @@ -2716,12 +2722,12 @@ void ScaleUVRowUp2_Linear_16_SSE2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_16_SSE2 -void ScaleUVRowUp2_Bilinear_16_SSE2(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst_ptr, - ptrdiff_t dst_stride, - int dst_width) { +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_SSE41 +void ScaleUVRowUp2_Bilinear_16_SSE41(const uint16_t* src_ptr, + ptrdiff_t src_stride, + uint16_t* dst_ptr, + ptrdiff_t dst_stride, + int dst_width) { asm volatile( "pxor %%xmm7,%%xmm7 \n" "pcmpeqd %%xmm6,%%xmm6 \n" @@ -2809,7 +2815,7 @@ void ScaleUVRowUp2_Bilinear_16_SSE2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2LINEAR_16_AVX2 +#ifdef HAS_SCALEUVROWUP2_LINEAR_16_AVX2 void ScaleUVRowUp2_Linear_16_AVX2(const uint16_t* src_ptr, uint16_t* dst_ptr, int dst_width) { @@ -2856,7 +2862,7 @@ void ScaleUVRowUp2_Linear_16_AVX2(const uint16_t* src_ptr, } #endif -#ifdef HAS_SCALEUVROWUP2BILINEAR_16_AVX2 +#ifdef HAS_SCALEUVROWUP2_BILINEAR_16_AVX2 void ScaleUVRowUp2_Bilinear_16_AVX2(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst_ptr, diff --git a/third-party/libyuv/third_party/libyuv/source/scale_lsx.cc b/third-party/libyuv/third_party/libyuv/source/scale_lsx.cc new file mode 100644 index 0000000000..bfe5e9fbba --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/source/scale_lsx.cc @@ -0,0 +1,739 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Copyright (c) 2022 Loongson Technology Corporation Limited + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "libyuv/scale_row.h" + +#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) +#include "libyuv/loongson_intrinsics.h" + +#ifdef __cplusplus +namespace libyuv { +extern "C" { +#endif + +#define LOAD_DATA(_src, _in, _out) \ + { \ + int _tmp1, _tmp2, _tmp3, _tmp4; \ + DUP4_ARG2(__lsx_vpickve2gr_w, _in, 0, _in, 1, _in, 2, _in, 3, _tmp1, \ + _tmp2, _tmp3, _tmp4); \ + _out = __lsx_vinsgr2vr_w(_out, _src[_tmp1], 0); \ + _out = __lsx_vinsgr2vr_w(_out, _src[_tmp2], 1); \ + _out = __lsx_vinsgr2vr_w(_out, _src[_tmp3], 2); \ + _out = __lsx_vinsgr2vr_w(_out, _src[_tmp4], 3); \ + } + +void ScaleARGBRowDown2_LSX(const uint8_t* src_argb, + ptrdiff_t src_stride, + uint8_t* dst_argb, + int dst_width) { + int x; + int len = dst_width / 4; + (void)src_stride; + __m128i src0, src1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1); + dst0 = __lsx_vpickod_w(src1, src0); + __lsx_vst(dst0, dst_argb, 0); + src_argb += 32; + dst_argb += 16; + } +} + +void ScaleARGBRowDown2Linear_LSX(const uint8_t* src_argb, + ptrdiff_t src_stride, + uint8_t* dst_argb, + int dst_width) { + int x; + int len = dst_width / 4; + (void)src_stride; + __m128i src0, src1, tmp0, tmp1, dst0; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1); + tmp0 = __lsx_vpickev_w(src1, src0); + tmp1 = __lsx_vpickod_w(src1, src0); + dst0 = __lsx_vavgr_bu(tmp1, tmp0); + __lsx_vst(dst0, dst_argb, 0); + src_argb += 32; + dst_argb += 16; + } +} + +void ScaleARGBRowDown2Box_LSX(const uint8_t* src_argb, + ptrdiff_t src_stride, + uint8_t* dst_argb, + int dst_width) { + int x; + int len = dst_width / 4; + const uint8_t* s = src_argb; + const uint8_t* t = src_argb + src_stride; + __m128i src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3, dst0; + __m128i reg0, reg1, reg2, reg3; + __m128i shuff = {0x0703060205010400, 0x0F0B0E0A0D090C08}; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lsx_vld, s, 0, s, 16, src0, src1); + DUP2_ARG2(__lsx_vld, t, 0, t, 16, src2, src3); + DUP4_ARG3(__lsx_vshuf_b, src0, src0, shuff, src1, src1, shuff, src2, src2, + shuff, src3, src3, shuff, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, tmp3, + tmp3, reg0, reg1, reg2, reg3); + DUP2_ARG2(__lsx_vsadd_hu, reg0, reg2, reg1, reg3, reg0, reg1); + dst0 = __lsx_vsrarni_b_h(reg1, reg0, 2); + __lsx_vst(dst0, dst_argb, 0); + s += 32; + t += 32; + dst_argb += 16; + } +} + +void ScaleARGBRowDownEven_LSX(const uint8_t* src_argb, + ptrdiff_t src_stride, + int32_t src_stepx, + uint8_t* dst_argb, + int dst_width) { + int x; + int len = dst_width / 4; + int32_t stepx = src_stepx << 2; + (void)src_stride; + __m128i dst0, dst1, dst2, dst3; + + for (x = 0; x < len; x++) { + dst0 = __lsx_vldrepl_w(src_argb, 0); + src_argb += stepx; + dst1 = __lsx_vldrepl_w(src_argb, 0); + src_argb += stepx; + dst2 = __lsx_vldrepl_w(src_argb, 0); + src_argb += stepx; + dst3 = __lsx_vldrepl_w(src_argb, 0); + src_argb += stepx; + __lsx_vstelm_w(dst0, dst_argb, 0, 0); + __lsx_vstelm_w(dst1, dst_argb, 4, 0); + __lsx_vstelm_w(dst2, dst_argb, 8, 0); + __lsx_vstelm_w(dst3, dst_argb, 12, 0); + dst_argb += 16; + } +} + +void ScaleARGBRowDownEvenBox_LSX(const uint8_t* src_argb, + ptrdiff_t src_stride, + int src_stepx, + uint8_t* dst_argb, + int dst_width) { + int x; + int len = 
dst_width / 4; + int32_t stepx = src_stepx * 4; + const uint8_t* next_argb = src_argb + src_stride; + __m128i src0, src1, src2, src3; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, dst0; + + for (x = 0; x < len; x++) { + tmp0 = __lsx_vldrepl_d(src_argb, 0); + src_argb += stepx; + tmp1 = __lsx_vldrepl_d(src_argb, 0); + src_argb += stepx; + tmp2 = __lsx_vldrepl_d(src_argb, 0); + src_argb += stepx; + tmp3 = __lsx_vldrepl_d(src_argb, 0); + src_argb += stepx; + tmp4 = __lsx_vldrepl_d(next_argb, 0); + next_argb += stepx; + tmp5 = __lsx_vldrepl_d(next_argb, 0); + next_argb += stepx; + tmp6 = __lsx_vldrepl_d(next_argb, 0); + next_argb += stepx; + tmp7 = __lsx_vldrepl_d(next_argb, 0); + next_argb += stepx; + DUP4_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2); + DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3); + DUP2_ARG2(__lsx_vpackev_w, tmp1, tmp0, tmp3, tmp2, reg0, reg1); + DUP2_ARG2(__lsx_vpackod_w, tmp1, tmp0, tmp3, tmp2, tmp4, tmp5); + DUP2_ARG2(__lsx_vadd_h, reg0, tmp4, reg1, tmp5, reg0, reg1); + dst0 = __lsx_vsrarni_b_h(reg1, reg0, 2); + dst0 = __lsx_vshuf4i_b(dst0, 0xD8); + __lsx_vst(dst0, dst_argb, 0); + dst_argb += 16; + } +} + +void ScaleRowDown2_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x; + int len = dst_width / 32; + __m128i src0, src1, src2, src3, dst0, dst1; + (void)src_stride; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst0, dst1); + __lsx_vst(dst0, dst, 0); + __lsx_vst(dst1, dst, 16); + src_ptr += 64; + dst += 32; + } +} + +void ScaleRowDown2Linear_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x; + int len = dst_width / 32; + __m128i src0, src1, src2, src3; + __m128i tmp0, tmp1, tmp2, tmp3, dst0, dst1; + (void)src_stride; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp2); + DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, tmp1, tmp3); + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp1, tmp2, tmp3, dst0, dst1); + __lsx_vst(dst0, dst, 0); + __lsx_vst(dst1, dst, 16); + src_ptr += 64; + dst += 32; + } +} + +void ScaleRowDown2Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x; + int len = dst_width / 32; + const uint8_t* src_nex = src_ptr + src_stride; + __m128i src0, src1, src2, src3, src4, src5, src6, src7; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i dst0, dst1; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex, 48, + src4, src5, src6, src7); + DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, + src7, tmp0, tmp2, tmp4, tmp6); + DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, + src7, tmp1, tmp3, tmp5, tmp7); + DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, + tmp0, tmp1, tmp2, tmp3); + DUP2_ARG3(__lsx_vsrarni_b_h, tmp1, tmp0, 2, tmp3, tmp2, 2, dst0, dst1); + __lsx_vst(dst0, dst, 0); + __lsx_vst(dst1, dst, 16); + src_ptr += 64; + src_nex += 64; + dst += 32; + } 
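A scalar view of ScaleRowDown2Box_LSX above may help when reading the intrinsics: the vaddwev/vaddwod widening adds followed by __lsx_vsrarni_b_h(..., 2) amount to a rounded 2x2 box average, 32 output pixels per iteration. The sketch below is illustrative only and is not part of the patch.

#include <stddef.h>
#include <stdint.h>

// Rounded 2x2 box average: each destination byte is the mean of a 2x2 block
// taken from two adjacent source rows, with +2 applied before the >>2.
static void ScaleRowDown2Box_Sketch(const uint8_t* src_ptr,
                                    ptrdiff_t src_stride, uint8_t* dst,
                                    int dst_width) {
  const uint8_t* s = src_ptr;
  const uint8_t* t = src_ptr + src_stride;
  for (int x = 0; x < dst_width; ++x) {
    dst[x] = (uint8_t)((s[0] + s[1] + t[0] + t[1] + 2) >> 2);
    s += 2;
    t += 2;
  }
}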
+} + +void ScaleRowDown4_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x; + int len = dst_width / 16; + __m128i src0, src1, src2, src3, tmp0, tmp1, dst0; + (void)src_stride; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, tmp0, tmp1); + dst0 = __lsx_vpickod_b(tmp1, tmp0); + __lsx_vst(dst0, dst, 0); + src_ptr += 64; + dst += 16; + } +} + +void ScaleRowDown4Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x; + int len = dst_width / 16; + const uint8_t* ptr1 = src_ptr + src_stride; + const uint8_t* ptr2 = ptr1 + src_stride; + const uint8_t* ptr3 = ptr2 + src_stride; + __m128i src0, src1, src2, src3, src4, src5, src6, src7; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, dst0; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, ptr1, 0, ptr1, 16, ptr1, 32, ptr1, 48, src4, src5, + src6, src7); + DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, + src7, tmp0, tmp2, tmp4, tmp6); + DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, + src7, tmp1, tmp3, tmp5, tmp7); + DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, + reg0, reg1, reg2, reg3); + DUP4_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, ptr2, 32, ptr2, 48, src0, src1, + src2, src3); + DUP4_ARG2(__lsx_vld, ptr3, 0, ptr3, 16, ptr3, 32, ptr3, 48, src4, src5, + src6, src7); + DUP4_ARG2(__lsx_vaddwev_h_bu, src0, src4, src1, src5, src2, src6, src3, + src7, tmp0, tmp2, tmp4, tmp6); + DUP4_ARG2(__lsx_vaddwod_h_bu, src0, src4, src1, src5, src2, src6, src3, + src7, tmp1, tmp3, tmp5, tmp7); + DUP4_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, + reg4, reg5, reg6, reg7); + DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, + reg0, reg1, reg2, reg3); + DUP4_ARG2(__lsx_vhaddw_wu_hu, reg0, reg0, reg1, reg1, reg2, reg2, reg3, + reg3, reg0, reg1, reg2, reg3); + DUP2_ARG3(__lsx_vsrarni_h_w, reg1, reg0, 4, reg3, reg2, 4, tmp0, tmp1); + dst0 = __lsx_vpickev_b(tmp1, tmp0); + __lsx_vst(dst0, dst, 0); + src_ptr += 64; + ptr1 += 64; + ptr2 += 64; + ptr3 += 64; + dst += 16; + } +} + +void ScaleRowDown38_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x, len; + __m128i src0, src1, tmp0; + __m128i shuff = {0x13100E0B08060300, 0x000000001E1B1816}; + + assert(dst_width % 3 == 0); + len = dst_width / 12; + (void)src_stride; + + for (x = 0; x < len; x++) { + DUP2_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src0, src1); + tmp0 = __lsx_vshuf_b(src1, src0, shuff); + __lsx_vstelm_d(tmp0, dst, 0, 0); + __lsx_vstelm_w(tmp0, dst, 8, 2); + src_ptr += 32; + dst += 12; + } +} + +void ScaleRowDown38_2_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width) { + int x, len; + const uint8_t* src_nex = src_ptr + src_stride; + __m128i src0, src1, src2, src3, dst0; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, reg2, reg3; + __m128i shuff = {0x0A08160604120200, 0x000000001E0E0C1A}; + __m128i const_0x2AAA = __lsx_vreplgr2vr_h(0x2AAA); + __m128i const_0x4000 = __lsx_vreplgr2vr_w(0x4000); + + assert((dst_width % 3 == 0) && (dst_width > 0)); + len = dst_width / 12; + + for (x = 0; x < len; 
x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_nex, 0, src_nex, 16, src0, + src1, src2, src3); + DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2); + DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3); + DUP2_ARG2(__lsx_vpickev_h, tmp2, tmp0, tmp3, tmp1, reg0, reg1); + DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, reg2, reg3); + tmp4 = __lsx_vpickev_w(reg3, reg2); + tmp5 = __lsx_vadd_h(reg0, reg1); + tmp6 = __lsx_vadd_h(tmp5, tmp4); + tmp7 = __lsx_vmuh_h(tmp6, const_0x2AAA); + tmp0 = __lsx_vpickod_w(reg3, reg2); + tmp1 = __lsx_vhaddw_wu_hu(tmp0, tmp0); + tmp2 = __lsx_vmul_w(tmp1, const_0x4000); + dst0 = __lsx_vshuf_b(tmp2, tmp7, shuff); + __lsx_vstelm_d(dst0, dst_ptr, 0, 0); + __lsx_vstelm_w(dst0, dst_ptr, 8, 2); + src_ptr += 32; + src_nex += 32; + dst_ptr += 12; + } +} + +void ScaleRowDown38_3_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst_ptr, + int dst_width) { + int x, len; + const uint8_t* ptr1 = src_ptr + src_stride; + const uint8_t* ptr2 = ptr1 + src_stride; + __m128i src0, src1, src2, src3, src4, src5; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, reg2, reg3, dst0; + __m128i zero = __lsx_vldi(0); + __m128i shuff = {0x0A08160604120200, 0x000000001E0E0C1A}; + __m128i const_0x1C71 = __lsx_vreplgr2vr_h(0x1C71); + __m128i const_0x2AAA = __lsx_vreplgr2vr_w(0x2AAA); + + assert((dst_width % 3 == 0) && (dst_width > 0)); + len = dst_width / 12; + + for (x = 0; x < len; x++) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, ptr1, 0, ptr1, 16, src0, src1, + src2, src3); + DUP2_ARG2(__lsx_vld, ptr2, 0, ptr2, 16, src4, src5); + DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2); + DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3); + DUP2_ARG2(__lsx_vpackev_b, zero, src4, zero, src5, tmp4, tmp6); + DUP2_ARG2(__lsx_vpackod_b, zero, src4, zero, src5, tmp5, tmp7); + DUP4_ARG2(__lsx_vadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7, + tmp0, tmp1, tmp2, tmp3); + DUP2_ARG2(__lsx_vpickev_h, tmp2, tmp0, tmp3, tmp1, reg0, reg1); + DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, reg2, reg3); + tmp4 = __lsx_vpickev_w(reg3, reg2); + tmp5 = __lsx_vadd_h(reg0, reg1); + tmp6 = __lsx_vadd_h(tmp5, tmp4); + tmp7 = __lsx_vmuh_h(tmp6, const_0x1C71); + tmp0 = __lsx_vpickod_w(reg3, reg2); + tmp1 = __lsx_vhaddw_wu_hu(tmp0, tmp0); + tmp2 = __lsx_vmul_w(tmp1, const_0x2AAA); + dst0 = __lsx_vshuf_b(tmp2, tmp7, shuff); + __lsx_vstelm_d(dst0, dst_ptr, 0, 0); + __lsx_vstelm_w(dst0, dst_ptr, 8, 2); + src_ptr += 32; + ptr1 += 32; + ptr2 += 32; + dst_ptr += 12; + } +} + +void ScaleAddRow_LSX(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width) { + int x; + int len = src_width / 16; + __m128i src0, tmp0, tmp1, dst0, dst1; + __m128i zero = __lsx_vldi(0); + + assert(src_width > 0); + + for (x = 0; x < len; x++) { + src0 = __lsx_vld(src_ptr, 0); + DUP2_ARG2(__lsx_vld, dst_ptr, 0, dst_ptr, 16, dst0, dst1); + tmp0 = __lsx_vilvl_b(zero, src0); + tmp1 = __lsx_vilvh_b(zero, src0); + DUP2_ARG2(__lsx_vadd_h, dst0, tmp0, dst1, tmp1, dst0, dst1); + __lsx_vst(dst0, dst_ptr, 0); + __lsx_vst(dst1, dst_ptr, 16); + src_ptr += 16; + dst_ptr += 16; + } +} + +void ScaleFilterCols_LSX(uint8_t* dst_ptr, + const uint8_t* src_ptr, + int dst_width, + int x, + int dx) { + int j; + int len = dst_width / 16; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; + __m128i vec0, vec1, dst0; + __m128i vec_x = __lsx_vreplgr2vr_w(x); + __m128i vec_dx = 
__lsx_vreplgr2vr_w(dx); + __m128i const1 = __lsx_vreplgr2vr_w(0xFFFF); + __m128i const2 = __lsx_vreplgr2vr_w(0x40); + __m128i const_tmp = {0x0000000100000000, 0x0000000300000002}; + + vec0 = __lsx_vmul_w(vec_dx, const_tmp); + vec1 = __lsx_vslli_w(vec_dx, 2); + vec_x = __lsx_vadd_w(vec_x, vec0); + + for (j = 0; j < len; j++) { + tmp0 = __lsx_vsrai_w(vec_x, 16); + tmp4 = __lsx_vand_v(vec_x, const1); + vec_x = __lsx_vadd_w(vec_x, vec1); + tmp1 = __lsx_vsrai_w(vec_x, 16); + tmp5 = __lsx_vand_v(vec_x, const1); + vec_x = __lsx_vadd_w(vec_x, vec1); + tmp2 = __lsx_vsrai_w(vec_x, 16); + tmp6 = __lsx_vand_v(vec_x, const1); + vec_x = __lsx_vadd_w(vec_x, vec1); + tmp3 = __lsx_vsrai_w(vec_x, 16); + tmp7 = __lsx_vand_v(vec_x, const1); + vec_x = __lsx_vadd_w(vec_x, vec1); + DUP4_ARG2(__lsx_vsrai_w, tmp4, 9, tmp5, 9, tmp6, 9, tmp7, 9, tmp4, tmp5, + tmp6, tmp7); + LOAD_DATA(src_ptr, tmp0, reg0); + LOAD_DATA(src_ptr, tmp1, reg1); + LOAD_DATA(src_ptr, tmp2, reg2); + LOAD_DATA(src_ptr, tmp3, reg3); + DUP4_ARG2(__lsx_vaddi_wu, tmp0, 1, tmp1, 1, tmp2, 1, tmp3, 1, tmp0, tmp1, + tmp2, tmp3); + LOAD_DATA(src_ptr, tmp0, reg4); + LOAD_DATA(src_ptr, tmp1, reg5); + LOAD_DATA(src_ptr, tmp2, reg6); + LOAD_DATA(src_ptr, tmp3, reg7); + DUP4_ARG2(__lsx_vsub_w, reg4, reg0, reg5, reg1, reg6, reg2, reg7, reg3, + reg4, reg5, reg6, reg7); + DUP4_ARG2(__lsx_vmul_w, reg4, tmp4, reg5, tmp5, reg6, tmp6, reg7, tmp7, + reg4, reg5, reg6, reg7); + DUP4_ARG2(__lsx_vadd_w, reg4, const2, reg5, const2, reg6, const2, reg7, + const2, reg4, reg5, reg6, reg7); + DUP4_ARG2(__lsx_vsrai_w, reg4, 7, reg5, 7, reg6, 7, reg7, 7, reg4, reg5, + reg6, reg7); + DUP4_ARG2(__lsx_vadd_w, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, + reg0, reg1, reg2, reg3); + DUP2_ARG2(__lsx_vpickev_h, reg1, reg0, reg3, reg2, tmp0, tmp1); + dst0 = __lsx_vpickev_b(tmp1, tmp0); + __lsx_vst(dst0, dst_ptr, 0); + dst_ptr += 16; + } +} + +void ScaleARGBCols_LSX(uint8_t* dst_argb, + const uint8_t* src_argb, + int dst_width, + int x, + int dx) { + const uint32_t* src = (const uint32_t*)src_argb; + uint32_t* dst = (uint32_t*)dst_argb; + int j; + int len = dst_width / 4; + __m128i tmp0, tmp1, tmp2, dst0; + __m128i vec_x = __lsx_vreplgr2vr_w(x); + __m128i vec_dx = __lsx_vreplgr2vr_w(dx); + __m128i const_tmp = {0x0000000100000000, 0x0000000300000002}; + + tmp0 = __lsx_vmul_w(vec_dx, const_tmp); + tmp1 = __lsx_vslli_w(vec_dx, 2); + vec_x = __lsx_vadd_w(vec_x, tmp0); + + for (j = 0; j < len; j++) { + tmp2 = __lsx_vsrai_w(vec_x, 16); + vec_x = __lsx_vadd_w(vec_x, tmp1); + LOAD_DATA(src, tmp2, dst0); + __lsx_vst(dst0, dst, 0); + dst += 4; + } +} + +void ScaleARGBFilterCols_LSX(uint8_t* dst_argb, + const uint8_t* src_argb, + int dst_width, + int x, + int dx) { + const uint32_t* src = (const uint32_t*)src_argb; + int j; + int len = dst_width / 8; + __m128i src0, src1, src2, src3; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; + __m128i vec0, vec1, dst0, dst1; + __m128i vec_x = __lsx_vreplgr2vr_w(x); + __m128i vec_dx = __lsx_vreplgr2vr_w(dx); + __m128i const_tmp = {0x0000000100000000, 0x0000000300000002}; + __m128i const_7f = __lsx_vldi(0x7F); + + vec0 = __lsx_vmul_w(vec_dx, const_tmp); + vec1 = __lsx_vslli_w(vec_dx, 2); + vec_x = __lsx_vadd_w(vec_x, vec0); + + for (j = 0; j < len; j++) { + tmp0 = __lsx_vsrai_w(vec_x, 16); + reg0 = __lsx_vsrai_w(vec_x, 9); + vec_x = __lsx_vadd_w(vec_x, vec1); + tmp1 = __lsx_vsrai_w(vec_x, 16); + reg1 = __lsx_vsrai_w(vec_x, 9); + vec_x = __lsx_vadd_w(vec_x, vec1); + 
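ScaleFilterCols_LSX above walks the source with a 16.16 fixed-point coordinate: x >> 16 selects the left sample, and bits 15..9 of the fraction give a 7-bit blend weight (const1 = 0xFFFF, const2 = 0x40 in that loop). A scalar sketch of the per-pixel arithmetic, with an invented helper name, is:

#include <stdint.h>

// One output pixel of the fixed-point column filter: a is the left sample,
// b its right neighbour, f the top 7 bits of the 16-bit fraction of x.
// Mirrors the (b - a) * f, + 64, >> 7, + a sequence in the vector loop above.
static void ScaleFilterCols_Sketch(uint8_t* dst, const uint8_t* src,
                                   int dst_width, int x, int dx) {
  for (int j = 0; j < dst_width; ++j) {
    int xi = x >> 16;           // integer source index
    int f = (x & 0xffff) >> 9;  // blend fraction, 0..127
    int a = src[xi];
    int b = src[xi + 1];
    dst[j] = (uint8_t)(a + (((b - a) * f + 64) >> 7));
    x += dx;
  }
}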
DUP2_ARG2(__lsx_vand_v, reg0, const_7f, reg1, const_7f, reg0, reg1); + DUP2_ARG2(__lsx_vshuf4i_b, reg0, 0, reg1, 0, reg0, reg1); + DUP2_ARG2(__lsx_vxor_v, reg0, const_7f, reg1, const_7f, reg2, reg3); + DUP2_ARG2(__lsx_vilvl_b, reg0, reg2, reg1, reg3, reg4, reg6); + DUP2_ARG2(__lsx_vilvh_b, reg0, reg2, reg1, reg3, reg5, reg7); + LOAD_DATA(src, tmp0, src0); + LOAD_DATA(src, tmp1, src1); + DUP2_ARG2(__lsx_vaddi_wu, tmp0, 1, tmp1, 1, tmp0, tmp1); + LOAD_DATA(src, tmp0, src2); + LOAD_DATA(src, tmp1, src3); + DUP2_ARG2(__lsx_vilvl_b, src2, src0, src3, src1, tmp4, tmp6); + DUP2_ARG2(__lsx_vilvh_b, src2, src0, src3, src1, tmp5, tmp7); + DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, reg4, tmp5, reg5, tmp6, reg6, tmp7, reg7, + tmp0, tmp1, tmp2, tmp3); + DUP2_ARG3(__lsx_vsrani_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, dst0, dst1); + __lsx_vst(dst0, dst_argb, 0); + __lsx_vst(dst1, dst_argb, 16); + dst_argb += 32; + } +} + +void ScaleRowDown34_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* dst, + int dst_width) { + int x; + (void)src_stride; + __m128i src0, src1, src2, src3; + __m128i dst0, dst1, dst2; + __m128i shuff0 = {0x0908070504030100, 0x141311100F0D0C0B}; + __m128i shuff1 = {0x0F0D0C0B09080705, 0x1918171514131110}; + __m128i shuff2 = {0x141311100F0D0C0B, 0x1F1D1C1B19181715}; + + assert((dst_width % 3 == 0) && (dst_width > 0)); + + for (x = 0; x < dst_width; x += 48) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP2_ARG3(__lsx_vshuf_b, src1, src0, shuff0, src2, src1, shuff1, dst0, + dst1); + dst2 = __lsx_vshuf_b(src3, src2, shuff2); + __lsx_vst(dst0, dst, 0); + __lsx_vst(dst1, dst, 16); + __lsx_vst(dst2, dst, 32); + src_ptr += 64; + dst += 48; + } +} + +void ScaleRowDown34_0_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* d, + int dst_width) { + const uint8_t* src_nex = src_ptr + src_stride; + int x; + __m128i src0, src1, src2, src3, src4, src5, src6, src7; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9; + __m128i tmp10, tmp11, dst0, dst1, dst2; + __m128i const0 = {0x0103030101010103, 0x0101010303010101}; + __m128i const1 = {0x0301010101030301, 0x0103030101010103}; + __m128i const2 = {0x0101010303010101, 0x0301010101030301}; + __m128i shuff0 = {0x0504030202010100, 0x0A09090807060605}; + __m128i shuff1 = {0x0F0E0E0D0D0C0B0A, 0x1514131212111110}; + __m128i shuff2 = {0x0A09090807060605, 0x0F0E0E0D0D0C0B0A}; + __m128i shift0 = {0x0002000200010002, 0x0001000200020001}; + __m128i shift1 = {0x0002000100020002, 0x0002000200010002}; + __m128i shift2 = {0x0001000200020001, 0x0002000100020002}; + + assert((dst_width % 3 == 0) && (dst_width > 0)); + + for (x = 0; x < dst_width; x += 48) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex, 48, + src4, src5, src6, src7); + DUP4_ARG3(__lsx_vshuf_b, src0, src0, shuff0, src1, src0, shuff1, src1, src1, + shuff2, src2, src2, shuff0, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG3(__lsx_vshuf_b, src3, src2, shuff1, src3, src3, shuff2, src4, src4, + shuff0, src5, src4, shuff1, tmp4, tmp5, tmp6, tmp7); + DUP4_ARG3(__lsx_vshuf_b, src5, src5, shuff2, src6, src6, shuff0, src7, src6, + shuff1, src7, src7, shuff2, tmp8, tmp9, tmp10, tmp11); + DUP4_ARG2(__lsx_vdp2_h_bu, tmp0, const0, tmp1, const1, tmp2, const2, tmp3, + const0, src0, src1, src2, src3); + DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, const1, tmp5, const2, tmp6, const0, tmp7, + const1, src4, src5, src6, src7); + 
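The shuff0/shuff1/shuff2 tables in ScaleRowDown34_LSX above encode byte indices 0, 1 and 3 out of every group of 4, so the unfiltered 3/4 downscale simply drops every fourth byte. A scalar sketch (illustrative helper name, not patch code):

#include <stddef.h>
#include <stdint.h>

// Unfiltered 3/4-width downscale: keep source bytes 0, 1 and 3 of each
// group of 4, as selected by the shuffle tables in the LSX version above.
static void ScaleRowDown34_Sketch(const uint8_t* src_ptr, ptrdiff_t src_stride,
                                  uint8_t* dst, int dst_width) {
  (void)src_stride;  // the unfiltered variant reads a single row
  for (int x = 0; x < dst_width; x += 3) {
    dst[x + 0] = src_ptr[0];
    dst[x + 1] = src_ptr[1];
    dst[x + 2] = src_ptr[3];
    src_ptr += 4;
  }
}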
DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1, tmp11, + const2, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vsrar_h, src0, shift0, src1, shift1, src2, shift2, src3, + shift0, src0, src1, src2, src3); + DUP4_ARG2(__lsx_vsrar_h, src4, shift1, src5, shift2, src6, shift0, src7, + shift1, src4, src5, src6, src7); + DUP4_ARG2(__lsx_vsrar_h, tmp0, shift2, tmp1, shift0, tmp2, shift1, tmp3, + shift2, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vslli_h, src0, 1, src1, 1, src2, 1, src3, 1, tmp5, tmp6, + tmp7, tmp8); + DUP2_ARG2(__lsx_vslli_h, src4, 1, src5, 1, tmp9, tmp10); + DUP4_ARG2(__lsx_vadd_h, src0, tmp5, src1, tmp6, src2, tmp7, src3, tmp8, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vadd_h, src4, tmp9, src5, tmp10, src4, src5); + DUP4_ARG2(__lsx_vadd_h, src0, src6, src1, src7, src2, tmp0, src3, tmp1, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vadd_h, src4, tmp2, src5, tmp3, src4, src5); + DUP2_ARG3(__lsx_vsrarni_b_h, src1, src0, 2, src3, src2, 2, dst0, dst1); + dst2 = __lsx_vsrarni_b_h(src5, src4, 2); + __lsx_vst(dst0, d, 0); + __lsx_vst(dst1, d, 16); + __lsx_vst(dst2, d, 32); + src_ptr += 64; + src_nex += 64; + d += 48; + } +} + +void ScaleRowDown34_1_Box_LSX(const uint8_t* src_ptr, + ptrdiff_t src_stride, + uint8_t* d, + int dst_width) { + const uint8_t* src_nex = src_ptr + src_stride; + int x; + __m128i src0, src1, src2, src3, src4, src5, src6, src7; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9; + __m128i tmp10, tmp11, dst0, dst1, dst2; + __m128i const0 = {0x0103030101010103, 0x0101010303010101}; + __m128i const1 = {0x0301010101030301, 0x0103030101010103}; + __m128i const2 = {0x0101010303010101, 0x0301010101030301}; + __m128i shuff0 = {0x0504030202010100, 0x0A09090807060605}; + __m128i shuff1 = {0x0F0E0E0D0D0C0B0A, 0x1514131212111110}; + __m128i shuff2 = {0x0A09090807060605, 0x0F0E0E0D0D0C0B0A}; + __m128i shift0 = {0x0002000200010002, 0x0001000200020001}; + __m128i shift1 = {0x0002000100020002, 0x0002000200010002}; + __m128i shift2 = {0x0001000200020001, 0x0002000100020002}; + + assert((dst_width % 3 == 0) && (dst_width > 0)); + + for (x = 0; x < dst_width; x += 48) { + DUP4_ARG2(__lsx_vld, src_ptr, 0, src_ptr, 16, src_ptr, 32, src_ptr, 48, + src0, src1, src2, src3); + DUP4_ARG2(__lsx_vld, src_nex, 0, src_nex, 16, src_nex, 32, src_nex, 48, + src4, src5, src6, src7); + DUP4_ARG3(__lsx_vshuf_b, src0, src0, shuff0, src1, src0, shuff1, src1, src1, + shuff2, src2, src2, shuff0, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG3(__lsx_vshuf_b, src3, src2, shuff1, src3, src3, shuff2, src4, src4, + shuff0, src5, src4, shuff1, tmp4, tmp5, tmp6, tmp7); + DUP4_ARG3(__lsx_vshuf_b, src5, src5, shuff2, src6, src6, shuff0, src7, src6, + shuff1, src7, src7, shuff2, tmp8, tmp9, tmp10, tmp11); + DUP4_ARG2(__lsx_vdp2_h_bu, tmp0, const0, tmp1, const1, tmp2, const2, tmp3, + const0, src0, src1, src2, src3); + DUP4_ARG2(__lsx_vdp2_h_bu, tmp4, const1, tmp5, const2, tmp6, const0, tmp7, + const1, src4, src5, src6, src7); + DUP4_ARG2(__lsx_vdp2_h_bu, tmp8, const2, tmp9, const0, tmp10, const1, tmp11, + const2, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vsrar_h, src0, shift0, src1, shift1, src2, shift2, src3, + shift0, src0, src1, src2, src3); + DUP4_ARG2(__lsx_vsrar_h, src4, shift1, src5, shift2, src6, shift0, src7, + shift1, src4, src5, src6, src7); + DUP4_ARG2(__lsx_vsrar_h, tmp0, shift2, tmp1, shift0, tmp2, shift1, tmp3, + shift2, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vadd_h, src0, src6, src1, src7, src2, tmp0, src3, tmp1, + src0, src1, src2, src3); + DUP2_ARG2(__lsx_vadd_h, 
src4, tmp2, src5, tmp3, src4, src5); + DUP2_ARG3(__lsx_vsrarni_b_h, src1, src0, 1, src3, src2, 1, dst0, dst1); + dst2 = __lsx_vsrarni_b_h(src5, src4, 1); + __lsx_vst(dst0, d, 0); + __lsx_vst(dst1, d, 16); + __lsx_vst(dst2, d, 32); + src_ptr += 64; + src_nex += 64; + d += 48; + } +} + +#ifdef __cplusplus +} // extern "C" +} // namespace libyuv +#endif + +#endif // !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx) diff --git a/third-party/libyuv/third_party/libyuv/source/scale_rgb.cc b/third-party/libyuv/third_party/libyuv/source/scale_rgb.cc new file mode 100644 index 0000000000..8db59b56fc --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/source/scale_rgb.cc @@ -0,0 +1,66 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "libyuv/scale.h" /* For FilterMode */ + +#include +#include + +#include "libyuv/convert_argb.h" +#include "libyuv/convert_from_argb.h" +#include "libyuv/row.h" +#include "libyuv/scale_argb.h" +#include "libyuv/scale_rgb.h" + +#ifdef __cplusplus +namespace libyuv { +extern "C" { +#endif + +// Scale a 24 bit image. +// Converts to ARGB as intermediate step + +LIBYUV_API +int RGBScale(const uint8_t* src_rgb, + int src_stride_rgb, + int src_width, + int src_height, + uint8_t* dst_rgb, + int dst_stride_rgb, + int dst_width, + int dst_height, + enum FilterMode filtering) { + int r; + uint8_t* src_argb = + (uint8_t*)malloc(src_width * src_height * 4 + dst_width * dst_height * 4); + uint8_t* dst_argb = src_argb + src_width * src_height * 4; + + if (!src_argb) { + return 1; + } + + r = RGB24ToARGB(src_rgb, src_stride_rgb, src_argb, src_width * 4, src_width, + src_height); + if (!r) { + r = ARGBScale(src_argb, src_width * 4, src_width, src_height, dst_argb, + dst_width * 4, dst_width, dst_height, filtering); + if (!r) { + r = ARGBToRGB24(dst_argb, dst_width * 4, dst_rgb, dst_stride_rgb, + dst_width, dst_height); + } + } + free(src_argb); + return r; +} + +#ifdef __cplusplus +} // extern "C" +} // namespace libyuv +#endif diff --git a/third-party/libyuv/third_party/libyuv/source/scale_uv.cc b/third-party/libyuv/third_party/libyuv/source/scale_uv.cc index d9a314453e..8bd6b58602 100644 --- a/third-party/libyuv/third_party/libyuv/source/scale_uv.cc +++ b/third-party/libyuv/third_party/libyuv/source/scale_uv.cc @@ -83,9 +83,9 @@ static void ScaleUVDown2(int src_width, assert((dy & 0x1ffff) == 0); // Test vertical scale is multiple of 2. // Advance to odd row, even column. if (filtering == kFilterBilinear) { - src_uv += (y >> 16) * src_stride + (x >> 16) * 2; + src_uv += (y >> 16) * (int64_t)src_stride + (x >> 16) * 2; } else { - src_uv += (y >> 16) * src_stride + ((x >> 16) - 1) * 2; + src_uv += (y >> 16) * (int64_t)src_stride + ((x >> 16) - 1) * 2; } #if defined(HAS_SCALEUVROWDOWN2BOX_SSSE3) @@ -147,22 +147,6 @@ static void ScaleUVDown2(int src_width, } } #endif -#if defined(HAS_SCALEUVROWDOWN2_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleUVRowDown2 = - filtering == kFilterNone - ? ScaleUVRowDown2_Any_MMI - : (filtering == kFilterLinear ? ScaleUVRowDown2Linear_Any_MMI - : ScaleUVRowDown2Box_Any_MMI); - if (IS_ALIGNED(dst_width, 2)) { - ScaleUVRowDown2 = - filtering == kFilterNone - ? 
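Returning to the new scale_rgb.cc above: RGBScale converts RGB24 to ARGB, scales with ARGBScale, converts back, and returns 0 on success (1 if the temporary ARGB allocation fails). A minimal caller might look like the sketch below; the wrapper name and the halving factors are hypothetical, only the RGBScale signature and kFilterBilinear come from the sources.

#include <stdint.h>

#include "libyuv/scale.h"      /* FilterMode */
#include "libyuv/scale_rgb.h"  /* RGBScale */

// Hypothetical helper: shrink a packed 24-bit RGB image to half size.
// Strides are in bytes, so a tightly packed row is width * 3.
// (C caller shown; from C++ the call is libyuv::RGBScale.)
static int HalveRgb24(const uint8_t* src, int src_width, int src_height,
                      uint8_t* dst) {
  int dst_width = src_width / 2;
  int dst_height = src_height / 2;
  return RGBScale(src, src_width * 3, src_width, src_height,
                  dst, dst_width * 3, dst_width, dst_height,
                  kFilterBilinear);
}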
ScaleUVRowDown2_MMI - : (filtering == kFilterLinear ? ScaleUVRowDown2Linear_MMI - : ScaleUVRowDown2Box_MMI); - } - } -#endif #if defined(HAS_SCALEUVROWDOWN2_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleUVRowDown2 = @@ -209,14 +193,14 @@ static void ScaleUVDown4Box(int src_width, int dy) { int j; // Allocate 2 rows of UV. - const int kRowSize = (dst_width * 2 * 2 + 15) & ~15; - align_buffer_64(row, kRowSize * 2); + const int row_size = (dst_width * 2 * 2 + 15) & ~15; + align_buffer_64(row, row_size * 2); int row_stride = src_stride * (dy >> 16); void (*ScaleUVRowDown2)(const uint8_t* src_uv, ptrdiff_t src_stride, uint8_t* dst_uv, int dst_width) = ScaleUVRowDown2Box_C; // Advance to odd row, even column. - src_uv += (y >> 16) * src_stride + (x >> 16) * 2; + src_uv += (y >> 16) * (int64_t)src_stride + (x >> 16) * 2; (void)src_width; (void)src_height; (void)dx; @@ -250,9 +234,9 @@ static void ScaleUVDown4Box(int src_width, for (j = 0; j < dst_height; ++j) { ScaleUVRowDown2(src_uv, src_stride, row, dst_width * 2); - ScaleUVRowDown2(src_uv + src_stride * 2, src_stride, row + kRowSize, + ScaleUVRowDown2(src_uv + src_stride * 2, src_stride, row + row_size, dst_width * 2); - ScaleUVRowDown2(row, kRowSize, dst_uv, dst_width); + ScaleUVRowDown2(row, row_size, dst_uv, dst_width); src_uv += row_stride; dst_uv += dst_stride; } @@ -279,7 +263,7 @@ static void ScaleUVDownEven(int src_width, enum FilterMode filtering) { int j; int col_step = dx >> 16; - int row_stride = (dy >> 16) * src_stride; + int row_stride = (dy >> 16) * (int64_t)src_stride; void (*ScaleUVRowDownEven)(const uint8_t* src_uv, ptrdiff_t src_stride, int src_step, uint8_t* dst_uv, int dst_width) = filtering ? ScaleUVRowDownEvenBox_C : ScaleUVRowDownEven_C; @@ -287,7 +271,7 @@ static void ScaleUVDownEven(int src_width, (void)src_height; assert(IS_ALIGNED(src_width, 2)); assert(IS_ALIGNED(src_height, 2)); - src_uv += (y >> 16) * src_stride + (x >> 16) * 2; + src_uv += (y >> 16) * (int64_t)src_stride + (x >> 16) * 2; #if defined(HAS_SCALEUVROWDOWNEVEN_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { ScaleUVRowDownEven = filtering ? ScaleUVRowDownEvenBox_Any_SSSE3 @@ -316,16 +300,6 @@ static void ScaleUVDownEven(int src_width, } } #endif -#if defined(HAS_SCALEUVROWDOWNEVEN_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleUVRowDownEven = - filtering ? ScaleUVRowDownEvenBox_Any_MMI : ScaleUVRowDownEven_Any_MMI; - if (IS_ALIGNED(dst_width, 2)) { - ScaleUVRowDownEven = - filtering ? 
ScaleUVRowDownEvenBox_MMI : ScaleUVRowDownEven_MMI; - } - } -#endif #if defined(HAS_SCALEUVROWDOWNEVEN_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleUVRowDownEven = @@ -415,6 +389,14 @@ static void ScaleUVBilinearDown(int src_width, } } #endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(clip_src_width, 32)) { + InterpolateRow = InterpolateRow_LSX; + } + } +#endif #if defined(HAS_SCALEUVFILTERCOLS_SSSE3) if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) { ScaleUVFilterCols = ScaleUVFilterCols_SSSE3; @@ -447,7 +429,7 @@ static void ScaleUVBilinearDown(int src_width, } for (j = 0; j < dst_height; ++j) { int yi = y >> 16; - const uint8_t* src = src_uv + yi * src_stride; + const uint8_t* src = src_uv + yi * (int64_t)src_stride; if (filtering == kFilterLinear) { ScaleUVFilterCols(dst_uv, src, dst_width, x, dx); } else { @@ -513,14 +495,6 @@ static void ScaleUVBilinearUp(int src_width, } } #endif -#if defined(HAS_INTERPOLATEROW_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - InterpolateRow = InterpolateRow_Any_MMI; - if (IS_ALIGNED(dst_width, 4)) { - InterpolateRow = InterpolateRow_MMI; - } - } -#endif #if defined(HAS_INTERPOLATEROW_MSA) if (TestCpuFlag(kCpuHasMSA)) { InterpolateRow = InterpolateRow_Any_MSA; @@ -528,6 +502,14 @@ static void ScaleUVBilinearUp(int src_width, InterpolateRow = InterpolateRow_MSA; } } +#endif +#if defined(HAS_INTERPOLATEROW_LSX) + if (TestCpuFlag(kCpuHasLSX)) { + InterpolateRow = InterpolateRow_Any_LSX; + if (IS_ALIGNED(dst_width, 16)) { + InterpolateRow = InterpolateRow_LSX; + } + } #endif if (src_width >= 32768) { ScaleUVFilterCols = filtering ? ScaleUVFilterCols64_C : ScaleUVCols64_C; @@ -566,14 +548,6 @@ static void ScaleUVBilinearUp(int src_width, } } #endif -#if defined(HAS_SCALEUVCOLS_MMI) - if (!filtering && TestCpuFlag(kCpuHasMMI)) { - ScaleUVFilterCols = ScaleUVCols_Any_MMI; - if (IS_ALIGNED(dst_width, 1)) { - ScaleUVFilterCols = ScaleUVCols_MMI; - } - } -#endif #if defined(HAS_SCALEUVCOLS_MSA) if (!filtering && TestCpuFlag(kCpuHasMSA)) { ScaleUVFilterCols = ScaleUVCols_Any_MSA; @@ -588,11 +562,6 @@ static void ScaleUVBilinearUp(int src_width, if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(dst_width, 8)) { ScaleUVFilterCols = ScaleUVColsUp2_SSSE3; } -#endif -#if defined(HAS_SCALEUVCOLSUP2_MMI) - if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(dst_width, 4)) { - ScaleUVFilterCols = ScaleUVColsUp2_MMI; - } #endif } @@ -602,14 +571,14 @@ static void ScaleUVBilinearUp(int src_width, { int yi = y >> 16; - const uint8_t* src = src_uv + yi * src_stride; + const uint8_t* src = src_uv + yi * (int64_t)src_stride; // Allocate 2 rows of UV. 
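One change repeated throughout these scale_uv.cc hunks is promoting one operand of the row-offset arithmetic to int64_t, presumably so that yi * src_stride cannot overflow 32-bit int on very large planes; with made-up numbers, 70000 rows times a 32768-byte stride is about 2.29e9, which exceeds INT32_MAX, while the widened expression below stays exact.

#include <stdint.h>

// Widening one operand makes the whole offset computation 64-bit, matching
// the (y >> 16) * (int64_t)src_stride pattern used in the hunks above.
static const uint8_t* RowPtr(const uint8_t* base, int yi, int src_stride) {
  return base + (int64_t)yi * src_stride;
}

The two-row allocation referred to in the comment just above continues below.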
- const int kRowSize = (dst_width * 2 + 15) & ~15; - align_buffer_64(row, kRowSize * 2); + const int row_size = (dst_width * 2 + 15) & ~15; + align_buffer_64(row, row_size * 2); uint8_t* rowptr = row; - int rowstride = kRowSize; + int rowstride = row_size; int lasty = yi; ScaleUVFilterCols(rowptr, src, dst_width, x, dx); @@ -617,7 +586,9 @@ static void ScaleUVBilinearUp(int src_width, src += src_stride; } ScaleUVFilterCols(rowptr + rowstride, src, dst_width, x, dx); - src += src_stride; + if (src_height > 2) { + src += src_stride; + } for (j = 0; j < dst_height; ++j) { yi = y >> 16; @@ -625,14 +596,16 @@ static void ScaleUVBilinearUp(int src_width, if (y > max_y) { y = max_y; yi = y >> 16; - src = src_uv + yi * src_stride; + src = src_uv + yi * (int64_t)src_stride; } if (yi != lasty) { ScaleUVFilterCols(rowptr, src, dst_width, x, dx); rowptr += rowstride; rowstride = -rowstride; lasty = yi; - src += src_stride; + if ((y + 65536) < max_y) { + src += src_stride; + } } } if (filtering == kFilterLinear) { @@ -690,12 +663,13 @@ void ScaleUVLinearUp2(int src_width, #endif if (dst_height == 1) { - ScaleRowUp(src_uv + ((src_height - 1) / 2) * src_stride, dst_uv, dst_width); + ScaleRowUp(src_uv + ((src_height - 1) / 2) * (int64_t)src_stride, dst_uv, + dst_width); } else { dy = FixedDiv(src_height - 1, dst_height - 1); y = (1 << 15) - 1; for (i = 0; i < dst_height; ++i) { - ScaleRowUp(src_uv + (y >> 16) * src_stride, dst_uv, dst_width); + ScaleRowUp(src_uv + (y >> 16) * (int64_t)src_stride, dst_uv, dst_width); dst_uv += dst_stride; y += dy; } @@ -777,9 +751,9 @@ void ScaleUVLinearUp2_16(int src_width, // This function can only scale up by 2 times horizontally. assert(src_width == ((dst_width + 1) / 2)); -#ifdef HAS_SCALEUVROWUP2LINEAR_16_SSE2 - if (TestCpuFlag(kCpuHasSSE2)) { - ScaleRowUp = ScaleUVRowUp2_Linear_16_Any_SSE2; +#ifdef HAS_SCALEUVROWUP2LINEAR_16_SSE41 + if (TestCpuFlag(kCpuHasSSE41)) { + ScaleRowUp = ScaleUVRowUp2_Linear_16_Any_SSE41; } #endif @@ -796,12 +770,13 @@ void ScaleUVLinearUp2_16(int src_width, #endif if (dst_height == 1) { - ScaleRowUp(src_uv + ((src_height - 1) / 2) * src_stride, dst_uv, dst_width); + ScaleRowUp(src_uv + ((src_height - 1) / 2) * (int64_t)src_stride, dst_uv, + dst_width); } else { dy = FixedDiv(src_height - 1, dst_height - 1); y = (1 << 15) - 1; for (i = 0; i < dst_height; ++i) { - ScaleRowUp(src_uv + (y >> 16) * src_stride, dst_uv, dst_width); + ScaleRowUp(src_uv + (y >> 16) * (int64_t)src_stride, dst_uv, dst_width); dst_uv += dst_stride; y += dy; } @@ -829,9 +804,9 @@ void ScaleUVBilinearUp2_16(int src_width, assert(src_width == ((dst_width + 1) / 2)); assert(src_height == ((dst_height + 1) / 2)); -#ifdef HAS_SCALEUVROWUP2BILINEAR_16_SSE2 - if (TestCpuFlag(kCpuHasSSE2)) { - Scale2RowUp = ScaleUVRowUp2_Bilinear_16_Any_SSE2; +#ifdef HAS_SCALEUVROWUP2BILINEAR_16_SSE41 + if (TestCpuFlag(kCpuHasSSE41)) { + Scale2RowUp = ScaleUVRowUp2_Bilinear_16_Any_SSE41; } #endif @@ -896,14 +871,6 @@ static void ScaleUVSimple(int src_width, } } #endif -#if defined(HAS_SCALEUVCOLS_MMI) - if (TestCpuFlag(kCpuHasMMI)) { - ScaleUVCols = ScaleUVCols_Any_MMI; - if (IS_ALIGNED(dst_width, 1)) { - ScaleUVCols = ScaleUVCols_MMI; - } - } -#endif #if defined(HAS_SCALEUVCOLS_MSA) if (TestCpuFlag(kCpuHasMSA)) { ScaleUVCols = ScaleUVCols_Any_MSA; @@ -918,16 +885,12 @@ static void ScaleUVSimple(int src_width, if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(dst_width, 8)) { ScaleUVCols = ScaleUVColsUp2_SSSE3; } -#endif -#if defined(HAS_SCALEUVCOLSUP2_MMI) - if (TestCpuFlag(kCpuHasMMI) && 
IS_ALIGNED(dst_width, 4)) { - ScaleUVCols = ScaleUVColsUp2_MMI; - } #endif } for (j = 0; j < dst_height; ++j) { - ScaleUVCols(dst_uv, src_uv + (y >> 16) * src_stride, dst_width, x, dx); + ScaleUVCols(dst_uv, src_uv + (y >> 16) * (int64_t)src_stride, dst_width, x, + dx); dst_uv += dst_stride; y += dy; } @@ -935,43 +898,43 @@ static void ScaleUVSimple(int src_width, // Copy UV with optional flipping #if HAS_UVCOPY -static int UVCopy(const uint8_t* src_UV, +static int UVCopy(const uint8_t* src_uv, int src_stride_uv, - uint8_t* dst_UV, + uint8_t* dst_uv, int dst_stride_uv, int width, int height) { - if (!src_UV || !dst_UV || width <= 0 || height == 0) { + if (!src_uv || !dst_uv || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. if (height < 0) { height = -height; - src_UV = src_UV + (height - 1) * src_stride_uv; + src_uv = src_uv + (height - 1) * (int64_t)src_stride_uv; src_stride_uv = -src_stride_uv; } - CopyPlane(src_UV, src_stride_uv, dst_UV, dst_stride_uv, width * 2, height); + CopyPlane(src_uv, src_stride_uv, dst_uv, dst_stride_uv, width * 2, height); return 0; } -static int UVCopy_16(const uint16_t* src_UV, +static int UVCopy_16(const uint16_t* src_uv, int src_stride_uv, - uint16_t* dst_UV, + uint16_t* dst_uv, int dst_stride_uv, int width, int height) { - if (!src_UV || !dst_UV || width <= 0 || height == 0) { + if (!src_uv || !dst_uv || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. if (height < 0) { height = -height; - src_UV = src_UV + (height - 1) * src_stride_uv; + src_uv = src_uv + (height - 1) * (int64_t)src_stride_uv; src_stride_uv = -src_stride_uv; } - CopyPlane_16(src_UV, src_stride_uv, dst_UV, dst_stride_uv, width * 2, height); + CopyPlane_16(src_uv, src_stride_uv, dst_uv, dst_stride_uv, width * 2, height); return 0; } #endif // HAS_UVCOPY @@ -1005,7 +968,7 @@ static void ScaleUV(const uint8_t* src, // Negative src_height means invert the image. if (src_height < 0) { src_height = -src_height; - src = src + (src_height - 1) * src_stride; + src = src + (src_height - 1) * (int64_t)src_stride; src_stride = -src_stride; } ScaleSlope(src_width, src_height, dst_width, dst_height, filtering, &x, &y, @@ -1020,7 +983,7 @@ static void ScaleUV(const uint8_t* src, if (clip_y) { int64_t clipf = (int64_t)(clip_y)*dy; y += (clipf & 0xffff); - src += (clipf >> 16) * src_stride; + src += (clipf >> 16) * (int64_t)src_stride; dst += clip_y * dst_stride; } @@ -1061,8 +1024,8 @@ static void ScaleUV(const uint8_t* src, #ifdef HAS_UVCOPY if (dx == 0x10000 && dy == 0x10000) { // Straight copy. - UVCopy(src + (y >> 16) * src_stride + (x >> 16) * 2, src_stride, dst, - dst_stride, clip_width, clip_height); + UVCopy(src + (y >> 16) * (int64_t)src_stride + (x >> 16) * 2, + src_stride, dst, dst_stride, clip_width, clip_height); return; } #endif @@ -1073,7 +1036,7 @@ static void ScaleUV(const uint8_t* src, if (dx == 0x10000 && (x & 0xffff) == 0) { // Arbitrary scale vertically, but unscaled horizontally. ScalePlaneVertical(src_height, clip_width, clip_height, src_stride, - dst_stride, src, dst, x, y, dy, 4, filtering); + dst_stride, src, dst, x, y, dy, /*bpp=*/2, filtering); return; } if (filtering && (dst_width + 1) / 2 == src_width) { @@ -1155,7 +1118,7 @@ int UVScale_16(const uint16_t* src_uv, // Negative src_height means invert the image. 
if (src_height < 0) { src_height = -src_height; - src_uv = src_uv + (src_height - 1) * src_stride_uv; + src_uv = src_uv + (src_height - 1) * (int64_t)src_stride_uv; src_stride_uv = -src_stride_uv; } src_width = Abs(src_width); @@ -1163,12 +1126,13 @@ int UVScale_16(const uint16_t* src_uv, #ifdef HAS_UVCOPY if (!filtering && src_width == dst_width && (src_height % dst_height == 0)) { if (dst_height == 1) { - UVCopy_16(src_uv + ((src_height - 1) / 2) * src_stride_uv, src_stride_uv, - dst_uv, dst_stride_uv, dst_width, dst_height); + UVCopy_16(src_uv + ((src_height - 1) / 2) * (int64_t)src_stride_uv, + src_stride_uv, dst_uv, dst_stride_uv, dst_width, dst_height); } else { dy = src_height / dst_height; - UVCopy_16(src_uv + src_stride_uv * ((dy - 1) / 2), src_stride_uv * dy, - dst_uv, dst_stride_uv, dst_width, dst_height); + UVCopy_16(src_uv + ((dy - 1) / 2) * (int64_t)src_stride_uv, + dy * (int64_t)src_stride_uv, dst_uv, dst_stride_uv, dst_width, + dst_height); } return 0; diff --git a/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/roll_deps.py b/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/roll_deps.py index 5b3cf8d6ee..5496e42403 100755 --- a/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/roll_deps.py +++ b/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/roll_deps.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright 2017 The LibYuv Project Authors. All rights reserved. # # Use of this source code is governed by a BSD-style license @@ -11,7 +12,6 @@ # https://webrtc.googlesource.com/src/+/master/tools_webrtc/autoroller/roll_deps.py # customized for libyuv. - """Script to automatically roll dependencies in the libyuv DEPS file.""" import argparse @@ -22,7 +22,7 @@ import os import re import subprocess import sys -import urllib2 +import urllib.request # Skip these dependencies (list without solution name prefix). @@ -78,7 +78,7 @@ def ParseDepsDict(deps_content): def ParseLocalDepsFile(filename): with open(filename, 'rb') as f: - deps_content = f.read() + deps_content = f.read().decode('utf-8') return ParseDepsDict(deps_content) @@ -98,7 +98,7 @@ def ParseCommitPosition(commit_message): def _RunCommand(command, working_dir=None, ignore_exit_code=False, - extra_env=None): + extra_env=None, input_data=None): """Runs a command and returns the output from that command. If the command fails (exit code != 0), the function will exit the process. @@ -113,12 +113,14 @@ def _RunCommand(command, working_dir=None, ignore_exit_code=False, assert all(isinstance(value, str) for value in extra_env.values()) logging.debug('extra env: %s', extra_env) env.update(extra_env) - p = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, env=env, - cwd=working_dir, universal_newlines=True) - std_output = p.stdout.read() - err_output = p.stderr.read() - p.wait() + p = subprocess.Popen(command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + cwd=working_dir, + universal_newlines=True) + std_output, err_output = p.communicate(input_data) p.stdout.close() p.stderr.close() if not ignore_exit_code and p.returncode != 0: @@ -154,7 +156,7 @@ def _ReadGitilesContent(url): # Download and decode BASE64 content until # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed. 
base64_content = ReadUrlContent(url + '?format=TEXT') - return base64.b64decode(base64_content[0]) + return base64.b64decode(base64_content[0]).decode('utf-8') def ReadRemoteCrFile(path_below_src, revision): @@ -170,7 +172,7 @@ def ReadRemoteCrCommit(revision): def ReadUrlContent(url): """Connect to a remote host and read the contents. Returns a list of lines.""" - conn = urllib2.urlopen(url) + conn = urllib.request.urlopen(url) try: return conn.readlines() except IOError as e: @@ -193,7 +195,7 @@ def GetMatchingDepsEntries(depsentry_dict, dir_path): A list of DepsEntry objects. """ result = [] - for path, depsentry in depsentry_dict.iteritems(): + for path, depsentry in depsentry_dict.items(): if path == dir_path: result.append(depsentry) else: @@ -203,26 +205,24 @@ def GetMatchingDepsEntries(depsentry_dict, dir_path): result.append(depsentry) return result - def BuildDepsentryDict(deps_dict): - """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict.""" + """Builds a dict of paths to DepsEntry objects from a raw deps dict.""" result = {} + def AddDepsEntries(deps_subdict): - for path, deps_url_spec in deps_subdict.iteritems(): - # The deps url is either an URL and a condition, or just the URL. + for path, deps_url_spec in deps_subdict.items(): if isinstance(deps_url_spec, dict): if deps_url_spec.get('dep_type') == 'cipd': continue deps_url = deps_url_spec['url'] else: deps_url = deps_url_spec - - if not result.has_key(path): + if not path in result: url, revision = deps_url.split('@') if deps_url else (None, None) result[path] = DepsEntry(path, url, revision) AddDepsEntries(deps_dict['deps']) - for deps_os in ['win', 'mac', 'unix', 'android', 'ios', 'unix']: + for deps_os in ['win', 'mac', 'linux', 'android', 'ios', 'unix']: AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {})) return result @@ -245,7 +245,7 @@ def CalculateChangedDeps(libyuv_deps, new_cr_deps): result = [] libyuv_entries = BuildDepsentryDict(libyuv_deps) new_cr_entries = BuildDepsentryDict(new_cr_deps) - for path, libyuv_deps_entry in libyuv_entries.iteritems(): + for path, libyuv_deps_entry in libyuv_entries.items(): if path in DONT_AUTOROLL_THESE: continue cr_deps_entry = new_cr_entries.get(path) @@ -277,7 +277,7 @@ def CalculateChangedClang(new_cr_rev): return match.group(1) raise RollError('Could not parse Clang revision from:\n' + '\n'.join(' ' + l for l in lines)) - with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f: + with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'r') as f: current_lines = f.readlines() current_rev = GetClangRev(current_lines) @@ -335,10 +335,10 @@ def UpdateDepsFile(deps_filename, old_cr_revision, new_cr_revision, # Update the chromium_revision variable. with open(deps_filename, 'rb') as deps_file: - deps_content = deps_file.read() + deps_content = deps_file.read().decode('utf-8') deps_content = deps_content.replace(old_cr_revision, new_cr_revision) with open(deps_filename, 'wb') as deps_file: - deps_file.write(deps_content) + deps_file.write(deps_content.encode('utf-8')) # Update each individual DEPS entry. for dep in changed_deps: @@ -415,13 +415,14 @@ def _UploadCL(commit_queue_mode): - 1: Run trybots but do not submit to CQ. - 0: Skip CQ, upload only. 
""" - cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks', '--send-mail'] + cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks'] if commit_queue_mode >= 2: logging.info('Sending the CL to the CQ...') - cmd.extend(['--use-commit-queue']) + cmd.extend(['-o', 'label=Bot-Commit+1']) + cmd.extend(['-o', 'label=Commit-Queue+2']) elif commit_queue_mode >= 1: logging.info('Starting CQ dry run...') - cmd.extend(['--cq-dry-run']) + cmd.extend(['-o', 'label=Commit-Queue+1']) extra_env = { 'EDITOR': 'true', 'SKIP_GCE_AUTH_FOR_GIT': '1', diff --git a/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/unittests/roll_deps_test.py b/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/unittests/roll_deps_test.py index a7e3f8a87e..af86bdd586 100755 --- a/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/unittests/roll_deps_test.py +++ b/third-party/libyuv/third_party/libyuv/tools_libyuv/autoroller/unittests/roll_deps_test.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright 2017 The LibYuv Project Authors. All rights reserved. # # Use of this source code is governed by a BSD-style license @@ -14,15 +15,13 @@ import sys import tempfile import unittest +import roll_deps +from roll_deps import CalculateChangedDeps, GetMatchingDepsEntries, \ + ParseDepsDict, ParseLocalDepsFile, UpdateDepsFile SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir) sys.path.append(PARENT_DIR) -import roll_deps # pylint: disable=wrong-import-position -from roll_deps import CalculateChangedDeps, GetMatchingDepsEntries, \ - ParseDepsDict, ParseLocalDepsFile, \ - UpdateDepsFile # pylint: disable=wrong-import-position - TEST_DATA_VARS = { 'chromium_git': 'https://chromium.googlesource.com', @@ -46,7 +45,7 @@ class TestError(Exception): pass -class FakeCmd(object): +class FakeCmd(): def __init__(self): self.expectations = [] @@ -86,43 +85,43 @@ class TestRollChromiumRevision(unittest.TestCase): def testVarLookup(self): local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} lookup = roll_deps.VarLookup(local_scope) - self.assertEquals(lookup('foo'), 'bar') + self.assertEqual(lookup('foo'), 'bar') def testUpdateDepsFile(self): new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' current_rev = TEST_DATA_VARS['chromium_revision'] UpdateDepsFile(self._libyuv_depsfile, current_rev, new_rev, []) - with open(self._libyuv_depsfile) as deps_file: + with open(self._libyuv_depsfile, 'r') as deps_file: deps_contents = deps_file.read() self.assertTrue(new_rev in deps_contents, 'Failed to find %s in\n%s' % (new_rev, deps_contents)) def testParseDepsDict(self): - with open(self._libyuv_depsfile) as deps_file: + with open(self._libyuv_depsfile, 'r') as deps_file: deps_contents = deps_file.read() local_scope = ParseDepsDict(deps_contents) vars_dict = local_scope['vars'] def assertVar(variable_name): - self.assertEquals(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) + self.assertEqual(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) assertVar('chromium_git') assertVar('chromium_revision') - self.assertEquals(len(local_scope['deps']), 3) + self.assertEqual(len(local_scope['deps']), 3) def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') - self.assertEquals(len(entries), 1) - self.assertEquals(entries[0], DEPS_ENTRIES['src/testing/gtest']) + self.assertEqual(len(entries), 1) + self.assertEqual(entries[0], DEPS_ENTRIES['src/testing/gtest']) def 
testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') - self.assertEquals(len(entries), 2) + self.assertEqual(len(entries), 2) def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build') - self.assertEquals(len(entries), 1) - self.assertEquals(entries[0], DEPS_ENTRIES['src/build']) + self.assertEqual(len(entries), 1) + self.assertEqual(entries[0], DEPS_ENTRIES['src/build']) def testCalculateChangedDeps(self): _SetupGitLsRemoteCall(self.fake, @@ -130,14 +129,14 @@ class TestRollChromiumRevision(unittest.TestCase): libyuv_deps = ParseLocalDepsFile(self._libyuv_depsfile) new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile) changed_deps = CalculateChangedDeps(libyuv_deps, new_cr_deps) - self.assertEquals(len(changed_deps), 2) - self.assertEquals(changed_deps[0].path, 'src/build') - self.assertEquals(changed_deps[0].current_rev, BUILD_OLD_REV) - self.assertEquals(changed_deps[0].new_rev, BUILD_NEW_REV) + self.assertEqual(len(changed_deps), 2) + self.assertEqual(changed_deps[0].path, 'src/build') + self.assertEqual(changed_deps[0].current_rev, BUILD_OLD_REV) + self.assertEqual(changed_deps[0].new_rev, BUILD_NEW_REV) - self.assertEquals(changed_deps[1].path, 'src/buildtools') - self.assertEquals(changed_deps[1].current_rev, BUILDTOOLS_OLD_REV) - self.assertEquals(changed_deps[1].new_rev, BUILDTOOLS_NEW_REV) + self.assertEqual(changed_deps[1].path, 'src/buildtools') + self.assertEqual(changed_deps[1].current_rev, BUILDTOOLS_OLD_REV) + self.assertEqual(changed_deps[1].new_rev, BUILDTOOLS_NEW_REV) def _SetupGitLsRemoteCall(cmd_fake, url, revision): diff --git a/third-party/libyuv/third_party/libyuv/tools_libyuv/get_landmines.py b/third-party/libyuv/third_party/libyuv/tools_libyuv/get_landmines.py index c554f04a39..8b33483e64 100755 --- a/third-party/libyuv/third_party/libyuv/tools_libyuv/get_landmines.py +++ b/third-party/libyuv/third_party/libyuv/tools_libyuv/get_landmines.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + # Copyright 2016 The LibYuv Project Authors. All rights reserved. # # Use of this source code is governed by a BSD-style license @@ -25,8 +26,8 @@ def print_landmines(): # dependency problems, fix the dependency problems instead of adding a # landmine. # See the Chromium version in src/build/get_landmines.py for usage examples. - print 'Clobber to remove GYP artifacts after switching bots to GN.' - print 'Another try to remove GYP artifacts after switching bots to GN.' + print('Clobber to remove GYP artifacts after switching bots to GN.') + print('Another try to remove GYP artifacts after switching bots to GN.') def main(): diff --git a/third-party/libyuv/third_party/libyuv/unit_test/color_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/color_test.cc index e2d037ff79..01267ff1ed 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/color_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/color_test.cc @@ -22,7 +22,8 @@ namespace libyuv { // TODO(fbarchard): clang x86 has a higher accuracy YUV to RGB. 
// Port to Visual C and other CPUs -#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) +#if !defined(LIBYUV_BIT_EXACT) && !defined(LIBYUV_DISABLE_X86) && \ + (defined(__x86_64__) || defined(__i386__)) #define ERROR_FULL 5 #define ERROR_J420 4 #else @@ -579,28 +580,28 @@ TEST_F(LibYUVColorTest, TestGreyYUV) { static void PrintHistogram(int rh[256], int gh[256], int bh[256]) { int i; - printf("hist"); + printf("hist "); for (i = 0; i < 256; ++i) { if (rh[i] || gh[i] || bh[i]) { - printf("\t%8d", i - 128); + printf(" %8d", i - 128); } } - printf("\nred"); + printf("\nred "); for (i = 0; i < 256; ++i) { if (rh[i] || gh[i] || bh[i]) { - printf("\t%8d", rh[i]); + printf(" %8d", rh[i]); } } printf("\ngreen"); for (i = 0; i < 256; ++i) { if (rh[i] || gh[i] || bh[i]) { - printf("\t%8d", gh[i]); + printf(" %8d", gh[i]); } } - printf("\nblue"); + printf("\nblue "); for (i = 0; i < 256; ++i) { if (rh[i] || gh[i] || bh[i]) { - printf("\t%8d", bh[i]); + printf(" %8d", bh[i]); } } printf("\n"); @@ -608,10 +609,10 @@ static void PrintHistogram(int rh[256], int gh[256], int bh[256]) { // Step by 5 on inner loop goes from 0 to 255 inclusive. // Set to 1 for better converage. 3, 5 or 17 for faster testing. -#ifdef ENABLE_SLOW_TESTS -#define FASTSTEP 1 -#else +#ifdef DISABLE_SLOW_TESTS #define FASTSTEP 5 +#else +#define FASTSTEP 1 #endif // BT.601 limited range. diff --git a/third-party/libyuv/third_party/libyuv/unit_test/convert_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/convert_test.cc index 3855838381..47eff2ece2 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/convert_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/convert_test.cc @@ -33,9 +33,14 @@ // Some functions fail on big endian. Enable these tests on all cpus except // PowerPC, but they are not optimized so disabled by default. -#if !defined(__powerpc__) && defined(ENABLE_SLOW_TESTS) +#if !defined(DISABLE_SLOW_TESTS) && !defined(__powerpc__) #define LITTLE_ENDIAN_ONLY_TEST 1 #endif +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +// SLOW TESTS are those that are unoptimized C code. +// FULL TESTS are optimized but test many variations of the same code. 
+#define ENABLE_FULL_TESTS +#endif namespace libyuv { @@ -140,7 +145,7 @@ namespace libyuv { benchmark_width_ + 1, _Any, +, 0, SRC_DEPTH) \ TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y, \ - benchmark_width_, _Unaligned, +, 1, SRC_DEPTH) \ + benchmark_width_, _Unaligned, +, 2, SRC_DEPTH) \ TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y, \ benchmark_width_, _Invert, -, 0, SRC_DEPTH) \ @@ -173,6 +178,7 @@ TESTPLANARTOP(I210, uint16_t, 2, 2, 1, I010, uint16_t, 2, 2, 2, 10) TESTPLANARTOP(I412, uint16_t, 2, 1, 1, I012, uint16_t, 2, 2, 2, 12) TESTPLANARTOP(I212, uint16_t, 2, 2, 1, I012, uint16_t, 2, 2, 2, 12) TESTPLANARTOP(I010, uint16_t, 2, 2, 2, I420, uint8_t, 1, 2, 2, 10) +TESTPLANARTOP(I210, uint16_t, 2, 2, 1, I420, uint8_t, 1, 2, 2, 10) TESTPLANARTOP(I210, uint16_t, 2, 2, 1, I422, uint8_t, 1, 2, 1, 10) TESTPLANARTOP(I410, uint16_t, 2, 1, 1, I444, uint8_t, 1, 1, 1, 10) TESTPLANARTOP(I012, uint16_t, 2, 2, 2, I420, uint8_t, 1, 2, 2, 12) @@ -274,7 +280,7 @@ TESTPLANARTOP(I412, uint16_t, 2, 1, 1, I444, uint8_t, 1, 1, 1, 12) _Any, +, 0, PN, OFF_U, OFF_V) \ TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, \ - _Unaligned, +, 1, PN, OFF_U, OFF_V) \ + _Unaligned, +, 2, PN, OFF_U, OFF_V) \ TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, \ -, 0, PN, OFF_U, OFF_V) \ @@ -285,6 +291,8 @@ TESTPLANARTOP(I412, uint16_t, 2, 1, 1, I444, uint8_t, 1, 1, 1, 12) TESTAPLANARTOP(Android420, I420, 1, 0, 0, 2, 2, I420, 2, 2) TESTAPLANARTOP(Android420, NV12, 2, 0, 1, 2, 2, I420, 2, 2) TESTAPLANARTOP(Android420, NV21, 2, 1, 0, 2, 2, I420, 2, 2) +#undef TESTAPLANARTOP +#undef TESTAPLANARTOPI // wrapper to keep API the same int I400ToNV21(const uint8_t* src_y, @@ -389,7 +397,7 @@ int I400ToNV21(const uint8_t* src_y, DST_SUBSAMP_Y, benchmark_width_ + 1, _Any, +, 0, SRC_DEPTH) \ TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1, \ + DST_SUBSAMP_Y, benchmark_width_, _Unaligned, +, 2, \ SRC_DEPTH) \ TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ @@ -412,7 +420,7 @@ TESTPLANARTOBP(I212, uint16_t, 2, 2, 1, P212, uint16_t, 2, 2, 1, 12) #define TESTBIPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, \ DST_SUBSAMP_X, DST_SUBSAMP_Y, W1280, N, NEG, OFF, \ - DOY, SRC_DEPTH) \ + DOY, SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) \ TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \ static_assert(SRC_BPC == 1 || SRC_BPC == 2, "SRC BPC unsupported"); \ static_assert(DST_BPC == 1 || DST_BPC == 2, "DST BPC unsupported"); \ @@ -427,12 +435,17 @@ TESTPLANARTOBP(I212, uint16_t, 2, 2, 1, P212, uint16_t, 2, 2, 1, 12) const int kWidth = W1280; \ const int kHeight = benchmark_height_; \ const int kSrcHalfWidth = SUBSAMPLE(kWidth, SRC_SUBSAMP_X); \ - const int kSrcHalfHeight = SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); \ const int kDstHalfWidth = SUBSAMPLE(kWidth, DST_SUBSAMP_X); \ const int kDstHalfHeight = SUBSAMPLE(kHeight, DST_SUBSAMP_Y); \ - align_buffer_page_end(src_y, kWidth* kHeight* SRC_BPC + OFF); \ - align_buffer_page_end(src_uv, \ - 2 * 
kSrcHalfWidth * kSrcHalfHeight * SRC_BPC + OFF); \ + const int kPaddedWidth = (kWidth + (TILE_WIDTH - 1)) & ~(TILE_WIDTH - 1); \ + const int kPaddedHeight = \ + (kHeight + (TILE_HEIGHT - 1)) & ~(TILE_HEIGHT - 1); \ + const int kSrcHalfPaddedWidth = SUBSAMPLE(kPaddedWidth, SRC_SUBSAMP_X); \ + const int kSrcHalfPaddedHeight = SUBSAMPLE(kPaddedHeight, SRC_SUBSAMP_Y); \ + align_buffer_page_end(src_y, kPaddedWidth* kPaddedHeight* SRC_BPC + OFF); \ + align_buffer_page_end( \ + src_uv, \ + 2 * kSrcHalfPaddedWidth * kSrcHalfPaddedHeight * SRC_BPC + OFF); \ align_buffer_page_end(dst_y_c, kWidth* kHeight* DST_BPC); \ align_buffer_page_end(dst_uv_c, \ 2 * kDstHalfWidth * kDstHalfHeight * DST_BPC); \ @@ -441,11 +454,11 @@ TESTPLANARTOBP(I212, uint16_t, 2, 2, 1, P212, uint16_t, 2, 2, 1, 12) 2 * kDstHalfWidth * kDstHalfHeight * DST_BPC); \ SRC_T* src_y_p = reinterpret_cast(src_y + OFF); \ SRC_T* src_uv_p = reinterpret_cast(src_uv + OFF); \ - for (int i = 0; i < kWidth * kHeight; ++i) { \ + for (int i = 0; i < kPaddedWidth * kPaddedHeight; ++i) { \ src_y_p[i] = \ (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH))); \ } \ - for (int i = 0; i < kSrcHalfWidth * kSrcHalfHeight * 2; ++i) { \ + for (int i = 0; i < kSrcHalfPaddedWidth * kSrcHalfPaddedHeight * 2; ++i) { \ src_uv_p[i] = \ (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH))); \ } \ @@ -490,136 +503,148 @@ TESTPLANARTOBP(I212, uint16_t, 2, 2, 1, P212, uint16_t, 2, 2, 1, 12) #define TESTBIPLANARTOBP(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, \ - DST_SUBSAMP_X, DST_SUBSAMP_Y, SRC_DEPTH) \ + DST_SUBSAMP_X, DST_SUBSAMP_Y, SRC_DEPTH, TILE_WIDTH, \ + TILE_HEIGHT) \ TESTBIPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ DST_SUBSAMP_Y, benchmark_width_ + 1, _Any, +, 0, 1, \ - SRC_DEPTH) \ + SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1, 1, \ - SRC_DEPTH) \ + DST_SUBSAMP_Y, benchmark_width_, _Unaligned, +, 2, 1, \ + SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ DST_SUBSAMP_Y, benchmark_width_, _Invert, -, 0, 1, \ - SRC_DEPTH) \ + SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_, _Opt, +, 0, 1, SRC_DEPTH) \ + DST_SUBSAMP_Y, benchmark_width_, _Opt, +, 0, 1, SRC_DEPTH, \ + TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOBPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ DST_SUBSAMP_Y, benchmark_width_, _NullY, +, 0, 0, \ - SRC_DEPTH) + SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) -TESTBIPLANARTOBP(NV21, uint8_t, 1, 2, 2, NV12, uint8_t, 1, 2, 2, 8) -TESTBIPLANARTOBP(NV12, uint8_t, 1, 2, 2, NV12Mirror, uint8_t, 1, 2, 2, 8) -TESTBIPLANARTOBP(NV12, uint8_t, 1, 2, 2, NV24, uint8_t, 1, 1, 1, 8) -TESTBIPLANARTOBP(NV16, uint8_t, 1, 2, 1, NV24, uint8_t, 1, 1, 1, 8) -TESTBIPLANARTOBP(P010, uint16_t, 2, 2, 2, P410, uint16_t, 2, 1, 1, 10) -TESTBIPLANARTOBP(P210, uint16_t, 2, 2, 1, P410, uint16_t, 2, 1, 1, 10) -TESTBIPLANARTOBP(P012, uint16_t, 2, 2, 2, P412, uint16_t, 2, 1, 1, 10) -TESTBIPLANARTOBP(P212, uint16_t, 2, 2, 1, P412, uint16_t, 2, 1, 1, 12) 
-TESTBIPLANARTOBP(P016, uint16_t, 2, 2, 2, P416, uint16_t, 2, 1, 1, 12) -TESTBIPLANARTOBP(P216, uint16_t, 2, 2, 1, P416, uint16_t, 2, 1, 1, 12) +TESTBIPLANARTOBP(NV21, uint8_t, 1, 2, 2, NV12, uint8_t, 1, 2, 2, 8, 1, 1) +TESTBIPLANARTOBP(NV12, uint8_t, 1, 2, 2, NV12Mirror, uint8_t, 1, 2, 2, 8, 1, 1) +TESTBIPLANARTOBP(NV12, uint8_t, 1, 2, 2, NV24, uint8_t, 1, 1, 1, 8, 1, 1) +TESTBIPLANARTOBP(NV16, uint8_t, 1, 2, 1, NV24, uint8_t, 1, 1, 1, 8, 1, 1) +TESTBIPLANARTOBP(P010, uint16_t, 2, 2, 2, P410, uint16_t, 2, 1, 1, 10, 1, 1) +TESTBIPLANARTOBP(P210, uint16_t, 2, 2, 1, P410, uint16_t, 2, 1, 1, 10, 1, 1) +TESTBIPLANARTOBP(P012, uint16_t, 2, 2, 2, P412, uint16_t, 2, 1, 1, 10, 1, 1) +TESTBIPLANARTOBP(P212, uint16_t, 2, 2, 1, P412, uint16_t, 2, 1, 1, 12, 1, 1) +TESTBIPLANARTOBP(P016, uint16_t, 2, 2, 2, P416, uint16_t, 2, 1, 1, 12, 1, 1) +TESTBIPLANARTOBP(P216, uint16_t, 2, 2, 1, P416, uint16_t, 2, 1, 1, 12, 1, 1) +TESTBIPLANARTOBP(MM21, uint8_t, 1, 2, 2, NV12, uint8_t, 1, 2, 2, 8, 16, 32) -#define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ - SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, \ - DST_SUBSAMP_X, DST_SUBSAMP_Y, W1280, N, NEG, OFF, \ - SRC_DEPTH) \ - TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \ - static_assert(SRC_BPC == 1 || SRC_BPC == 2, "SRC BPC unsupported"); \ - static_assert(DST_BPC == 1 || DST_BPC == 2, "DST BPC unsupported"); \ - static_assert(SRC_SUBSAMP_X == 1 || SRC_SUBSAMP_X == 2, \ - "SRC_SUBSAMP_X unsupported"); \ - static_assert(SRC_SUBSAMP_Y == 1 || SRC_SUBSAMP_Y == 2, \ - "SRC_SUBSAMP_Y unsupported"); \ - static_assert(DST_SUBSAMP_X == 1 || DST_SUBSAMP_X == 2, \ - "DST_SUBSAMP_X unsupported"); \ - static_assert(DST_SUBSAMP_Y == 1 || DST_SUBSAMP_Y == 2, \ - "DST_SUBSAMP_Y unsupported"); \ - const int kWidth = W1280; \ - const int kHeight = benchmark_height_; \ - const int kSrcHalfWidth = SUBSAMPLE(kWidth, SRC_SUBSAMP_X); \ - const int kSrcHalfHeight = SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); \ - const int kDstHalfWidth = SUBSAMPLE(kWidth, DST_SUBSAMP_X); \ - const int kDstHalfHeight = SUBSAMPLE(kHeight, DST_SUBSAMP_Y); \ - align_buffer_page_end(src_y, kWidth* kHeight* SRC_BPC + OFF); \ - align_buffer_page_end(src_uv, \ - kSrcHalfWidth* kSrcHalfHeight* SRC_BPC * 2 + OFF); \ - align_buffer_page_end(dst_y_c, kWidth* kHeight* DST_BPC); \ - align_buffer_page_end(dst_u_c, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - align_buffer_page_end(dst_v_c, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - align_buffer_page_end(dst_y_opt, kWidth* kHeight* DST_BPC); \ - align_buffer_page_end(dst_u_opt, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - align_buffer_page_end(dst_v_opt, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - SRC_T* src_y_p = reinterpret_cast(src_y + OFF); \ - SRC_T* src_uv_p = reinterpret_cast(src_uv + OFF); \ - for (int i = 0; i < kWidth * kHeight; ++i) { \ - src_y_p[i] = \ - (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH))); \ - } \ - for (int i = 0; i < kSrcHalfWidth * kSrcHalfHeight * 2; ++i) { \ - src_uv_p[i] = \ - (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH))); \ - } \ - memset(dst_y_c, 1, kWidth* kHeight* DST_BPC); \ - memset(dst_u_c, 2, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - memset(dst_v_c, 3, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - memset(dst_y_opt, 101, kWidth* kHeight* DST_BPC); \ - memset(dst_u_opt, 102, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - memset(dst_v_opt, 103, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ - MaskCpuFlags(disable_cpu_flags_); \ - SRC_FMT_PLANAR##To##FMT_PLANAR( \ - src_y_p, 
kWidth, src_uv_p, kSrcHalfWidth * 2, \ - reinterpret_cast(dst_y_c), kWidth, \ - reinterpret_cast(dst_u_c), kDstHalfWidth, \ - reinterpret_cast(dst_v_c), kDstHalfWidth, kWidth, \ - NEG kHeight); \ - MaskCpuFlags(benchmark_cpu_info_); \ - for (int i = 0; i < benchmark_iterations_; ++i) { \ - SRC_FMT_PLANAR##To##FMT_PLANAR( \ - src_y_p, kWidth, src_uv_p, kSrcHalfWidth * 2, \ - reinterpret_cast(dst_y_opt), kWidth, \ - reinterpret_cast(dst_u_opt), kDstHalfWidth, \ - reinterpret_cast(dst_v_opt), kDstHalfWidth, kWidth, \ - NEG kHeight); \ - } \ - for (int i = 0; i < kHeight * kWidth * DST_BPC; ++i) { \ - EXPECT_EQ(dst_y_c[i], dst_y_opt[i]); \ - } \ - for (int i = 0; i < kDstHalfWidth * kDstHalfHeight * DST_BPC; ++i) { \ - EXPECT_EQ(dst_u_c[i], dst_u_opt[i]); \ - EXPECT_EQ(dst_v_c[i], dst_v_opt[i]); \ - } \ - free_aligned_buffer_page_end(dst_y_c); \ - free_aligned_buffer_page_end(dst_u_c); \ - free_aligned_buffer_page_end(dst_v_c); \ - free_aligned_buffer_page_end(dst_y_opt); \ - free_aligned_buffer_page_end(dst_u_opt); \ - free_aligned_buffer_page_end(dst_v_opt); \ - free_aligned_buffer_page_end(src_y); \ - free_aligned_buffer_page_end(src_uv); \ +#define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ + SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, \ + DST_SUBSAMP_X, DST_SUBSAMP_Y, W1280, N, NEG, OFF, \ + SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) \ + TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \ + static_assert(SRC_BPC == 1 || SRC_BPC == 2, "SRC BPC unsupported"); \ + static_assert(DST_BPC == 1 || DST_BPC == 2, "DST BPC unsupported"); \ + static_assert(SRC_SUBSAMP_X == 1 || SRC_SUBSAMP_X == 2, \ + "SRC_SUBSAMP_X unsupported"); \ + static_assert(SRC_SUBSAMP_Y == 1 || SRC_SUBSAMP_Y == 2, \ + "SRC_SUBSAMP_Y unsupported"); \ + static_assert(DST_SUBSAMP_X == 1 || DST_SUBSAMP_X == 2, \ + "DST_SUBSAMP_X unsupported"); \ + static_assert(DST_SUBSAMP_Y == 1 || DST_SUBSAMP_Y == 2, \ + "DST_SUBSAMP_Y unsupported"); \ + const int kWidth = W1280; \ + const int kHeight = benchmark_height_; \ + const int kSrcHalfWidth = SUBSAMPLE(kWidth, SRC_SUBSAMP_X); \ + const int kDstHalfWidth = SUBSAMPLE(kWidth, DST_SUBSAMP_X); \ + const int kDstHalfHeight = SUBSAMPLE(kHeight, DST_SUBSAMP_Y); \ + const int kPaddedWidth = (kWidth + (TILE_WIDTH - 1)) & ~(TILE_WIDTH - 1); \ + const int kPaddedHeight = \ + (kHeight + (TILE_HEIGHT - 1)) & ~(TILE_HEIGHT - 1); \ + const int kSrcHalfPaddedWidth = SUBSAMPLE(kPaddedWidth, SRC_SUBSAMP_X); \ + const int kSrcHalfPaddedHeight = SUBSAMPLE(kPaddedHeight, SRC_SUBSAMP_Y); \ + align_buffer_page_end(src_y, kPaddedWidth* kPaddedHeight* SRC_BPC + OFF); \ + align_buffer_page_end( \ + src_uv, kSrcHalfPaddedWidth* kSrcHalfPaddedHeight* SRC_BPC * 2 + OFF); \ + align_buffer_page_end(dst_y_c, kWidth* kHeight* DST_BPC); \ + align_buffer_page_end(dst_u_c, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + align_buffer_page_end(dst_v_c, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + align_buffer_page_end(dst_y_opt, kWidth* kHeight* DST_BPC); \ + align_buffer_page_end(dst_u_opt, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + align_buffer_page_end(dst_v_opt, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + SRC_T* src_y_p = reinterpret_cast(src_y + OFF); \ + SRC_T* src_uv_p = reinterpret_cast(src_uv + OFF); \ + for (int i = 0; i < kPaddedWidth * kPaddedHeight; ++i) { \ + src_y_p[i] = \ + (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH))); \ + } \ + for (int i = 0; i < kSrcHalfPaddedWidth * kSrcHalfPaddedHeight * 2; ++i) { \ + src_uv_p[i] = \ + (fastrand() & (((SRC_T)(-1)) << ((8 * 
SRC_BPC) - SRC_DEPTH))); \ + } \ + memset(dst_y_c, 1, kWidth* kHeight* DST_BPC); \ + memset(dst_u_c, 2, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + memset(dst_v_c, 3, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + memset(dst_y_opt, 101, kWidth* kHeight* DST_BPC); \ + memset(dst_u_opt, 102, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + memset(dst_v_opt, 103, kDstHalfWidth* kDstHalfHeight* DST_BPC); \ + MaskCpuFlags(disable_cpu_flags_); \ + SRC_FMT_PLANAR##To##FMT_PLANAR( \ + src_y_p, kWidth, src_uv_p, kSrcHalfWidth * 2, \ + reinterpret_cast(dst_y_c), kWidth, \ + reinterpret_cast(dst_u_c), kDstHalfWidth, \ + reinterpret_cast(dst_v_c), kDstHalfWidth, kWidth, \ + NEG kHeight); \ + MaskCpuFlags(benchmark_cpu_info_); \ + for (int i = 0; i < benchmark_iterations_; ++i) { \ + SRC_FMT_PLANAR##To##FMT_PLANAR( \ + src_y_p, kWidth, src_uv_p, kSrcHalfWidth * 2, \ + reinterpret_cast(dst_y_opt), kWidth, \ + reinterpret_cast(dst_u_opt), kDstHalfWidth, \ + reinterpret_cast(dst_v_opt), kDstHalfWidth, kWidth, \ + NEG kHeight); \ + } \ + for (int i = 0; i < kHeight * kWidth * DST_BPC; ++i) { \ + EXPECT_EQ(dst_y_c[i], dst_y_opt[i]); \ + } \ + for (int i = 0; i < kDstHalfWidth * kDstHalfHeight * DST_BPC; ++i) { \ + EXPECT_EQ(dst_u_c[i], dst_u_opt[i]); \ + EXPECT_EQ(dst_v_c[i], dst_v_opt[i]); \ + } \ + free_aligned_buffer_page_end(dst_y_c); \ + free_aligned_buffer_page_end(dst_u_c); \ + free_aligned_buffer_page_end(dst_v_c); \ + free_aligned_buffer_page_end(dst_y_opt); \ + free_aligned_buffer_page_end(dst_u_opt); \ + free_aligned_buffer_page_end(dst_v_opt); \ + free_aligned_buffer_page_end(src_y); \ + free_aligned_buffer_page_end(src_uv); \ } #define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, \ - DST_SUBSAMP_X, DST_SUBSAMP_Y, SRC_DEPTH) \ + DST_SUBSAMP_X, DST_SUBSAMP_Y, SRC_DEPTH, TILE_WIDTH, \ + TILE_HEIGHT) \ TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_ + 1, _Any, +, 0, SRC_DEPTH) \ + DST_SUBSAMP_Y, benchmark_width_ + 1, _Any, +, 0, SRC_DEPTH, \ + TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1, \ - SRC_DEPTH) \ + DST_SUBSAMP_Y, benchmark_width_, _Unaligned, +, 2, \ + SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_, _Invert, -, 0, SRC_DEPTH) \ + DST_SUBSAMP_Y, benchmark_width_, _Invert, -, 0, SRC_DEPTH, \ + TILE_WIDTH, TILE_HEIGHT) \ TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, \ SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, \ - DST_SUBSAMP_Y, benchmark_width_, _Opt, +, 0, SRC_DEPTH) + DST_SUBSAMP_Y, benchmark_width_, _Opt, +, 0, SRC_DEPTH, \ + TILE_WIDTH, TILE_HEIGHT) -TESTBIPLANARTOP(NV12, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8) -TESTBIPLANARTOP(NV21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8) +TESTBIPLANARTOP(NV12, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8, 1, 1) +TESTBIPLANARTOP(NV21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8, 1, 1) +TESTBIPLANARTOP(MM21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8, 16, 32) // Provide matrix wrappers for full range bt.709 #define F420ToABGR(a, b, c, d, e, f, g, h, i, j) \ @@ -649,6 +674,19 @@ TESTBIPLANARTOP(NV21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 
8) #define V444ToARGB(a, b, c, d, e, f, g, h, i, j) \ I444ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvV2020Constants, i, j) +#define I420ToARGBFilter(a, b, c, d, e, f, g, h, i, j) \ + I420ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) +#define I422ToARGBFilter(a, b, c, d, e, f, g, h, i, j) \ + I422ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) +#define I420ToRGB24Filter(a, b, c, d, e, f, g, h, i, j) \ + I420ToRGB24MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) +#define I422ToRGB24Filter(a, b, c, d, e, f, g, h, i, j) \ + I420ToRGB24MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) + #define ALIGNINT(V, ALIGN) (((V) + (ALIGN)-1) / (ALIGN) * (ALIGN)) #define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ @@ -699,17 +737,27 @@ TESTBIPLANARTOP(NV21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8) free_aligned_buffer_page_end(dst_argb_opt); \ } +#if defined(ENABLE_FULL_TESTS) #define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN) \ TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_ + 1, _Any, +, 0) \ TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ - YALIGN, benchmark_width_, _Unaligned, +, 1) \ + YALIGN, benchmark_width_, _Unaligned, +, 4) \ TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Invert, -, 0) \ TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Opt, +, 0) +#else +#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ + YALIGN) \ + TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ + YALIGN, benchmark_width_ + 1, _Any, +, 0) \ + TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ + YALIGN, benchmark_width_, _Opt, +, 0) +#endif +#if defined(ENABLE_FULL_TESTS) TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1) TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1) TESTPLANARTOB(J420, 2, 2, ARGB, 4, 4, 1) @@ -750,8 +798,12 @@ TESTPLANARTOB(V422, 2, 1, ARGB, 4, 4, 1) TESTPLANARTOB(V422, 2, 1, ABGR, 4, 4, 1) TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1) TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1) +TESTPLANARTOB(I422, 1, 1, RGB24, 3, 3, 1) +TESTPLANARTOB(I422, 1, 1, RAW, 3, 3, 1) TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1) TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1) +TESTPLANARTOB(I444, 1, 1, RGB24, 3, 3, 1) +TESTPLANARTOB(I444, 1, 1, RAW, 3, 3, 1) TESTPLANARTOB(J444, 1, 1, ARGB, 4, 4, 1) TESTPLANARTOB(J444, 1, 1, ABGR, 4, 4, 1) TESTPLANARTOB(H444, 1, 1, ARGB, 4, 4, 1) @@ -772,6 +824,38 @@ TESTPLANARTOB(H420, 2, 2, AR30, 4, 4, 1) TESTPLANARTOB(I420, 2, 2, AB30, 4, 4, 1) TESTPLANARTOB(H420, 2, 2, AB30, 4, 4, 1) #endif +TESTPLANARTOB(I420, 2, 2, ARGBFilter, 4, 4, 1) +TESTPLANARTOB(I422, 2, 1, ARGBFilter, 4, 4, 1) +TESTPLANARTOB(I420, 2, 2, RGB24Filter, 3, 3, 1) +TESTPLANARTOB(I422, 2, 2, RGB24Filter, 3, 3, 1) +#else +TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1) +TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1) +TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1) +TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1) +TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1) +TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1) +TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1) +TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1) +TESTPLANARTOB(I422, 2, 1, RGB565, 2, 2, 1) +#endif 
+TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1) +TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1) +TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1) +TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1) +TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1) +TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1) +TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1) +TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1) +TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1) +TESTPLANARTOB(I420, 2, 2, ARGBFilter, 4, 4, 1) +TESTPLANARTOB(I422, 2, 1, ARGBFilter, 4, 4, 1) +TESTPLANARTOB(I420, 2, 2, RGB24Filter, 3, 3, 1) +TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1) +TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1) +#endif #define TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, W1280, N, NEG, OFF, ATTEN) \ @@ -820,18 +904,25 @@ TESTPLANARTOB(H420, 2, 2, AB30, 4, 4, 1) free_aligned_buffer_page_end(dst_argb_opt); \ } +#if defined(ENABLE_FULL_TESTS) #define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN) \ TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_ + 1, _Any, +, 0, 0) \ TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ - YALIGN, benchmark_width_, _Unaligned, +, 1, 0) \ + YALIGN, benchmark_width_, _Unaligned, +, 2, 0) \ TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Invert, -, 0, 0) \ TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Opt, +, 0, 0) \ TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Premult, +, 0, 1) +#else +#define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ + YALIGN) \ + TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ + YALIGN, benchmark_width_, _Opt, +, 0, 0) +#endif #define J420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \ I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \ @@ -924,6 +1015,14 @@ TESTPLANARTOB(H420, 2, 2, AB30, 4, 4, 1) I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \ l, m) +#define I420AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m) \ + I420AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j, \ + &kYuvI601Constants, k, l, m, kFilterBilinear) +#define I422AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m) \ + I422AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j, \ + &kYuvI601Constants, k, l, m, kFilterBilinear) + +#if defined(ENABLE_FULL_TESTS) TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1) TESTQPLANARTOB(I420Alpha, 2, 2, ABGR, 4, 4, 1) TESTQPLANARTOB(J420Alpha, 2, 2, ARGB, 4, 4, 1) @@ -960,6 +1059,15 @@ TESTQPLANARTOB(U444Alpha, 1, 1, ARGB, 4, 4, 1) TESTQPLANARTOB(U444Alpha, 1, 1, ABGR, 4, 4, 1) TESTQPLANARTOB(V444Alpha, 1, 1, ARGB, 4, 4, 1) TESTQPLANARTOB(V444Alpha, 1, 1, ABGR, 4, 4, 1) +TESTQPLANARTOB(I420Alpha, 2, 2, ARGBFilter, 4, 4, 1) +TESTQPLANARTOB(I422Alpha, 2, 1, ARGBFilter, 4, 4, 1) +#else +TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1) +TESTQPLANARTOB(I422Alpha, 2, 1, ARGB, 4, 4, 1) +TESTQPLANARTOB(I444Alpha, 1, 1, ARGB, 4, 4, 1) +TESTQPLANARTOB(I420Alpha, 2, 2, ARGBFilter, 4, 4, 1) +TESTQPLANARTOB(I422Alpha, 2, 1, ARGBFilter, 4, 4, 1) +#endif #define TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, \ BPP_B, W1280, N, NEG, OFF) \ @@ -1019,7 +1127,7 @@ TESTQPLANARTOB(V444Alpha, 1, 1, ABGR, 4, 4, 1) TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B, \ benchmark_width_ + 1, _Any, +, 0) \ 
TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B, \ - benchmark_width_, _Unaligned, +, 1) \ + benchmark_width_, _Unaligned, +, 2) \ TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B, \ benchmark_width_, _Invert, -, 0) \ TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B, \ @@ -1117,15 +1225,23 @@ TESTBIPLANARTOB(NV12, 2, 2, RGB565, RGB565, 2) free_aligned_buffer_page_end(src_argb); \ } +#if defined(ENABLE_FULL_TESTS) #define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \ TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ benchmark_width_ + 1, _Any, +, 0) \ TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ - benchmark_width_, _Unaligned, +, 1) \ + benchmark_width_, _Unaligned, +, 2) \ TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ benchmark_width_, _Invert, -, 0) \ TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ benchmark_width_, _Opt, +, 0) +#else +#define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \ + TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ + benchmark_width_ + 1, _Any, +, 0) \ + TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ + benchmark_width_, _Opt, +, 0) +#endif TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2) TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2) @@ -1133,6 +1249,8 @@ TESTATOPLANAR(ARGB, 4, 1, I422, 2, 1) TESTATOPLANAR(ARGB, 4, 1, I444, 1, 1) TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2) TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1) +TESTATOPLANAR(ABGR, 4, 1, J420, 2, 2) +TESTATOPLANAR(ABGR, 4, 1, J422, 2, 1) #ifdef LITTLE_ENDIAN_ONLY_TEST TESTATOPLANAR(ARGB4444, 2, 1, I420, 2, 2) TESTATOPLANAR(RGB565, 2, 1, I420, 2, 2) @@ -1202,7 +1320,7 @@ TESTATOPLANAR(YUY2, 2, 1, I422, 2, 1) TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ benchmark_width_ + 1, _Any, +, 0) \ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ - benchmark_width_, _Unaligned, +, 1) \ + benchmark_width_, _Unaligned, +, 2) \ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ benchmark_width_, _Invert, -, 0) \ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ @@ -1212,6 +1330,7 @@ TESTATOBIPLANAR(ARGB, 1, 4, NV12, 2, 2) TESTATOBIPLANAR(ARGB, 1, 4, NV21, 2, 2) TESTATOBIPLANAR(ABGR, 1, 4, NV12, 2, 2) TESTATOBIPLANAR(ABGR, 1, 4, NV21, 2, 2) +TESTATOBIPLANAR(RAW, 1, 3, JNV21, 2, 2) TESTATOBIPLANAR(YUY2, 2, 4, NV12, 2, 2) TESTATOBIPLANAR(UYVY, 2, 4, NV12, 2, 2) TESTATOBIPLANAR(AYUV, 1, 4, NV12, 2, 2) @@ -1291,18 +1410,25 @@ TESTATOBIPLANAR(AYUV, 1, 4, NV21, 2, 2) } \ } +#if defined(ENABLE_FULL_TESTS) #define TESTATOB(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, \ EPP_B, STRIDE_B, HEIGHT_B) \ TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \ STRIDE_B, HEIGHT_B, benchmark_width_ + 1, _Any, +, 0) \ TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \ - STRIDE_B, HEIGHT_B, benchmark_width_, _Unaligned, +, 1) \ + STRIDE_B, HEIGHT_B, benchmark_width_, _Unaligned, +, 4) \ TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \ STRIDE_B, HEIGHT_B, benchmark_width_, _Invert, -, 0) \ TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \ STRIDE_B, HEIGHT_B, benchmark_width_, _Opt, +, 0) \ TESTATOBRANDOM(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, \ EPP_B, STRIDE_B, HEIGHT_B) +#else +#define 
TESTATOB(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, \ + EPP_B, STRIDE_B, HEIGHT_B) \ + TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \ + STRIDE_B, HEIGHT_B, benchmark_width_, _Opt, +, 0) +#endif TESTATOB(AB30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) TESTATOB(AB30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) @@ -1329,6 +1455,7 @@ TESTATOB(ARGB, uint8_t, 4, 4, 1, ARGBMirror, uint8_t, 4, 4, 1) TESTATOB(ARGB, uint8_t, 4, 4, 1, BGRA, uint8_t, 4, 4, 1) TESTATOB(ARGB, uint8_t, 4, 4, 1, I400, uint8_t, 1, 1, 1) TESTATOB(ARGB, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1) +TESTATOB(ABGR, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1) TESTATOB(RGBA, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1) TESTATOB(ARGB, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1) TESTATOB(ARGB, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1) @@ -1339,7 +1466,7 @@ TESTATOB(ARGB, uint8_t, 4, 4, 1, RGB565, uint8_t, 2, 2, 1) #endif TESTATOB(ARGB, uint8_t, 4, 4, 1, RGBA, uint8_t, 4, 4, 1) TESTATOB(ARGB, uint8_t, 4, 4, 1, UYVY, uint8_t, 2, 4, 1) -TESTATOB(ARGB, uint8_t, 4, 4, 1, YUY2, uint8_t, 2, 4, 1) // 4 +TESTATOB(ARGB, uint8_t, 4, 4, 1, YUY2, uint8_t, 2, 4, 1) TESTATOB(ARGB1555, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1) TESTATOB(ARGB4444, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1) TESTATOB(BGRA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) @@ -1373,6 +1500,127 @@ TESTATOB(AB64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) TESTATOB(AR64, uint16_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1) TESTATOB(AB64, uint16_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1) +// in place test +#define TESTATOAI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, \ + EPP_B, STRIDE_B, HEIGHT_B, W1280, N, NEG, OFF) \ + TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##N) { \ + const int kWidth = W1280; \ + const int kHeight = benchmark_height_; \ + const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ + const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \ + const int kStrideA = \ + (kWidth * EPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \ + const int kStrideB = \ + (kWidth * EPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \ + align_buffer_page_end(src_argb, \ + kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF); \ + align_buffer_page_end(dst_argb_c, \ + kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF); \ + align_buffer_page_end(dst_argb_opt, \ + kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF); \ + for (int i = 0; i < kStrideA * kHeightA * (int)sizeof(TYPE_A); ++i) { \ + src_argb[i + OFF] = (fastrand() & 0xff); \ + } \ + memcpy(dst_argb_c + OFF, src_argb, \ + kStrideA * kHeightA * (int)sizeof(TYPE_A)); \ + memcpy(dst_argb_opt + OFF, src_argb, \ + kStrideA * kHeightA * (int)sizeof(TYPE_A)); \ + MaskCpuFlags(disable_cpu_flags_); \ + FMT_A##To##FMT_B((TYPE_A*)(dst_argb_c /* src */ + OFF), kStrideA, \ + (TYPE_B*)dst_argb_c, kStrideB, kWidth, NEG kHeight); \ + MaskCpuFlags(benchmark_cpu_info_); \ + for (int i = 0; i < benchmark_iterations_; ++i) { \ + FMT_A##To##FMT_B((TYPE_A*)(dst_argb_opt /* src */ + OFF), kStrideA, \ + (TYPE_B*)dst_argb_opt, kStrideB, kWidth, NEG kHeight); \ + } \ + memcpy(dst_argb_opt + OFF, src_argb, \ + kStrideA * kHeightA * (int)sizeof(TYPE_A)); \ + FMT_A##To##FMT_B((TYPE_A*)(dst_argb_opt /* src */ + OFF), kStrideA, \ + (TYPE_B*)dst_argb_opt, kStrideB, kWidth, NEG kHeight); \ + for (int i = 0; i < kStrideB * kHeightB * (int)sizeof(TYPE_B); ++i) { \ + EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]); \ + } \ + free_aligned_buffer_page_end(src_argb); \ + free_aligned_buffer_page_end(dst_argb_c); \ + 
free_aligned_buffer_page_end(dst_argb_opt); \ + } + +#define TESTATOA(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, \ + EPP_B, STRIDE_B, HEIGHT_B) \ + TESTATOAI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \ + STRIDE_B, HEIGHT_B, benchmark_width_, _Inplace, +, 0) + +TESTATOA(AB30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) +TESTATOA(AB30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +TESTATOA(ABGR, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1) +#endif +TESTATOA(ABGR, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +TESTATOA(AR30, uint8_t, 4, 4, 1, AB30, uint8_t, 4, 4, 1) +#endif +TESTATOA(AR30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +TESTATOA(AR30, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1) +TESTATOA(AR30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +#endif +TESTATOA(ARGB, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +TESTATOA(ARGB, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1) +#endif +TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGB1555, uint8_t, 2, 2, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGB4444, uint8_t, 2, 2, 1) +// TODO(fbarchard): Support in place for mirror. +// TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGBMirror, uint8_t, 4, 4, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, BGRA, uint8_t, 4, 4, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, I400, uint8_t, 1, 1, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1) +TESTATOA(RGBA, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1) +TESTATOA(ABGR, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1) +TESTATOA(ABGR, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +TESTATOA(ARGB, uint8_t, 4, 4, 1, RGB565, uint8_t, 2, 2, 1) +#endif +TESTATOA(ARGB, uint8_t, 4, 4, 1, RGBA, uint8_t, 4, 4, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, UYVY, uint8_t, 2, 4, 1) +TESTATOA(ARGB, uint8_t, 4, 4, 1, YUY2, uint8_t, 2, 4, 1) +// TODO(fbarchard): Support in place for conversions that increase bpp. 
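The _Inplace variants above boil down to passing the same allocation as both source and destination, which is only safe while the destination pixel is no wider than the source pixel (hence the TODO about bpp-increasing conversions). A minimal sketch with a hypothetical 8-pixel row and helper name:

#include <cstdint>
#include "libyuv.h"

// src and dst alias the same buffer; ARGB and ABGR are both 4 bytes per
// pixel, so the conversion never writes ahead of data it has yet to read.
void InPlaceSwizzleSketch() {
  uint8_t buf[8 * 4];  // one row of 8 ARGB pixels
  for (int i = 0; i < 8 * 4; ++i) {
    buf[i] = static_cast<uint8_t>(i);
  }
  libyuv::ARGBToABGR(buf, 8 * 4, buf, 8 * 4, 8, 1);
}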
+// TESTATOA(ARGB1555, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1) +// TESTATOA(ARGB4444, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(BGRA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +// TESTATOA(I400, uint8_t, 1, 1, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(I400, uint8_t, 1, 1, 1, I400, uint8_t, 1, 1, 1) +// TESTATOA(I400, uint8_t, 1, 1, 1, I400Mirror, uint8_t, 1, 1, 1) +// TESTATOA(J400, uint8_t, 1, 1, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(J400, uint8_t, 1, 1, 1, J400, uint8_t, 1, 1, 1) +// TESTATOA(RAW, uint8_t, 3, 3, 1, ARGB, uint8_t, 4, 4, 1) +// TESTATOA(RAW, uint8_t, 3, 3, 1, RGBA, uint8_t, 4, 4, 1) +TESTATOA(RAW, uint8_t, 3, 3, 1, RGB24, uint8_t, 3, 3, 1) +// TESTATOA(RGB24, uint8_t, 3, 3, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(RGB24, uint8_t, 3, 3, 1, J400, uint8_t, 1, 1, 1) +// TESTATOA(RGB24, uint8_t, 3, 3, 1, RGB24Mirror, uint8_t, 3, 3, 1) +TESTATOA(RAW, uint8_t, 3, 3, 1, J400, uint8_t, 1, 1, 1) +#ifdef LITTLE_ENDIAN_ONLY_TEST +// TESTATOA(RGB565, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1) +#endif +TESTATOA(RGBA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +// TESTATOA(UYVY, uint8_t, 2, 4, 1, ARGB, uint8_t, 4, 4, 1) +// TESTATOA(YUY2, uint8_t, 2, 4, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(YUY2, uint8_t, 2, 4, 1, Y, uint8_t, 1, 1, 1) +// TESTATOA(ARGB, uint8_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1) +// TESTATOA(ARGB, uint8_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1) +// TESTATOA(ABGR, uint8_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1) +// TESTATOA(ABGR, uint8_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1) +TESTATOA(AR64, uint16_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(AB64, uint16_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1) +TESTATOA(AR64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) +TESTATOA(AB64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1) +TESTATOA(AR64, uint16_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1) +TESTATOA(AB64, uint16_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1) + #define TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ HEIGHT_B, W1280, N, NEG, OFF) \ TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither##N) { \ @@ -1448,7 +1696,7 @@ TESTATOB(AB64, uint16_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1) TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ HEIGHT_B, benchmark_width_ + 1, _Any, +, 0) \ TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ - HEIGHT_B, benchmark_width_, _Unaligned, +, 1) \ + HEIGHT_B, benchmark_width_, _Unaligned, +, 2) \ TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ HEIGHT_B, benchmark_width_, _Invert, -, 0) \ TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ @@ -1460,9 +1708,11 @@ TESTATOB(AB64, uint16_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1) TESTATOBD(ARGB, 4, 4, 1, RGB565, 2, 2, 1) #endif -#define TESTSYMI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, W1280, N, NEG, \ +// These conversions called twice, produce the original result. +// e.g. endian swap twice. 
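TESTEND relies on these swaps being self-inverse, so running one twice must return the original bytes. A standalone round trip with a hypothetical 4-pixel row looks like:

#include <cassert>
#include <cstdint>
#include <cstring>
#include "libyuv.h"

// ABGRToARGB only exchanges the R and B channels, so applying it twice is a
// byte-for-byte identity; the TESTEND cases check the same property holds
// for the optimized paths.
void EndSwapRoundTrip() {
  uint8_t orig[4 * 4];
  uint8_t once[4 * 4];
  uint8_t twice[4 * 4];
  for (int i = 0; i < 4 * 4; ++i) {
    orig[i] = static_cast<uint8_t>(i * 17);
  }
  libyuv::ABGRToARGB(orig, 4 * 4, once, 4 * 4, 4, 1);
  libyuv::ABGRToARGB(once, 4 * 4, twice, 4 * 4, 4, 1);
  assert(memcmp(orig, twice, sizeof(orig)) == 0);
}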
+#define TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, W1280, N, NEG, \ OFF) \ - TEST_F(LibYUVConvertTest, FMT_ATOB##_Symetric##N) { \ + TEST_F(LibYUVConvertTest, FMT_ATOB##_Endswap##N) { \ const int kWidth = W1280; \ const int kHeight = benchmark_height_; \ const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ @@ -1501,34 +1751,25 @@ TESTATOBD(ARGB, 4, 4, 1, RGB565, 2, 2, 1) free_aligned_buffer_page_end(dst_argb_opt); \ } -#define TESTSYM(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A) \ - TESTSYMI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_ + 1, \ +#if defined(ENABLE_FULL_TESTS) +#define TESTEND(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A) \ + TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_ + 1, \ _Any, +, 0) \ - TESTSYMI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, \ - _Unaligned, +, 1) \ - TESTSYMI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, \ + TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, \ + _Unaligned, +, 2) \ + TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, \ _Opt, +, 0) +#else +#define TESTEND(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A) \ + TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, \ + _Opt, +, 0) +#endif -TESTSYM(ARGBToARGB, uint8_t, 4, 4, 1) -TESTSYM(ARGBToBGRA, uint8_t, 4, 4, 1) -TESTSYM(ARGBToABGR, uint8_t, 4, 4, 1) -TESTSYM(BGRAToARGB, uint8_t, 4, 4, 1) -TESTSYM(ABGRToARGB, uint8_t, 4, 4, 1) -TESTSYM(AB64ToAR64, uint16_t, 4, 4, 1) - -TEST_F(LibYUVConvertTest, Test565) { - SIMD_ALIGNED(uint8_t orig_pixels[256][4]); - SIMD_ALIGNED(uint8_t pixels565[256][2]); - - for (int i = 0; i < 256; ++i) { - for (int j = 0; j < 4; ++j) { - orig_pixels[i][j] = i; - } - } - ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1); - uint32_t checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381); - EXPECT_EQ(610919429u, checksum); -} +TESTEND(ARGBToBGRA, uint8_t, 4, 4, 1) +TESTEND(ARGBToABGR, uint8_t, 4, 4, 1) +TESTEND(BGRAToARGB, uint8_t, 4, 4, 1) +TESTEND(ABGRToARGB, uint8_t, 4, 4, 1) +TESTEND(AB64ToAR64, uint16_t, 4, 4, 1) #ifdef HAVE_JPEG TEST_F(LibYUVConvertTest, ValidateJpeg) { @@ -2167,7 +2408,8 @@ TEST_F(LibYUVConvertTest, TestMJPGToNV12_420) { free_aligned_buffer_page_end(dst_vu); } -TEST_F(LibYUVConvertTest, TestMJPGToNV21_422) { +// TODO(fbarchard): Improve test to compare against I422, not checksum +TEST_F(LibYUVConvertTest, DISABLED_TestMJPGToNV21_422) { int width = 0; int height = 0; int ret = MJPGSize(kTest3Jpg, kTest3JpgLen, &width, &height); @@ -2191,13 +2433,13 @@ TEST_F(LibYUVConvertTest, TestMJPGToNV21_422) { uint32_t dst_y_hash = HashDjb2(dst_y, width * height, 5381); uint32_t dst_uv_hash = HashDjb2(dst_uv, half_width * half_height * 2, 5381); EXPECT_EQ(dst_y_hash, 2682851208u); - EXPECT_EQ(dst_uv_hash, 3543430771u); + EXPECT_EQ(dst_uv_hash, 493520167u); free_aligned_buffer_page_end(dst_y); free_aligned_buffer_page_end(dst_uv); } -TEST_F(LibYUVConvertTest, TestMJPGToNV12_422) { +TEST_F(LibYUVConvertTest, DISABLED_TestMJPGToNV12_422) { int width = 0; int height = 0; int ret = MJPGSize(kTest3Jpg, kTest3JpgLen, &width, &height); @@ -2224,7 +2466,7 @@ TEST_F(LibYUVConvertTest, TestMJPGToNV12_422) { half_height); uint32_t dst_vu_hash = HashDjb2(dst_vu, half_width * half_height * 2, 5381); EXPECT_EQ(dst_y_hash, 2682851208u); - EXPECT_EQ(dst_vu_hash, 3543430771u); + EXPECT_EQ(dst_vu_hash, 493520167u); free_aligned_buffer_page_end(dst_y); free_aligned_buffer_page_end(dst_uv); @@ 
-2538,7 +2780,7 @@ TEST_F(LibYUVConvertTest, I420CropOddY) { const int SUBSAMP_Y = 2; const int kWidth = benchmark_width_; const int kHeight = benchmark_height_; - const int crop_y = 1; + const int crop_y = benchmark_height_ > 1 ? 1 : 0; const int kDestWidth = benchmark_width_; const int kDestHeight = benchmark_height_ - crop_y * 2; const int kStrideU = SUBSAMPLE(kWidth, SUBSAMP_X); @@ -2746,7 +2988,7 @@ TEST_F(LibYUVConvertTest, TestDither) { TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C) \ TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ - YALIGN, benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C) \ + YALIGN, benchmark_width_, _Unaligned, +, 2, FMT_C, BPP_C) \ TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Invert, -, 0, FMT_C, BPP_C) \ TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ @@ -2816,9 +3058,53 @@ TESTPLANARTOBD(I420, 2, 2, RGB565, 2, 2, 1, ARGB, 4) TESTPTOB(TestYUY2ToNV12, YUY2ToI420, YUY2ToNV12) TESTPTOB(TestUYVYToNV12, UYVYToI420, UYVYToNV12) -// Transitive tests. A to B to C is same as A to C. -// Benchmarks A To B to C for comparison to 1 step, benchmarked elsewhere. +TEST_F(LibYUVConvertTest, MM21ToYUY2) { + const int kWidth = (benchmark_width_ + 15) & (~15); + const int kHeight = (benchmark_height_ + 31) & (~31); + align_buffer_page_end(orig_y, kWidth * kHeight); + align_buffer_page_end(orig_uv, + 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); + + align_buffer_page_end(tmp_y, kWidth * kHeight); + align_buffer_page_end(tmp_u, SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); + align_buffer_page_end(tmp_v, SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); + + align_buffer_page_end(dst_yuyv, 4 * SUBSAMPLE(kWidth, 2) * kHeight); + align_buffer_page_end(golden_yuyv, 4 * SUBSAMPLE(kWidth, 2) * kHeight); + + MemRandomize(orig_y, kWidth * kHeight); + MemRandomize(orig_uv, 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2)); + + /* Convert MM21 to YUY2 in 2 steps for reference */ + libyuv::MM21ToI420(orig_y, kWidth, orig_uv, 2 * SUBSAMPLE(kWidth, 2), tmp_y, + kWidth, tmp_u, SUBSAMPLE(kWidth, 2), tmp_v, + SUBSAMPLE(kWidth, 2), kWidth, kHeight); + libyuv::I420ToYUY2(tmp_y, kWidth, tmp_u, SUBSAMPLE(kWidth, 2), tmp_v, + SUBSAMPLE(kWidth, 2), golden_yuyv, + 4 * SUBSAMPLE(kWidth, 2), kWidth, kHeight); + + /* Convert to NV12 */ + for (int i = 0; i < benchmark_iterations_; ++i) { + libyuv::MM21ToYUY2(orig_y, kWidth, orig_uv, 2 * SUBSAMPLE(kWidth, 2), + dst_yuyv, 4 * SUBSAMPLE(kWidth, 2), kWidth, kHeight); + } + + for (int i = 0; i < 4 * SUBSAMPLE(kWidth, 2) * kHeight; ++i) { + EXPECT_EQ(dst_yuyv[i], golden_yuyv[i]); + } + + free_aligned_buffer_page_end(orig_y); + free_aligned_buffer_page_end(orig_uv); + free_aligned_buffer_page_end(tmp_y); + free_aligned_buffer_page_end(tmp_u); + free_aligned_buffer_page_end(tmp_v); + free_aligned_buffer_page_end(dst_yuyv); + free_aligned_buffer_page_end(golden_yuyv); +} + +// Transitive test. A to B to C is same as A to C. +// Benchmarks A To B to C for comparison to 1 step, benchmarked elsewhere. 
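Spelled out for one instantiation, the transitive check asserts that a two-step path reproduces the one-step result exactly. A sketch with hypothetical buffers (even width and height assumed, error checking omitted):

#include <cstdint>
#include <vector>
#include "libyuv.h"

// I420 -> ARGB -> RGB24 must produce the same bytes as I420 -> RGB24 in one
// step; that equality is exactly what the TESTPLANARTOE instantiations check.
void TransitiveSketch(int width, int height) {
  std::vector<uint8_t> y(width * height);
  std::vector<uint8_t> u((width / 2) * (height / 2));
  std::vector<uint8_t> v((width / 2) * (height / 2));
  std::vector<uint8_t> argb(width * height * 4);
  std::vector<uint8_t> two_step(width * height * 3);
  std::vector<uint8_t> one_step(width * height * 3);
  // ... fill y, u and v with test data ...
  libyuv::I420ToARGB(y.data(), width, u.data(), width / 2, v.data(), width / 2,
                     argb.data(), width * 4, width, height);
  libyuv::ARGBToRGB24(argb.data(), width * 4, two_step.data(), width * 3,
                      width, height);
  libyuv::I420ToRGB24(y.data(), width, u.data(), width / 2, v.data(),
                      width / 2, one_step.data(), width * 3, width, height);
  // two_step and one_step should now compare equal byte for byte.
}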
#define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ W1280, N, NEG, OFF, FMT_C, BPP_C) \ TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##To##FMT_C##N) { \ @@ -2867,17 +3153,25 @@ TESTPTOB(TestUYVYToNV12, UYVYToI420, UYVYToNV12) free_aligned_buffer_page_end(dst_argb_bc); \ } +#if defined(ENABLE_FULL_TESTS) #define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ FMT_C, BPP_C) \ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C) \ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ - benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C) \ + benchmark_width_, _Unaligned, +, 2, FMT_C, BPP_C) \ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_, _Invert, -, 0, FMT_C, BPP_C) \ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_, _Opt, +, 0, FMT_C, BPP_C) +#else +#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ + FMT_C, BPP_C) \ + TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ + benchmark_width_, _Opt, +, 0, FMT_C, BPP_C) +#endif +#if defined(ENABLE_FULL_TESTS) TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4) TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ABGR, 4) TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3) @@ -2932,7 +3226,32 @@ TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4) TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4) TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4) TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4) +#else +TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4) +TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2) +TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2) +TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3) +TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB24, 3) +TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2) +TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4) +TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4) +TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3) +TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4) +TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3) +TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4) +TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4) +TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4) +TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4) +TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, RGB565, 2) +TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4) +TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4) +TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4) +TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4) +TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4) +#endif +// Transitive test: Compare 1 step vs 2 step conversion for YUVA to ARGB. +// Benchmark 2 step conversion for comparison to 1 step conversion. 
#define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ W1280, N, NEG, OFF, FMT_C, BPP_C, ATTEN) \ TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##To##FMT_C##N) { \ @@ -2946,6 +3265,12 @@ TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4) align_buffer_page_end(src_v, kSizeUV + OFF); \ align_buffer_page_end(src_a, kWidth* kHeight + OFF); \ align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF); \ + const int kStrideC = kWidth * BPP_C; \ + align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF); \ + align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF); \ + memset(dst_argb_c + OFF, 2, kStrideC * kHeight); \ + memset(dst_argb_b + OFF, 1, kStrideB * kHeight); \ + memset(dst_argb_bc + OFF, 3, kStrideC * kHeight); \ for (int i = 0; i < kWidth * kHeight; ++i) { \ src_y[i + OFF] = (fastrand() & 0xff); \ src_a[i + OFF] = (fastrand() & 0xff); \ @@ -2954,26 +3279,21 @@ TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4) src_u[i + OFF] = (fastrand() & 0xff); \ src_v[i + OFF] = (fastrand() & 0xff); \ } \ - memset(dst_argb_b + OFF, 1, kStrideB * kHeight); \ - FMT_PLANAR##To##FMT_B( \ - src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \ - src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth, \ - dst_argb_b + OFF, kStrideB, kWidth, NEG kHeight, ATTEN); \ - /* Convert to a 3rd format in 1 step and 2 steps and compare */ \ - const int kStrideC = kWidth * BPP_C; \ - align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF); \ - align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF); \ - memset(dst_argb_c + OFF, 2, kStrideC * kHeight); \ - memset(dst_argb_bc + OFF, 3, kStrideC * kHeight); \ for (int i = 0; i < benchmark_iterations_; ++i) { \ - FMT_PLANAR##To##FMT_C( \ + /* Convert A to B */ \ + FMT_PLANAR##To##FMT_B( \ src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \ src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth, \ - dst_argb_c + OFF, kStrideC, kWidth, NEG kHeight, ATTEN); \ + dst_argb_b + OFF, kStrideB, kWidth, NEG kHeight, ATTEN); \ /* Convert B to C */ \ FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF, \ kStrideC, kWidth, kHeight); \ } \ + /* Convert A to C */ \ + FMT_PLANAR##To##FMT_C( \ + src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \ + src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth, \ + dst_argb_c + OFF, kStrideC, kWidth, NEG kHeight, ATTEN); \ for (int i = 0; i < kStrideC * kHeight; ++i) { \ EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]); \ } \ @@ -2986,19 +3306,27 @@ TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4) free_aligned_buffer_page_end(dst_argb_bc); \ } +#if defined(ENABLE_FULL_TESTS) #define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ FMT_C, BPP_C) \ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C, 0) \ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ - benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C, 0) \ + benchmark_width_, _Unaligned, +, 2, FMT_C, BPP_C, 0) \ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_, _Invert, -, 0, FMT_C, BPP_C, 0) \ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0) \ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ benchmark_width_, _Premult, +, 0, FMT_C, BPP_C, 1) +#else +#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ + FMT_C, 
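As with the 8-bit wrappers earlier, these fix the matrix to BT.601 and the chroma filter to bilinear so the filtered converters slot into the existing 10-argument test macros. Called directly, one of them looks like this sketch (hypothetical buffers and helper name; the 10-bit plane strides are given in uint16_t elements, matching how the 16-bit test macros pass kWidth):

#include <cstdint>
#include <vector>
#include "libyuv.h"

// Direct call to a filtered converter, pinned to BT.601 constants and
// bilinear chroma interpolation just like the wrapper macros above.
void FilteredConvertSketch(int width, int height) {
  std::vector<uint16_t> y(width * height);  // 10 bits used per sample
  std::vector<uint16_t> u((width / 2) * (height / 2));
  std::vector<uint16_t> v((width / 2) * (height / 2));
  std::vector<uint8_t> argb(width * height * 4);
  // ... fill y, u and v with 10-bit test data ...
  libyuv::I010ToARGBMatrixFilter(y.data(), width, u.data(), width / 2,
                                 v.data(), width / 2, argb.data(), width * 4,
                                 &libyuv::kYuvI601Constants, width, height,
                                 libyuv::kFilterBilinear);
}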
BPP_C) \ + TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \ + benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0) +#endif +#if defined(ENABLE_FULL_TESTS) TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4) TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4) TESTQPLANARTOE(J420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4) @@ -3033,6 +3361,11 @@ TESTQPLANARTOE(U444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4) TESTQPLANARTOE(U444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4) TESTQPLANARTOE(V444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4) TESTQPLANARTOE(V444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4) +#else +TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4) +TESTQPLANARTOE(I422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4) +TESTQPLANARTOE(I444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4) +#endif #define TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, W1280, N, NEG, \ OFF, FMT_C, BPP_C) \ @@ -3076,7 +3409,7 @@ TESTQPLANARTOE(V444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4) TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, \ benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C) \ TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_, \ - _Unaligned, +, 1, FMT_C, BPP_C) \ + _Unaligned, +, 4, FMT_C, BPP_C) \ TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_, \ _Invert, -, 0, FMT_C, BPP_C) \ TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_, \ @@ -3228,6 +3561,19 @@ TEST_F(LibYUVConvertTest, ABGRToAR30Row_Opt) { #define U410ToAB30(a, b, c, d, e, f, g, h, i, j) \ I410ToAB30Matrix(a, b, c, d, e, f, g, h, &kYuv2020Constants, i, j) +#define I010ToARGBFilter(a, b, c, d, e, f, g, h, i, j) \ + I010ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) +#define I010ToAR30Filter(a, b, c, d, e, f, g, h, i, j) \ + I010ToAR30MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) +#define I210ToARGBFilter(a, b, c, d, e, f, g, h, i, j) \ + I210ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) +#define I210ToAR30Filter(a, b, c, d, e, f, g, h, i, j) \ + I210ToAR30MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \ + kFilterBilinear) + // TODO(fbarchard): Fix clamping issue affected by U channel. 
#define TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, \ BPP_B, ALIGN, YALIGN, W1280, N, NEG, SOFF, DOFF) \ @@ -3281,14 +3627,14 @@ TEST_F(LibYUVConvertTest, ABGRToAR30Row_Opt) { TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \ ALIGN, YALIGN, benchmark_width_ + 1, _Any, +, 0, 0) \ TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \ - ALIGN, YALIGN, benchmark_width_, _Unaligned, +, 1, 1) \ + ALIGN, YALIGN, benchmark_width_, _Unaligned, +, 4, 4) \ TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \ ALIGN, YALIGN, benchmark_width_, _Invert, -, 0, 0) \ TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \ ALIGN, YALIGN, benchmark_width_, _Opt, +, 0, 0) // These conversions are only optimized for x86 -#if defined(ENABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) TESTPLANAR16TOB(I010, 2, 2, 0x3ff, ARGB, 4, 4, 1) TESTPLANAR16TOB(I010, 2, 2, 0x3ff, ABGR, 4, 4, 1) TESTPLANAR16TOB(H010, 2, 2, 0x3ff, ARGB, 4, 4, 1) @@ -3308,6 +3654,8 @@ TESTPLANAR16TOB(H410, 1, 1, 0x3ff, ABGR, 4, 4, 1) TESTPLANAR16TOB(U410, 1, 1, 0x3ff, ARGB, 4, 4, 1) TESTPLANAR16TOB(U410, 1, 1, 0x3ff, ABGR, 4, 4, 1) TESTPLANAR16TOB(I012, 2, 2, 0xfff, ARGB, 4, 4, 1) +TESTPLANAR16TOB(I010, 2, 2, 0x3ff, ARGBFilter, 4, 4, 1) +TESTPLANAR16TOB(I210, 2, 1, 0x3ff, ARGBFilter, 4, 4, 1) #ifdef LITTLE_ENDIAN_ONLY_TEST TESTPLANAR16TOB(I010, 2, 2, 0x3ff, AR30, 4, 4, 1) @@ -3329,8 +3677,10 @@ TESTPLANAR16TOB(H410, 1, 1, 0x3ff, AB30, 4, 4, 1) TESTPLANAR16TOB(U410, 1, 1, 0x3ff, AR30, 4, 4, 1) TESTPLANAR16TOB(U410, 1, 1, 0x3ff, AB30, 4, 4, 1) TESTPLANAR16TOB(I012, 2, 2, 0xfff, AR30, 4, 4, 1) +TESTPLANAR16TOB(I010, 2, 2, 0x3ff, AR30Filter, 4, 4, 1) +TESTPLANAR16TOB(I210, 2, 1, 0x3ff, AR30Filter, 4, 4, 1) #endif // LITTLE_ENDIAN_ONLY_TEST -#endif // ENABLE_SLOW_TESTS +#endif // DISABLE_SLOW_TESTS #define TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \ ALIGN, YALIGN, W1280, N, NEG, OFF, ATTEN, S_DEPTH) \ @@ -3388,18 +3738,25 @@ TESTPLANAR16TOB(I012, 2, 2, 0xfff, AR30, 4, 4, 1) free_aligned_buffer_page_end(dst_argb_opt); \ } +#if defined(ENABLE_FULL_TESTS) #define TESTQPLANAR16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \ ALIGN, YALIGN, S_DEPTH) \ TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_ + 1, _Any, +, 0, 0, S_DEPTH) \ TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ - YALIGN, benchmark_width_, _Unaligned, +, 1, 0, S_DEPTH) \ + YALIGN, benchmark_width_, _Unaligned, +, 2, 0, S_DEPTH) \ TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Invert, -, 0, 0, S_DEPTH) \ TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Opt, +, 0, 0, S_DEPTH) \ TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Premult, +, 0, 1, S_DEPTH) +#else +#define TESTQPLANAR16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \ + ALIGN, YALIGN, S_DEPTH) \ + TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ + YALIGN, benchmark_width_, _Opt, +, 0, 0, S_DEPTH) +#endif #define I010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \ I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \ @@ -3509,9 +3866,15 @@ TESTPLANAR16TOB(I012, 2, 2, 0xfff, AR30, 4, 4, 1) #define 
V410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \ I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \ l, m) +#define I010AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m) \ + I010AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j, \ + &kYuvI601Constants, k, l, m, kFilterBilinear) +#define I210AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m) \ + I010AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j, \ + &kYuvI601Constants, k, l, m, kFilterBilinear) // These conversions are only optimized for x86 -#if defined(ENABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) TESTQPLANAR16TOB(I010Alpha, 2, 2, ARGB, 4, 4, 1, 10) TESTQPLANAR16TOB(I010Alpha, 2, 2, ABGR, 4, 4, 1, 10) TESTQPLANAR16TOB(J010Alpha, 2, 2, ARGB, 4, 4, 1, 10) @@ -3548,7 +3911,9 @@ TESTQPLANAR16TOB(U410Alpha, 1, 1, ARGB, 4, 4, 1, 10) TESTQPLANAR16TOB(U410Alpha, 1, 1, ABGR, 4, 4, 1, 10) TESTQPLANAR16TOB(V410Alpha, 1, 1, ARGB, 4, 4, 1, 10) TESTQPLANAR16TOB(V410Alpha, 1, 1, ABGR, 4, 4, 1, 10) -#endif // ENABLE_SLOW_TESTS +TESTQPLANAR16TOB(I010Alpha, 2, 2, ARGBFilter, 4, 4, 1, 10) +TESTQPLANAR16TOB(I210Alpha, 2, 1, ARGBFilter, 4, 4, 1, 10) +#endif // DISABLE_SLOW_TESTS #define TESTBIPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \ ALIGN, YALIGN, W1280, N, NEG, SOFF, DOFF, S_DEPTH) \ @@ -3599,7 +3964,7 @@ TESTQPLANAR16TOB(V410Alpha, 1, 1, ABGR, 4, 4, 1, 10) TESTBIPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_ + 1, _Any, +, 0, 0, S_DEPTH) \ TESTBIPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ - YALIGN, benchmark_width_, _Unaligned, +, 1, 1, S_DEPTH) \ + YALIGN, benchmark_width_, _Unaligned, +, 4, 4, S_DEPTH) \ TESTBIPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ YALIGN, benchmark_width_, _Invert, -, 0, 0, S_DEPTH) \ TESTBIPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \ @@ -3632,13 +3997,28 @@ TESTQPLANAR16TOB(V410Alpha, 1, 1, ABGR, 4, 4, 1, 10) #define P216ToAR30(a, b, c, d, e, f, g, h) \ P216ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h) -#if defined(ENABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +#define P010ToARGBFilter(a, b, c, d, e, f, g, h) \ + P010ToARGBMatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \ + kFilterBilinear) +#define P210ToARGBFilter(a, b, c, d, e, f, g, h) \ + P210ToARGBMatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \ + kFilterBilinear) +#define P010ToAR30Filter(a, b, c, d, e, f, g, h) \ + P010ToAR30MatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \ + kFilterBilinear) +#define P210ToAR30Filter(a, b, c, d, e, f, g, h) \ + P210ToAR30MatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \ + kFilterBilinear) + +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) TESTBIPLANAR16TOB(P010, 2, 2, ARGB, 4, 4, 1, 10) TESTBIPLANAR16TOB(P210, 2, 1, ARGB, 4, 4, 1, 10) TESTBIPLANAR16TOB(P012, 2, 2, ARGB, 4, 4, 1, 12) TESTBIPLANAR16TOB(P212, 2, 1, ARGB, 4, 4, 1, 12) TESTBIPLANAR16TOB(P016, 2, 2, ARGB, 4, 4, 1, 16) TESTBIPLANAR16TOB(P216, 2, 1, ARGB, 4, 4, 1, 16) +TESTBIPLANAR16TOB(P010, 2, 2, ARGBFilter, 4, 4, 1, 10) +TESTBIPLANAR16TOB(P210, 2, 1, ARGBFilter, 4, 4, 1, 10) #ifdef LITTLE_ENDIAN_ONLY_TEST TESTBIPLANAR16TOB(P010, 2, 2, AR30, 4, 4, 1, 10) TESTBIPLANAR16TOB(P210, 2, 1, AR30, 4, 4, 1, 10) @@ -3646,8 +4026,10 @@ TESTBIPLANAR16TOB(P012, 2, 2, AR30, 4, 4, 1, 12) 
TESTBIPLANAR16TOB(P212, 2, 1, AR30, 4, 4, 1, 12) TESTBIPLANAR16TOB(P016, 2, 2, AR30, 4, 4, 1, 16) TESTBIPLANAR16TOB(P216, 2, 1, AR30, 4, 4, 1, 16) +TESTBIPLANAR16TOB(P010, 2, 2, AR30Filter, 4, 4, 1, 10) +TESTBIPLANAR16TOB(P210, 2, 1, AR30Filter, 4, 4, 1, 10) #endif // LITTLE_ENDIAN_ONLY_TEST -#endif // defined(ENABLE_SLOW_TESTS) +#endif // DISABLE_SLOW_TESTS static int Clamp(int y) { if (y < 0) { @@ -3703,10 +4085,11 @@ TEST_F(LibYUVConvertTest, TestH420ToARGB) { ++histogram_b[b]; ++histogram_g[g]; ++histogram_r[r]; - int expected_y = Clamp(static_cast((i - 16) * 1.164f)); - EXPECT_NEAR(b, expected_y, 1); - EXPECT_NEAR(g, expected_y, 1); - EXPECT_NEAR(r, expected_y, 1); + // Reference formula for Y channel contribution in YUV to RGB conversions: + int expected_y = Clamp(static_cast((i - 16) * 1.164f + 0.5f)); + EXPECT_EQ(b, expected_y); + EXPECT_EQ(g, expected_y); + EXPECT_EQ(r, expected_y); EXPECT_EQ(a, 255); } @@ -3828,7 +4211,7 @@ TEST_F(LibYUVConvertTest, TestH010ToAR30) { ++histogram_b[b10]; ++histogram_g[g10]; ++histogram_r[r10]; - int expected_y = Clamp10(static_cast((i - 64) * 1.164f)); + int expected_y = Clamp10(static_cast((i - 64) * 1.164f + 0.5)); EXPECT_NEAR(b10, expected_y, 4); EXPECT_NEAR(g10, expected_y, 4); EXPECT_NEAR(r10, expected_y, 4); @@ -3981,30 +4364,6 @@ TEST_F(LibYUVConvertTest, TestH420ToAR30) { free_aligned_buffer_page_end(ar30_pixels); } -// Test RGB24 to ARGB and back to RGB24 -TEST_F(LibYUVConvertTest, TestARGBToRGB24) { - const int kSize = 256; - align_buffer_page_end(orig_rgb24, kSize * 3); - align_buffer_page_end(argb_pixels, kSize * 4); - align_buffer_page_end(dest_rgb24, kSize * 3); - - // Test grey scale - for (int i = 0; i < kSize * 3; ++i) { - orig_rgb24[i] = i; - } - - RGB24ToARGB(orig_rgb24, 0, argb_pixels, 0, kSize, 1); - ARGBToRGB24(argb_pixels, 0, dest_rgb24, 0, kSize, 1); - - for (int i = 0; i < kSize * 3; ++i) { - EXPECT_EQ(orig_rgb24[i], dest_rgb24[i]); - } - - free_aligned_buffer_page_end(orig_rgb24); - free_aligned_buffer_page_end(argb_pixels); - free_aligned_buffer_page_end(dest_rgb24); -} - // Test I400 with jpeg matrix is same as J400 TEST_F(LibYUVConvertTest, TestI400) { const int kSize = 256; @@ -4067,4 +4426,98 @@ TEST_F(LibYUVConvertTest, TestI400) { free_aligned_buffer_page_end(argb_pixels_2020_i400); } +// Test RGB24 to ARGB and back to RGB24 +TEST_F(LibYUVConvertTest, TestARGBToRGB24) { + const int kSize = 256; + align_buffer_page_end(orig_rgb24, kSize * 3); + align_buffer_page_end(argb_pixels, kSize * 4); + align_buffer_page_end(dest_rgb24, kSize * 3); + + // Test grey scale + for (int i = 0; i < kSize * 3; ++i) { + orig_rgb24[i] = i; + } + + RGB24ToARGB(orig_rgb24, 0, argb_pixels, 0, kSize, 1); + ARGBToRGB24(argb_pixels, 0, dest_rgb24, 0, kSize, 1); + + for (int i = 0; i < kSize * 3; ++i) { + EXPECT_EQ(orig_rgb24[i], dest_rgb24[i]); + } + + free_aligned_buffer_page_end(orig_rgb24); + free_aligned_buffer_page_end(argb_pixels); + free_aligned_buffer_page_end(dest_rgb24); +} + +TEST_F(LibYUVConvertTest, Test565) { + SIMD_ALIGNED(uint8_t orig_pixels[256][4]); + SIMD_ALIGNED(uint8_t pixels565[256][2]); + + for (int i = 0; i < 256; ++i) { + for (int j = 0; j < 4; ++j) { + orig_pixels[i][j] = i; + } + } + ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1); + uint32_t checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381); + EXPECT_EQ(610919429u, checksum); +} + +// Test RGB24 to J420 is exact +#if defined(LIBYUV_BIT_EXACT) +TEST_F(LibYUVConvertTest, TestRGB24ToJ420) { + const int kSize = 256; + 
align_buffer_page_end(orig_rgb24, kSize * 3 * 2); // 2 rows of RGB24 + align_buffer_page_end(dest_j420, kSize * 3 / 2 * 2); + int iterations256 = (benchmark_width_ * benchmark_height_ + (kSize * 2 - 1)) / + (kSize * 2) * benchmark_iterations_; + + for (int i = 0; i < kSize * 3 * 2; ++i) { + orig_rgb24[i] = i; + } + + for (int i = 0; i < iterations256; ++i) { + RGB24ToJ420(orig_rgb24, kSize * 3, dest_j420, kSize, // Y plane + dest_j420 + kSize * 2, kSize / 2, // U plane + dest_j420 + kSize * 5 / 2, kSize / 2, // V plane + kSize, 2); + } + + uint32_t checksum = HashDjb2(dest_j420, kSize * 3 / 2 * 2, 5381); + EXPECT_EQ(2755440272u, checksum); + + free_aligned_buffer_page_end(orig_rgb24); + free_aligned_buffer_page_end(dest_j420); +} +#endif + +// Test RGB24 to I420 is exact +#if defined(LIBYUV_BIT_EXACT) +TEST_F(LibYUVConvertTest, TestRGB24ToI420) { + const int kSize = 256; + align_buffer_page_end(orig_rgb24, kSize * 3 * 2); // 2 rows of RGB24 + align_buffer_page_end(dest_i420, kSize * 3 / 2 * 2); + int iterations256 = (benchmark_width_ * benchmark_height_ + (kSize * 2 - 1)) / + (kSize * 2) * benchmark_iterations_; + + for (int i = 0; i < kSize * 3 * 2; ++i) { + orig_rgb24[i] = i; + } + + for (int i = 0; i < iterations256; ++i) { + RGB24ToI420(orig_rgb24, kSize * 3, dest_i420, kSize, // Y plane + dest_i420 + kSize * 2, kSize / 2, // U plane + dest_i420 + kSize * 5 / 2, kSize / 2, // V plane + kSize, 2); + } + + uint32_t checksum = HashDjb2(dest_i420, kSize * 3 / 2 * 2, 5381); + EXPECT_EQ(1526656597u, checksum); + + free_aligned_buffer_page_end(orig_rgb24); + free_aligned_buffer_page_end(dest_i420); +} +#endif + } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/unit_test/cpu_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/cpu_test.cc index e528558167..080778f5ff 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/cpu_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/cpu_test.cc @@ -40,6 +40,7 @@ TEST_F(LibYUVBaseTest, TestCpuHas) { int has_gfni = TestCpuFlag(kCpuHasGFNI); int has_avx512bw = TestCpuFlag(kCpuHasAVX512BW); int has_avx512vl = TestCpuFlag(kCpuHasAVX512VL); + int has_avx512vnni = TestCpuFlag(kCpuHasAVX512VNNI); int has_avx512vbmi = TestCpuFlag(kCpuHasAVX512VBMI); int has_avx512vbmi2 = TestCpuFlag(kCpuHasAVX512VBMI2); int has_avx512vbitalg = TestCpuFlag(kCpuHasAVX512VBITALG); @@ -57,6 +58,7 @@ TEST_F(LibYUVBaseTest, TestCpuHas) { printf("Has GFNI %d\n", has_gfni); printf("Has AVX512BW %d\n", has_avx512bw); printf("Has AVX512VL %d\n", has_avx512vl); + printf("Has AVX512VNNI %d\n", has_avx512vnni); printf("Has AVX512VBMI %d\n", has_avx512vbmi); printf("Has AVX512VBMI2 %d\n", has_avx512vbmi2); printf("Has AVX512VBITALG %d\n", has_avx512vbitalg); @@ -67,8 +69,15 @@ TEST_F(LibYUVBaseTest, TestCpuHas) { printf("Has MIPS %d\n", has_mips); int has_msa = TestCpuFlag(kCpuHasMSA); printf("Has MSA %d\n", has_msa); - int has_mmi = TestCpuFlag(kCpuHasMMI); - printf("Has MMI %d\n", has_mmi); +#endif + +#if defined(__loongarch__) + int has_loongarch = TestCpuFlag(kCpuHasLOONGARCH); + printf("Has LOONGARCH %d\n", has_loongarch); + int has_lsx = TestCpuFlag(kCpuHasLSX); + printf("Has LSX %d\n", has_lsx); + int has_lasx = TestCpuFlag(kCpuHasLASX); + printf("Has LASX %d\n", has_lasx); #endif } @@ -149,6 +158,9 @@ TEST_F(LibYUVBaseTest, TestCompilerMacros) { #ifdef _MIPS_ARCH_LOONGSON3A printf("_MIPS_ARCH_LOONGSON3A %d\n", _MIPS_ARCH_LOONGSON3A); #endif +#ifdef __loongarch__ + printf("__loongarch__ %d\n", __loongarch__); +#endif #ifdef _WIN32 
printf("_WIN32 %d\n", _WIN32); #endif @@ -239,17 +251,13 @@ TEST_F(LibYUVBaseTest, TestLinuxNeon) { #endif } -TEST_F(LibYUVBaseTest, TestLinuxMipsMsaMmi) { +TEST_F(LibYUVBaseTest, TestLinuxMipsMsa) { if (FileExists("../../unit_test/testdata/mips.txt")) { printf("Note: testing to load \"../../unit_test/testdata/mips.txt\"\n"); EXPECT_EQ(0, MipsCpuCaps("../../unit_test/testdata/mips.txt")); - EXPECT_EQ(kCpuHasMMI, - MipsCpuCaps("../../unit_test/testdata/mips_loongson3.txt")); - EXPECT_EQ(kCpuHasMMI, - MipsCpuCaps("../../unit_test/testdata/mips_loongson_mmi.txt")); EXPECT_EQ(kCpuHasMSA, MipsCpuCaps("../../unit_test/testdata/mips_msa.txt")); - EXPECT_EQ(kCpuHasMMI | kCpuHasMSA, + EXPECT_EQ(kCpuHasMSA, MipsCpuCaps("../../unit_test/testdata/mips_loongson2k.txt")); } else { printf("WARNING: unable to load \"../../unit_test/testdata/mips.txt\"\n"); diff --git a/third-party/libyuv/third_party/libyuv/unit_test/planar_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/planar_test.cc index 5c60842136..4f462d0a1a 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/planar_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/planar_test.cc @@ -29,6 +29,12 @@ #include "libyuv/row.h" /* For ScaleSumSamples_Neon */ #endif +#if defined(LIBYUV_BIT_EXACT) +#define EXPECTED_ATTENUATE_DIFF 0 +#else +#define EXPECTED_ATTENUATE_DIFF 2 +#endif + namespace libyuv { TEST_F(LibYUVPlanarTest, TestAttenuate) { @@ -100,9 +106,9 @@ TEST_F(LibYUVPlanarTest, TestAttenuate) { EXPECT_EQ(32, atten_pixels[128 * 4 + 1]); EXPECT_EQ(21, atten_pixels[128 * 4 + 2]); EXPECT_EQ(128, atten_pixels[128 * 4 + 3]); - EXPECT_NEAR(255, atten_pixels[255 * 4 + 0], 1); - EXPECT_NEAR(127, atten_pixels[255 * 4 + 1], 1); - EXPECT_NEAR(85, atten_pixels[255 * 4 + 2], 1); + EXPECT_NEAR(254, atten_pixels[255 * 4 + 0], EXPECTED_ATTENUATE_DIFF); + EXPECT_NEAR(127, atten_pixels[255 * 4 + 1], EXPECTED_ATTENUATE_DIFF); + EXPECT_NEAR(85, atten_pixels[255 * 4 + 2], EXPECTED_ATTENUATE_DIFF); EXPECT_EQ(255, atten_pixels[255 * 4 + 3]); free_aligned_buffer_page_end(atten2_pixels); @@ -158,28 +164,29 @@ TEST_F(LibYUVPlanarTest, ARGBAttenuate_Any) { int max_diff = TestAttenuateI(benchmark_width_ + 1, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, +1, 0); - EXPECT_LE(max_diff, 2); + + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, ARGBAttenuate_Unaligned) { int max_diff = TestAttenuateI(benchmark_width_, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, +1, 1); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, ARGBAttenuate_Invert) { int max_diff = TestAttenuateI(benchmark_width_, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, -1, 0); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, ARGBAttenuate_Opt) { int max_diff = TestAttenuateI(benchmark_width_, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, +1, 0); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } static int TestUnattenuateI(int width, @@ -231,28 +238,28 @@ TEST_F(LibYUVPlanarTest, ARGBUnattenuate_Any) { int max_diff = TestUnattenuateI(benchmark_width_ + 1, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, +1, 0); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, ARGBUnattenuate_Unaligned) { int max_diff = 
TestUnattenuateI(benchmark_width_, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, +1, 1); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, ARGBUnattenuate_Invert) { int max_diff = TestUnattenuateI(benchmark_width_, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, -1, 0); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, ARGBUnattenuate_Opt) { int max_diff = TestUnattenuateI(benchmark_width_, benchmark_height_, benchmark_iterations_, disable_cpu_flags_, benchmark_cpu_info_, +1, 0); - EXPECT_LE(max_diff, 2); + EXPECT_LE(max_diff, EXPECTED_ATTENUATE_DIFF); } TEST_F(LibYUVPlanarTest, TestARGBComputeCumulativeSum) { @@ -1073,6 +1080,87 @@ TEST_F(LibYUVPlanarTest, TestInterpolatePlane) { } } +TEST_F(LibYUVPlanarTest, TestInterpolatePlane_16) { + SIMD_ALIGNED(uint16_t orig_pixels_0[1280]); + SIMD_ALIGNED(uint16_t orig_pixels_1[1280]); + SIMD_ALIGNED(uint16_t interpolate_pixels[1280]); + memset(orig_pixels_0, 0, sizeof(orig_pixels_0)); + memset(orig_pixels_1, 0, sizeof(orig_pixels_1)); + + orig_pixels_0[0] = 16u; + orig_pixels_0[1] = 32u; + orig_pixels_0[2] = 64u; + orig_pixels_0[3] = 128u; + orig_pixels_0[4] = 0u; + orig_pixels_0[5] = 0u; + orig_pixels_0[6] = 0u; + orig_pixels_0[7] = 255u; + orig_pixels_0[8] = 0u; + orig_pixels_0[9] = 0u; + orig_pixels_0[10] = 0u; + orig_pixels_0[11] = 0u; + orig_pixels_0[12] = 0u; + orig_pixels_0[13] = 0u; + orig_pixels_0[14] = 0u; + orig_pixels_0[15] = 0u; + + orig_pixels_1[0] = 0u; + orig_pixels_1[1] = 0u; + orig_pixels_1[2] = 0u; + orig_pixels_1[3] = 0u; + orig_pixels_1[4] = 0u; + orig_pixels_1[5] = 0u; + orig_pixels_1[6] = 0u; + orig_pixels_1[7] = 0u; + orig_pixels_1[8] = 0u; + orig_pixels_1[9] = 0u; + orig_pixels_1[10] = 0u; + orig_pixels_1[11] = 0u; + orig_pixels_1[12] = 255u; + orig_pixels_1[13] = 255u; + orig_pixels_1[14] = 255u; + orig_pixels_1[15] = 255u; + + InterpolatePlane_16(&orig_pixels_0[0], 0, &orig_pixels_1[0], 0, + &interpolate_pixels[0], 0, 16, 1, 128); + EXPECT_EQ(8u, interpolate_pixels[0]); + EXPECT_EQ(16u, interpolate_pixels[1]); + EXPECT_EQ(32u, interpolate_pixels[2]); + EXPECT_EQ(64u, interpolate_pixels[3]); + EXPECT_EQ(0u, interpolate_pixels[4]); + EXPECT_EQ(0u, interpolate_pixels[5]); + EXPECT_EQ(0u, interpolate_pixels[6]); + EXPECT_EQ(128u, interpolate_pixels[7]); + EXPECT_EQ(0u, interpolate_pixels[8]); + EXPECT_EQ(0u, interpolate_pixels[9]); + EXPECT_EQ(0u, interpolate_pixels[10]); + EXPECT_EQ(0u, interpolate_pixels[11]); + EXPECT_EQ(128u, interpolate_pixels[12]); + EXPECT_EQ(128u, interpolate_pixels[13]); + EXPECT_EQ(128u, interpolate_pixels[14]); + EXPECT_EQ(128u, interpolate_pixels[15]); + + InterpolatePlane_16(&orig_pixels_0[0], 0, &orig_pixels_1[0], 0, + &interpolate_pixels[0], 0, 16, 1, 0); + EXPECT_EQ(16u, interpolate_pixels[0]); + EXPECT_EQ(32u, interpolate_pixels[1]); + EXPECT_EQ(64u, interpolate_pixels[2]); + EXPECT_EQ(128u, interpolate_pixels[3]); + + InterpolatePlane_16(&orig_pixels_0[0], 0, &orig_pixels_1[0], 0, + &interpolate_pixels[0], 0, 16, 1, 192); + + EXPECT_EQ(4u, interpolate_pixels[0]); + EXPECT_EQ(8u, interpolate_pixels[1]); + EXPECT_EQ(16u, interpolate_pixels[2]); + EXPECT_EQ(32u, interpolate_pixels[3]); + + for (int i = 0; i < benchmark_pixels_div1280_; ++i) { + InterpolatePlane_16(&orig_pixels_0[0], 0, &orig_pixels_1[0], 0, + &interpolate_pixels[0], 0, 1280, 1, 123); + } +} + #define TESTTERP(FMT_A, BPP_A, STRIDE_A, FMT_B, BPP_B, STRIDE_B, 
W1280, TERP, \ N, NEG, OFF) \ TEST_F(LibYUVPlanarTest, ARGBInterpolate##TERP##N) { \ @@ -1477,6 +1565,251 @@ TEST_F(LibYUVPlanarTest, TestCopyPlane) { EXPECT_EQ(0, err); } +TEST_F(LibYUVPlanarTest, CopyPlane_Opt) { + int i; + int y_plane_size = benchmark_width_ * benchmark_height_; + align_buffer_page_end(orig_y, y_plane_size); + align_buffer_page_end(dst_c, y_plane_size); + align_buffer_page_end(dst_opt, y_plane_size); + + MemRandomize(orig_y, y_plane_size); + memset(dst_c, 1, y_plane_size); + memset(dst_opt, 2, y_plane_size); + + // Disable all optimizations. + MaskCpuFlags(disable_cpu_flags_); + for (i = 0; i < benchmark_iterations_; i++) { + CopyPlane(orig_y, benchmark_width_, dst_c, benchmark_width_, + benchmark_width_, benchmark_height_); + } + + // Enable optimizations. + MaskCpuFlags(benchmark_cpu_info_); + for (i = 0; i < benchmark_iterations_; i++) { + CopyPlane(orig_y, benchmark_width_, dst_opt, benchmark_width_, + benchmark_width_, benchmark_height_); + } + + for (i = 0; i < y_plane_size; ++i) { + EXPECT_EQ(dst_c[i], dst_opt[i]); + } + + free_aligned_buffer_page_end(orig_y); + free_aligned_buffer_page_end(dst_c); + free_aligned_buffer_page_end(dst_opt); +} + +TEST_F(LibYUVPlanarTest, TestCopyPlaneZero) { + // Test to verify copying a rect with a zero height or width does + // not touch destination memory. + uint8_t src = 42; + uint8_t dst = 0; + + // Disable all optimizations. + MaskCpuFlags(disable_cpu_flags_); + CopyPlane(&src, 0, &dst, 0, 0, 0); + EXPECT_EQ(src, 42); + EXPECT_EQ(dst, 0); + + CopyPlane(&src, 1, &dst, 1, 1, 0); + EXPECT_EQ(src, 42); + EXPECT_EQ(dst, 0); + + CopyPlane(&src, 1, &dst, 1, 0, 1); + EXPECT_EQ(src, 42); + EXPECT_EQ(dst, 0); + + // Enable optimizations. + MaskCpuFlags(benchmark_cpu_info_); + CopyPlane(&src, 0, &dst, 0, 0, 0); + EXPECT_EQ(src, 42); + EXPECT_EQ(dst, 0); + + CopyPlane(&src, 1, &dst, 1, 1, 0); + EXPECT_EQ(src, 42); + EXPECT_EQ(dst, 0); + + CopyPlane(&src, 1, &dst, 1, 0, 1); + EXPECT_EQ(src, 42); + EXPECT_EQ(dst, 0); +} + +TEST_F(LibYUVPlanarTest, TestDetilePlane) { + int i, j; + + // orig is tiled. Allocate enough memory for tiles. + int tile_width = (benchmark_width_ + 15) & ~15; + int tile_height = (benchmark_height_ + 15) & ~15; + int tile_plane_size = tile_width * tile_height; + int y_plane_size = benchmark_width_ * benchmark_height_; + align_buffer_page_end(tile_y, tile_plane_size); + align_buffer_page_end(dst_c, y_plane_size); + align_buffer_page_end(dst_opt, y_plane_size); + + MemRandomize(tile_y, tile_plane_size); + memset(dst_c, 0, y_plane_size); + memset(dst_opt, 0, y_plane_size); + + // Disable all optimizations. + MaskCpuFlags(disable_cpu_flags_); + for (j = 0; j < benchmark_iterations_; j++) { + DetilePlane(tile_y, tile_width, dst_c, benchmark_width_, benchmark_width_, + benchmark_height_, 16); + } + + // Enable optimizations. + MaskCpuFlags(benchmark_cpu_info_); + for (j = 0; j < benchmark_iterations_; j++) { + DetilePlane(tile_y, tile_width, dst_opt, benchmark_width_, benchmark_width_, + benchmark_height_, 16); + } + + for (i = 0; i < y_plane_size; ++i) { + EXPECT_EQ(dst_c[i], dst_opt[i]); + } + + free_aligned_buffer_page_end(tile_y); + free_aligned_buffer_page_end(dst_c); + free_aligned_buffer_page_end(dst_opt); +} + +TEST_F(LibYUVPlanarTest, TestDetilePlane_16) { + int i, j; + + // orig is tiled. Allocate enough memory for tiles. 
+ int tile_width = (benchmark_width_ + 15) & ~15; + int tile_height = (benchmark_height_ + 15) & ~15; + int tile_plane_size = tile_width * tile_height * 2; + int y_plane_size = benchmark_width_ * benchmark_height_ * 2; + align_buffer_page_end(tile_y, tile_plane_size); + align_buffer_page_end(dst_c, y_plane_size); + align_buffer_page_end(dst_opt, y_plane_size); + + MemRandomize(tile_y, tile_plane_size); + memset(dst_c, 0, y_plane_size); + memset(dst_opt, 0, y_plane_size); + + // Disable all optimizations. + MaskCpuFlags(disable_cpu_flags_); + for (j = 0; j < benchmark_iterations_; j++) { + DetilePlane_16((const uint16_t*)tile_y, tile_width, (uint16_t*)dst_c, + benchmark_width_, benchmark_width_, benchmark_height_, 16); + } + + // Enable optimizations. + MaskCpuFlags(benchmark_cpu_info_); + for (j = 0; j < benchmark_iterations_; j++) { + DetilePlane_16((const uint16_t*)tile_y, tile_width, (uint16_t*)dst_opt, + benchmark_width_, benchmark_width_, benchmark_height_, 16); + } + + for (i = 0; i < y_plane_size; ++i) { + EXPECT_EQ(dst_c[i], dst_opt[i]); + } + + free_aligned_buffer_page_end(tile_y); + free_aligned_buffer_page_end(dst_c); + free_aligned_buffer_page_end(dst_opt); +} + +// Compares DetileSplitUV to 2 step Detile + SplitUV +TEST_F(LibYUVPlanarTest, TestDetileSplitUVPlane_Correctness) { + int i, j; + + // orig is tiled. Allocate enough memory for tiles. + int tile_width = (benchmark_width_ + 15) & ~15; + int tile_height = (benchmark_height_ + 15) & ~15; + int tile_plane_size = tile_width * tile_height; + int uv_plane_size = ((benchmark_width_ + 1) / 2) * benchmark_height_; + align_buffer_page_end(tile_uv, tile_plane_size); + align_buffer_page_end(detiled_uv, tile_plane_size); + align_buffer_page_end(dst_u_two_stage, uv_plane_size); + align_buffer_page_end(dst_u_opt, uv_plane_size); + align_buffer_page_end(dst_v_two_stage, uv_plane_size); + align_buffer_page_end(dst_v_opt, uv_plane_size); + + MemRandomize(tile_uv, tile_plane_size); + memset(detiled_uv, 0, tile_plane_size); + memset(dst_u_two_stage, 0, uv_plane_size); + memset(dst_u_opt, 0, uv_plane_size); + memset(dst_v_two_stage, 0, uv_plane_size); + memset(dst_v_opt, 0, uv_plane_size); + + DetileSplitUVPlane(tile_uv, tile_width, dst_u_opt, (benchmark_width_ + 1) / 2, + dst_v_opt, (benchmark_width_ + 1) / 2, benchmark_width_, + benchmark_height_, 16); + + // Benchmark 2 step conversion for comparison. + for (j = 0; j < benchmark_iterations_; j++) { + DetilePlane(tile_uv, tile_width, detiled_uv, benchmark_width_, + benchmark_width_, benchmark_height_, 16); + SplitUVPlane(detiled_uv, tile_width, dst_u_two_stage, + (benchmark_width_ + 1) / 2, dst_v_two_stage, + (benchmark_width_ + 1) / 2, (benchmark_width_ + 1) / 2, + benchmark_height_); + } + + for (i = 0; i < uv_plane_size; ++i) { + EXPECT_EQ(dst_u_two_stage[i], dst_u_opt[i]); + EXPECT_EQ(dst_v_two_stage[i], dst_v_opt[i]); + } + + free_aligned_buffer_page_end(tile_uv); + free_aligned_buffer_page_end(detiled_uv); + free_aligned_buffer_page_end(dst_u_two_stage); + free_aligned_buffer_page_end(dst_u_opt); + free_aligned_buffer_page_end(dst_v_two_stage); + free_aligned_buffer_page_end(dst_v_opt); +} + +TEST_F(LibYUVPlanarTest, TestDetileSplitUVPlane_Benchmark) { + int i, j; + + // orig is tiled. Allocate enough memory for tiles. 
+ int tile_width = (benchmark_width_ + 15) & ~15; + int tile_height = (benchmark_height_ + 15) & ~15; + int tile_plane_size = tile_width * tile_height; + int uv_plane_size = ((benchmark_width_ + 1) / 2) * benchmark_height_; + align_buffer_page_end(tile_uv, tile_plane_size); + align_buffer_page_end(dst_u_c, uv_plane_size); + align_buffer_page_end(dst_u_opt, uv_plane_size); + align_buffer_page_end(dst_v_c, uv_plane_size); + align_buffer_page_end(dst_v_opt, uv_plane_size); + + MemRandomize(tile_uv, tile_plane_size); + memset(dst_u_c, 0, uv_plane_size); + memset(dst_u_opt, 0, uv_plane_size); + memset(dst_v_c, 0, uv_plane_size); + memset(dst_v_opt, 0, uv_plane_size); + + // Disable all optimizations. + MaskCpuFlags(disable_cpu_flags_); + + DetileSplitUVPlane(tile_uv, tile_width, dst_u_c, (benchmark_width_ + 1) / 2, + dst_v_c, (benchmark_width_ + 1) / 2, benchmark_width_, + benchmark_height_, 16); + + // Enable optimizations. + MaskCpuFlags(benchmark_cpu_info_); + + for (j = 0; j < benchmark_iterations_; j++) { + DetileSplitUVPlane( + tile_uv, tile_width, dst_u_opt, (benchmark_width_ + 1) / 2, dst_v_opt, + (benchmark_width_ + 1) / 2, benchmark_width_, benchmark_height_, 16); + } + + for (i = 0; i < uv_plane_size; ++i) { + EXPECT_EQ(dst_u_c[i], dst_u_opt[i]); + EXPECT_EQ(dst_v_c[i], dst_v_opt[i]); + } + + free_aligned_buffer_page_end(tile_uv); + free_aligned_buffer_page_end(dst_u_c); + free_aligned_buffer_page_end(dst_u_opt); + free_aligned_buffer_page_end(dst_v_c); + free_aligned_buffer_page_end(dst_v_opt); +} + static int TestMultiply(int width, int height, int benchmark_iterations, @@ -1966,7 +2299,7 @@ static int TestBlur(int width, return max_diff; } -#if defined(ENABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) #define DISABLED_ARM(name) name #else #define DISABLED_ARM(name) DISABLED_##name @@ -3131,13 +3464,13 @@ TEST_F(LibYUVPlanarTest, SplitXRGBPlane_Opt) { #define TESTQPLANARTOP(FUNC, STYPE, DTYPE, DEPTH) \ TESTQPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_ + 1, _Any, +, 0) \ TESTQPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Unaligned, +, \ - 1) \ + 2) \ TESTQPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Invert, -, 0) \ TESTQPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Opt, +, 0) \ TESTQPLANAROTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_ + 1, _Any, +, \ 0) \ TESTQPLANAROTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Unaligned, +, \ - 1) \ + 2) \ TESTQPLANAROTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Invert, -, 0) \ TESTQPLANAROTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Opt, +, 0) @@ -3190,7 +3523,7 @@ TESTQPLANARTOP(MergeARGB16To8, uint16_t, uint8_t, 16) #define TESTTPLANARTOP(FUNC, STYPE, DTYPE, DEPTH) \ TESTTPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_ + 1, _Any, +, 0) \ TESTTPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Unaligned, +, \ - 1) \ + 2) \ TESTTPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Invert, -, 0) \ TESTTPLANARTOPI(FUNC, STYPE, DTYPE, DEPTH, benchmark_width_, _Opt, +, 0) @@ -3216,19 +3549,19 @@ TEST_F(LibYUVPlanarTest, MergeUVRow_16_Opt) { MergeUVRow_16_C(reinterpret_cast<const uint16_t*>(src_pixels_u), reinterpret_cast<const uint16_t*>(src_pixels_v), - reinterpret_cast<uint16_t*>(dst_pixels_uv_c), 64, kPixels); + reinterpret_cast<uint16_t*>(dst_pixels_uv_c), 16, kPixels); int has_avx2 = TestCpuFlag(kCpuHasAVX2); for (int i = 0; i < benchmark_iterations_; ++i) { if (has_avx2) { MergeUVRow_16_AVX2(reinterpret_cast<const uint16_t*>(src_pixels_u),
reinterpret_cast<const uint16_t*>(src_pixels_v), - reinterpret_cast<uint16_t*>(dst_pixels_uv_opt), 64, + reinterpret_cast<uint16_t*>(dst_pixels_uv_opt), 16, kPixels); } else { MergeUVRow_16_C(reinterpret_cast<const uint16_t*>(src_pixels_u), reinterpret_cast<const uint16_t*>(src_pixels_v), - reinterpret_cast<uint16_t*>(dst_pixels_uv_opt), 64, + reinterpret_cast<uint16_t*>(dst_pixels_uv_opt), 16, kPixels); } } @@ -3315,6 +3648,64 @@ TEST_F(LibYUVPlanarTest, Convert16To8Plane) { free_aligned_buffer_page_end(dst_pixels_y_c); } +TEST_F(LibYUVPlanarTest, YUY2ToY) { + const int kPixels = benchmark_width_ * benchmark_height_; + align_buffer_page_end(src_pixels_y, kPixels * 2); + align_buffer_page_end(dst_pixels_y_opt, kPixels); + align_buffer_page_end(dst_pixels_y_c, kPixels); + + MemRandomize(src_pixels_y, kPixels * 2); + memset(dst_pixels_y_opt, 0, kPixels); + memset(dst_pixels_y_c, 1, kPixels); + + MaskCpuFlags(disable_cpu_flags_); + YUY2ToY(src_pixels_y, benchmark_width_ * 2, dst_pixels_y_c, benchmark_width_, + benchmark_width_, benchmark_height_); + MaskCpuFlags(benchmark_cpu_info_); + + for (int i = 0; i < benchmark_iterations_; ++i) { + YUY2ToY(src_pixels_y, benchmark_width_ * 2, dst_pixels_y_opt, + benchmark_width_, benchmark_width_, benchmark_height_); + } + + for (int i = 0; i < kPixels; ++i) { + EXPECT_EQ(dst_pixels_y_opt[i], dst_pixels_y_c[i]); + } + + free_aligned_buffer_page_end(src_pixels_y); + free_aligned_buffer_page_end(dst_pixels_y_opt); + free_aligned_buffer_page_end(dst_pixels_y_c); +} + +TEST_F(LibYUVPlanarTest, UYVYToY) { + const int kPixels = benchmark_width_ * benchmark_height_; + align_buffer_page_end(src_pixels_y, kPixels * 2); + align_buffer_page_end(dst_pixels_y_opt, kPixels); + align_buffer_page_end(dst_pixels_y_c, kPixels); + + MemRandomize(src_pixels_y, kPixels * 2); + memset(dst_pixels_y_opt, 0, kPixels); + memset(dst_pixels_y_c, 1, kPixels); + + MaskCpuFlags(disable_cpu_flags_); + UYVYToY(src_pixels_y, benchmark_width_ * 2, dst_pixels_y_c, benchmark_width_, + benchmark_width_, benchmark_height_); + MaskCpuFlags(benchmark_cpu_info_); + + for (int i = 0; i < benchmark_iterations_; ++i) { + UYVYToY(src_pixels_y, benchmark_width_ * 2, dst_pixels_y_opt, + benchmark_width_, benchmark_width_, benchmark_height_); + } + + for (int i = 0; i < kPixels; ++i) { + EXPECT_EQ(dst_pixels_y_opt[i], dst_pixels_y_c[i]); + } + + free_aligned_buffer_page_end(src_pixels_y); + free_aligned_buffer_page_end(dst_pixels_y_opt); + free_aligned_buffer_page_end(dst_pixels_y_c); +} + #ifdef ENABLE_ROW_TESTS // TODO(fbarchard): Improve test for more platforms.
#ifdef HAS_CONVERT16TO8ROW_AVX2 @@ -3361,6 +3752,35 @@ TEST_F(LibYUVPlanarTest, Convert16To8Row_Opt) { free_aligned_buffer_page_end(dst_pixels_y_c); } #endif // HAS_CONVERT16TO8ROW_AVX2 + +#ifdef HAS_UYVYTOYROW_NEON +TEST_F(LibYUVPlanarTest, UYVYToYRow_Opt) { + // NEON does multiple of 16, so round count up + const int kPixels = (benchmark_width_ * benchmark_height_ + 15) & ~15; + align_buffer_page_end(src_pixels_y, kPixels * 2); + align_buffer_page_end(dst_pixels_y_opt, kPixels); + align_buffer_page_end(dst_pixels_y_c, kPixels); + + MemRandomize(src_pixels_y, kPixels * 2); + memset(dst_pixels_y_opt, 0, kPixels); + memset(dst_pixels_y_c, 1, kPixels); + + UYVYToYRow_C(src_pixels_y, dst_pixels_y_c, kPixels); + + for (int i = 0; i < benchmark_iterations_; ++i) { + UYVYToYRow_NEON(src_pixels_y, dst_pixels_y_opt, kPixels); + } + + for (int i = 0; i < kPixels; ++i) { + EXPECT_EQ(dst_pixels_y_opt[i], dst_pixels_y_c[i]); + } + + free_aligned_buffer_page_end(src_pixels_y); + free_aligned_buffer_page_end(dst_pixels_y_opt); + free_aligned_buffer_page_end(dst_pixels_y_c); +} +#endif // HAS_UYVYTOYROW_NEON + #endif // ENABLE_ROW_TESTS TEST_F(LibYUVPlanarTest, Convert8To16Plane) { diff --git a/third-party/libyuv/third_party/libyuv/unit_test/rotate_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/rotate_test.cc index 1bab584fa1..d3887414dd 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/rotate_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/rotate_test.cc @@ -16,6 +16,8 @@ namespace libyuv { +#define SUBSAMPLE(v, a) ((((v) + (a)-1)) / (a)) + static void I420TestRotate(int src_width, int src_height, int dst_width, @@ -135,6 +137,94 @@ TEST_F(LibYUVRotateTest, DISABLED_I420Rotate270_Odd) { benchmark_cpu_info_); } +static void I422TestRotate(int src_width, + int src_height, + int dst_width, + int dst_height, + libyuv::RotationMode mode, + int benchmark_iterations, + int disable_cpu_flags, + int benchmark_cpu_info) { + if (src_width < 1) { + src_width = 1; + } + if (src_height == 0) { + src_height = 1; + } + if (dst_width < 1) { + dst_width = 1; + } + if (dst_height < 1) { + dst_height = 1; + } + int src_i422_y_size = src_width * Abs(src_height); + int src_i422_uv_size = ((src_width + 1) / 2) * Abs(src_height); + int src_i422_size = src_i422_y_size + src_i422_uv_size * 2; + align_buffer_page_end(src_i422, src_i422_size); + for (int i = 0; i < src_i422_size; ++i) { + src_i422[i] = fastrand() & 0xff; + } + + int dst_i422_y_size = dst_width * dst_height; + int dst_i422_uv_size = ((dst_width + 1) / 2) * dst_height; + int dst_i422_size = dst_i422_y_size + dst_i422_uv_size * 2; + align_buffer_page_end(dst_i422_c, dst_i422_size); + align_buffer_page_end(dst_i422_opt, dst_i422_size); + memset(dst_i422_c, 2, dst_i422_size); + memset(dst_i422_opt, 3, dst_i422_size); + + MaskCpuFlags(disable_cpu_flags); // Disable all CPU optimization. + I422Rotate(src_i422, src_width, src_i422 + src_i422_y_size, + (src_width + 1) / 2, src_i422 + src_i422_y_size + src_i422_uv_size, + (src_width + 1) / 2, dst_i422_c, dst_width, + dst_i422_c + dst_i422_y_size, (dst_width + 1) / 2, + dst_i422_c + dst_i422_y_size + dst_i422_uv_size, + (dst_width + 1) / 2, src_width, src_height, mode); + + MaskCpuFlags(benchmark_cpu_info); // Enable all CPU optimization. 
+ for (int i = 0; i < benchmark_iterations; ++i) { + I422Rotate( + src_i422, src_width, src_i422 + src_i422_y_size, (src_width + 1) / 2, + src_i422 + src_i422_y_size + src_i422_uv_size, (src_width + 1) / 2, + dst_i422_opt, dst_width, dst_i422_opt + dst_i422_y_size, + (dst_width + 1) / 2, dst_i422_opt + dst_i422_y_size + dst_i422_uv_size, + (dst_width + 1) / 2, src_width, src_height, mode); + } + + // Rotation should be exact. + for (int i = 0; i < dst_i422_size; ++i) { + EXPECT_EQ(dst_i422_c[i], dst_i422_opt[i]); + } + + free_aligned_buffer_page_end(dst_i422_c); + free_aligned_buffer_page_end(dst_i422_opt); + free_aligned_buffer_page_end(src_i422); +} + +TEST_F(LibYUVRotateTest, I422Rotate0_Opt) { + I422TestRotate(benchmark_width_, benchmark_height_, benchmark_width_, + benchmark_height_, kRotate0, benchmark_iterations_, + disable_cpu_flags_, benchmark_cpu_info_); +} + +TEST_F(LibYUVRotateTest, I422Rotate90_Opt) { + I422TestRotate(benchmark_width_, benchmark_height_, benchmark_height_, + benchmark_width_, kRotate90, benchmark_iterations_, + disable_cpu_flags_, benchmark_cpu_info_); +} + +TEST_F(LibYUVRotateTest, I422Rotate180_Opt) { + I422TestRotate(benchmark_width_, benchmark_height_, benchmark_width_, + benchmark_height_, kRotate180, benchmark_iterations_, + disable_cpu_flags_, benchmark_cpu_info_); +} + +TEST_F(LibYUVRotateTest, I422Rotate270_Opt) { + I422TestRotate(benchmark_width_, benchmark_height_, benchmark_height_, + benchmark_width_, kRotate270, benchmark_iterations_, + disable_cpu_flags_, benchmark_cpu_info_); +} + static void I444TestRotate(int src_width, int src_height, int dst_width, @@ -391,4 +481,119 @@ TEST_F(LibYUVRotateTest, NV12Rotate270_Invert) { disable_cpu_flags_, benchmark_cpu_info_); } +// Test Android 420 to I420 Rotate +#define TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, \ + SRC_SUBSAMP_Y, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ + W1280, N, NEG, OFF, PN, OFF_U, OFF_V, ROT) \ + TEST_F(LibYUVRotateTest, \ + SRC_FMT_PLANAR##To##FMT_PLANAR##Rotate##ROT##To##PN##N) { \ + const int kWidth = W1280; \ + const int kHeight = benchmark_height_; \ + const int kSizeUV = \ + SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); \ + align_buffer_page_end(src_y, kWidth* kHeight + OFF); \ + align_buffer_page_end(src_uv, \ + kSizeUV*((PIXEL_STRIDE == 3) ? 3 : 2) + OFF); \ + align_buffer_page_end(dst_y_c, kWidth* kHeight); \ + align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \ + SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \ + SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + align_buffer_page_end(dst_y_opt, kWidth* kHeight); \ + align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \ + SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \ + SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + uint8_t* src_u = src_uv + OFF_U; \ + uint8_t* src_v = src_uv + (PIXEL_STRIDE == 1 ? 
kSizeUV : OFF_V); \ + int src_stride_uv = SUBSAMPLE(kWidth, SUBSAMP_X) * PIXEL_STRIDE; \ + for (int i = 0; i < kHeight; ++i) \ + for (int j = 0; j < kWidth; ++j) \ + src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \ + for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \ + for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \ + src_u[(i * src_stride_uv) + j * PIXEL_STRIDE + OFF] = \ + (fastrand() & 0xff); \ + src_v[(i * src_stride_uv) + j * PIXEL_STRIDE + OFF] = \ + (fastrand() & 0xff); \ + } \ + } \ + memset(dst_y_c, 1, kWidth* kHeight); \ + memset(dst_u_c, 2, \ + SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + memset(dst_v_c, 3, \ + SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + memset(dst_y_opt, 101, kWidth* kHeight); \ + memset(dst_u_opt, 102, \ + SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + memset(dst_v_opt, 103, \ + SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ + MaskCpuFlags(disable_cpu_flags_); \ + SRC_FMT_PLANAR##To##FMT_PLANAR##Rotate( \ + src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \ + src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), PIXEL_STRIDE, dst_y_c, \ + kWidth, dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c, \ + SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight, \ + (libyuv::RotationMode)ROT); \ + MaskCpuFlags(benchmark_cpu_info_); \ + for (int i = 0; i < benchmark_iterations_; ++i) { \ + SRC_FMT_PLANAR##To##FMT_PLANAR##Rotate( \ + src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \ + src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), PIXEL_STRIDE, \ + dst_y_opt, kWidth, dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \ + dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight, \ + (libyuv::RotationMode)ROT); \ + } \ + for (int i = 0; i < kHeight; ++i) { \ + for (int j = 0; j < kWidth; ++j) { \ + EXPECT_EQ(dst_y_c[i * kWidth + j], dst_y_opt[i * kWidth + j]); \ + } \ + } \ + for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \ + for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \ + EXPECT_EQ(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j], \ + dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]); \ + } \ + } \ + for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \ + for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \ + EXPECT_EQ(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j], \ + dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]); \ + } \ + } \ + free_aligned_buffer_page_end(dst_y_c); \ + free_aligned_buffer_page_end(dst_u_c); \ + free_aligned_buffer_page_end(dst_v_c); \ + free_aligned_buffer_page_end(dst_y_opt); \ + free_aligned_buffer_page_end(dst_u_opt); \ + free_aligned_buffer_page_end(dst_v_opt); \ + free_aligned_buffer_page_end(src_y); \ + free_aligned_buffer_page_end(src_uv); \ + } + +#define TESTAPLANARTOP(SRC_FMT_PLANAR, PN, PIXEL_STRIDE, OFF_U, OFF_V, \ + SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, SUBSAMP_X, \ + SUBSAMP_Y) \ + TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ + FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_ + 1, \ + _Any, +, 0, PN, OFF_U, OFF_V, 0) \ + TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ + FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, \ + _Unaligned, +, 2, PN, OFF_U, OFF_V, 0) \ + TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ + FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, \ + -, 0, PN, OFF_U, OFF_V, 0) \ + TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, 
SRC_SUBSAMP_Y, \ + FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, \ + 0, PN, OFF_U, OFF_V, 0) \ + TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \ + FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, \ + 0, PN, OFF_U, OFF_V, 180) + +TESTAPLANARTOP(Android420, I420, 1, 0, 0, 2, 2, I420, 2, 2) +TESTAPLANARTOP(Android420, NV12, 2, 0, 1, 2, 2, I420, 2, 2) +TESTAPLANARTOP(Android420, NV21, 2, 1, 0, 2, 2, I420, 2, 2) +#undef TESTAPLANARTOP +#undef TESTAPLANARTOPI + } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/unit_test/scale_argb_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/scale_argb_test.cc index 48ad75eafd..f54a68f11f 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/scale_argb_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/scale_argb_test.cc @@ -22,6 +22,12 @@ namespace libyuv { #define STRINGIZE(line) #line #define FILELINESTR(file, line) file ":" STRINGIZE(line) +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +// SLOW TESTS are those that are unoptimized C code. +// FULL TESTS are optimized but test many variations of the same code. +#define ENABLE_FULL_TESTS +#endif + // Test scaling with C vs Opt and return maximum pixel difference. 0 = exact. static int ARGBTestFilter(int src_width, int src_height, @@ -251,23 +257,30 @@ static int ARGBClipTestFilter(int src_width, // Test a scale factor with all 4 filters. Expect unfiltered to be exact, but // filtering is different fixed point implementations for SSSE3, Neon and C. -#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS #define TEST_FACTOR(name, nom, denom) \ TEST_FACTOR1(, name, None, nom, denom, 0) \ TEST_FACTOR1(, name, Linear, nom, denom, 3) \ TEST_FACTOR1(, name, Bilinear, nom, denom, 3) \ TEST_FACTOR1(, name, Box, nom, denom, 3) #else +#if defined(ENABLE_FULL_TESTS) #define TEST_FACTOR(name, nom, denom) \ TEST_FACTOR1(DISABLED_, name, None, nom, denom, 0) \ TEST_FACTOR1(DISABLED_, name, Linear, nom, denom, 3) \ TEST_FACTOR1(DISABLED_, name, Bilinear, nom, denom, 3) \ TEST_FACTOR1(DISABLED_, name, Box, nom, denom, 3) +#else +#define TEST_FACTOR(name, nom, denom) \ + TEST_FACTOR1(DISABLED_, name, Bilinear, nom, denom, 3) +#endif #endif TEST_FACTOR(2, 1, 2) TEST_FACTOR(4, 1, 4) -// TEST_FACTOR(8, 1, 8) Disable for benchmark performance. +#ifndef DISABLE_SLOW_TESTS +TEST_FACTOR(8, 1, 8) +#endif TEST_FACTOR(3by4, 3, 4) TEST_FACTOR(3by8, 3, 8) TEST_FACTOR(3, 1, 3) @@ -305,28 +318,33 @@ TEST_FACTOR(3, 1, 3) EXPECT_LE(diff, max_diff); \ } -/// Test scale to a specified size with all 4 filters. -#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS +// Test scale to a specified size with all 4 filters. 
#define TEST_SCALETO(name, width, height) \ TEST_SCALETO1(, name, width, height, None, 0) \ TEST_SCALETO1(, name, width, height, Linear, 3) \ TEST_SCALETO1(, name, width, height, Bilinear, 3) #else +#if defined(ENABLE_FULL_TESTS) #define TEST_SCALETO(name, width, height) \ TEST_SCALETO1(DISABLED_, name, width, height, None, 0) \ TEST_SCALETO1(DISABLED_, name, width, height, Linear, 3) \ TEST_SCALETO1(DISABLED_, name, width, height, Bilinear, 3) +#else +#define TEST_SCALETO(name, width, height) \ + TEST_SCALETO1(DISABLED_, name, width, height, Bilinear, 3) +#endif #endif TEST_SCALETO(ARGBScale, 1, 1) -TEST_SCALETO(ARGBScale, 256, 144) /* 128x72 * 2 */ -TEST_SCALETO(ARGBScale, 320, 240) TEST_SCALETO(ARGBScale, 569, 480) TEST_SCALETO(ARGBScale, 640, 360) -#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS +TEST_SCALETO(ARGBScale, 256, 144) /* 128x72 * 2 */ +TEST_SCALETO(ARGBScale, 320, 240) TEST_SCALETO(ARGBScale, 1280, 720) TEST_SCALETO(ARGBScale, 1920, 1080) -#endif // ENABLE_SLOW_TESTS +#endif // DISABLE_SLOW_TESTS #undef TEST_SCALETO1 #undef TEST_SCALETO @@ -339,10 +357,14 @@ TEST_SCALETO(ARGBScale, 1920, 1080) EXPECT_LE(diff, max_diff); \ } +#if defined(ENABLE_FULL_TESTS) // Test scale with swapped width and height with all 3 filters. TEST_SCALESWAPXY1(ARGBScale, None, 0) TEST_SCALESWAPXY1(ARGBScale, Linear, 0) TEST_SCALESWAPXY1(ARGBScale, Bilinear, 0) +#else +TEST_SCALESWAPXY1(ARGBScale, Bilinear, 0) +#endif #undef TEST_SCALESWAPXY1 // Scale with YUV conversion to ARGB and clipping. @@ -490,11 +512,11 @@ TEST_F(LibYUVScaleTest, YUVToRGBScaleDown) { } TEST_F(LibYUVScaleTest, ARGBTest3x) { - const int kSrcStride = 48 * 4; - const int kDstStride = 16 * 4; + const int kSrcStride = 480 * 4; + const int kDstStride = 160 * 4; const int kSize = kSrcStride * 3; align_buffer_page_end(orig_pixels, kSize); - for (int i = 0; i < 48 * 3; ++i) { + for (int i = 0; i < 480 * 3; ++i) { orig_pixels[i * 4 + 0] = i; orig_pixels[i * 4 + 1] = 255 - i; orig_pixels[i * 4 + 2] = i + 1; @@ -502,36 +524,36 @@ TEST_F(LibYUVScaleTest, ARGBTest3x) { } align_buffer_page_end(dest_pixels, kDstStride); - int iterations16 = - benchmark_width_ * benchmark_height_ / (16 * 1) * benchmark_iterations_; - for (int i = 0; i < iterations16; ++i) { - ARGBScale(orig_pixels, kSrcStride, 48, 3, dest_pixels, kDstStride, 16, 1, + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + ARGBScale(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, kFilterBilinear); } - EXPECT_EQ(49, dest_pixels[0]); - EXPECT_EQ(255 - 49, dest_pixels[1]); - EXPECT_EQ(50, dest_pixels[2]); - EXPECT_EQ(59, dest_pixels[3]); + EXPECT_EQ(225, dest_pixels[0]); + EXPECT_EQ(255 - 225, dest_pixels[1]); + EXPECT_EQ(226, dest_pixels[2]); + EXPECT_EQ(235, dest_pixels[3]); - ARGBScale(orig_pixels, kSrcStride, 48, 3, dest_pixels, kDstStride, 16, 1, + ARGBScale(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, kFilterNone); - EXPECT_EQ(49, dest_pixels[0]); - EXPECT_EQ(255 - 49, dest_pixels[1]); - EXPECT_EQ(50, dest_pixels[2]); - EXPECT_EQ(59, dest_pixels[3]); + EXPECT_EQ(225, dest_pixels[0]); + EXPECT_EQ(255 - 225, dest_pixels[1]); + EXPECT_EQ(226, dest_pixels[2]); + EXPECT_EQ(235, dest_pixels[3]); free_aligned_buffer_page_end(dest_pixels); free_aligned_buffer_page_end(orig_pixels); } TEST_F(LibYUVScaleTest, ARGBTest4x) { - const int kSrcStride = 64 * 4; - const int kDstStride = 16 * 4; + const int kSrcStride = 640 * 4; + const int kDstStride = 
160 * 4; const int kSize = kSrcStride * 4; align_buffer_page_end(orig_pixels, kSize); - for (int i = 0; i < 64 * 4; ++i) { + for (int i = 0; i < 640 * 4; ++i) { orig_pixels[i * 4 + 0] = i; orig_pixels[i * 4 + 1] = 255 - i; orig_pixels[i * 4 + 2] = i + 1; @@ -539,26 +561,25 @@ TEST_F(LibYUVScaleTest, ARGBTest4x) { } align_buffer_page_end(dest_pixels, kDstStride); - int iterations16 = - benchmark_width_ * benchmark_height_ / (16 * 1) * benchmark_iterations_; - for (int i = 0; i < iterations16; ++i) { - ARGBScale(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + ARGBScale(orig_pixels, kSrcStride, 640, 4, dest_pixels, kDstStride, 160, 1, kFilterBilinear); } - EXPECT_NEAR((65 + 66 + 129 + 130 + 2) / 4, dest_pixels[0], 4); - EXPECT_NEAR((255 - 65 + 255 - 66 + 255 - 129 + 255 - 130 + 2) / 4, - dest_pixels[1], 4); - EXPECT_NEAR((1 * 4 + 65 + 66 + 129 + 130 + 2) / 4, dest_pixels[2], 4); - EXPECT_NEAR((10 * 4 + 65 + 66 + 129 + 130 + 2) / 4, dest_pixels[3], 4); + EXPECT_NEAR(66, dest_pixels[0], 4); + EXPECT_NEAR(255 - 66, dest_pixels[1], 4); + EXPECT_NEAR(67, dest_pixels[2], 4); + EXPECT_NEAR(76, dest_pixels[3], 4); - ARGBScale(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, + ARGBScale(orig_pixels, kSrcStride, 640, 4, dest_pixels, kDstStride, 160, 1, kFilterNone); - EXPECT_EQ(130, dest_pixels[0]); - EXPECT_EQ(255 - 130, dest_pixels[1]); - EXPECT_EQ(130 + 1, dest_pixels[2]); - EXPECT_EQ(130 + 10, dest_pixels[3]); + EXPECT_EQ(2, dest_pixels[0]); + EXPECT_EQ(255 - 2, dest_pixels[1]); + EXPECT_EQ(3, dest_pixels[2]); + EXPECT_EQ(12, dest_pixels[3]); free_aligned_buffer_page_end(dest_pixels); free_aligned_buffer_page_end(orig_pixels); diff --git a/third-party/libyuv/third_party/libyuv/unit_test/scale_rgb_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/scale_rgb_test.cc new file mode 100644 index 0000000000..8296abe31d --- /dev/null +++ b/third-party/libyuv/third_party/libyuv/unit_test/scale_rgb_test.cc @@ -0,0 +1,280 @@ +/* + * Copyright 2022 The LibYuv Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include + +#include "../unit_test/unit_test.h" +#include "libyuv/cpu_id.h" +#include "libyuv/scale_rgb.h" + +namespace libyuv { + +#define STRINGIZE(line) #line +#define FILELINESTR(file, line) file ":" STRINGIZE(line) + +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +// SLOW TESTS are those that are unoptimized C code. +// FULL TESTS are optimized but test many variations of the same code. +#define ENABLE_FULL_TESTS +#endif + +// Test scaling with C vs Opt and return maximum pixel difference. 0 = exact. +static int RGBTestFilter(int src_width, + int src_height, + int dst_width, + int dst_height, + FilterMode f, + int benchmark_iterations, + int disable_cpu_flags, + int benchmark_cpu_info) { + if (!SizeValid(src_width, src_height, dst_width, dst_height)) { + return 0; + } + + int i, j; + const int b = 0; // 128 to test for padding/stride. 
+ int64_t src_rgb_plane_size = + (Abs(src_width) + b * 3) * (Abs(src_height) + b * 3) * 3LL; + int src_stride_rgb = (b * 3 + Abs(src_width)) * 3; + + align_buffer_page_end(src_rgb, src_rgb_plane_size); + if (!src_rgb) { + printf("Skipped. Alloc failed " FILELINESTR(__FILE__, __LINE__) "\n"); + return 0; + } + MemRandomize(src_rgb, src_rgb_plane_size); + + int64_t dst_rgb_plane_size = (dst_width + b * 3) * (dst_height + b * 3) * 3LL; + int dst_stride_rgb = (b * 3 + dst_width) * 3; + + align_buffer_page_end(dst_rgb_c, dst_rgb_plane_size); + align_buffer_page_end(dst_rgb_opt, dst_rgb_plane_size); + if (!dst_rgb_c || !dst_rgb_opt) { + printf("Skipped. Alloc failed " FILELINESTR(__FILE__, __LINE__) "\n"); + return 0; + } + memset(dst_rgb_c, 2, dst_rgb_plane_size); + memset(dst_rgb_opt, 3, dst_rgb_plane_size); + + // Warm up both versions for consistent benchmarks. + MaskCpuFlags(disable_cpu_flags); // Disable all CPU optimization. + RGBScale(src_rgb + (src_stride_rgb * b) + b * 3, src_stride_rgb, src_width, + src_height, dst_rgb_c + (dst_stride_rgb * b) + b * 3, dst_stride_rgb, + dst_width, dst_height, f); + MaskCpuFlags(benchmark_cpu_info); // Enable all CPU optimization. + RGBScale(src_rgb + (src_stride_rgb * b) + b * 3, src_stride_rgb, src_width, + src_height, dst_rgb_opt + (dst_stride_rgb * b) + b * 3, + dst_stride_rgb, dst_width, dst_height, f); + + MaskCpuFlags(disable_cpu_flags); // Disable all CPU optimization. + double c_time = get_time(); + RGBScale(src_rgb + (src_stride_rgb * b) + b * 3, src_stride_rgb, src_width, + src_height, dst_rgb_c + (dst_stride_rgb * b) + b * 3, dst_stride_rgb, + dst_width, dst_height, f); + + c_time = (get_time() - c_time); + + MaskCpuFlags(benchmark_cpu_info); // Enable all CPU optimization. + double opt_time = get_time(); + for (i = 0; i < benchmark_iterations; ++i) { + RGBScale(src_rgb + (src_stride_rgb * b) + b * 3, src_stride_rgb, src_width, + src_height, dst_rgb_opt + (dst_stride_rgb * b) + b * 3, + dst_stride_rgb, dst_width, dst_height, f); + } + opt_time = (get_time() - opt_time) / benchmark_iterations; + + // Report performance of C vs OPT + printf("filter %d - %8d us C - %8d us OPT\n", f, + static_cast<int>(c_time * 1e6), static_cast<int>(opt_time * 1e6)); + + // C version may be a little off from the optimized. Order of + // operations may introduce rounding somewhere. So do a difference + // of the buffers and look to see that the max difference isn't + // over 2. + int max_diff = 0; + for (i = b; i < (dst_height + b); ++i) { + for (j = b * 3; j < (dst_width + b) * 3; ++j) { + int abs_diff = Abs(dst_rgb_c[(i * dst_stride_rgb) + j] - + dst_rgb_opt[(i * dst_stride_rgb) + j]); + if (abs_diff > max_diff) { + max_diff = abs_diff; + } + } + } + + free_aligned_buffer_page_end(dst_rgb_c); + free_aligned_buffer_page_end(dst_rgb_opt); + free_aligned_buffer_page_end(src_rgb); + return max_diff; +} + +// The following adjustments in dimensions ensure the scale factor will be +// exactly achieved.
+#define DX(x, nom, denom) static_cast<int>((Abs(x) / nom) * nom) +#define SX(x, nom, denom) static_cast<int>((x / nom) * denom) + +#define TEST_FACTOR1(name, filter, nom, denom, max_diff) \ + TEST_F(LibYUVScaleTest, RGBScaleDownBy##name##_##filter) { \ + int diff = RGBTestFilter( \ + SX(benchmark_width_, nom, denom), SX(benchmark_height_, nom, denom), \ + DX(benchmark_width_, nom, denom), DX(benchmark_height_, nom, denom), \ + kFilter##filter, benchmark_iterations_, disable_cpu_flags_, \ + benchmark_cpu_info_); \ + EXPECT_LE(diff, max_diff); \ + } + +#if defined(ENABLE_FULL_TESTS) +// Test a scale factor with all 4 filters. Expect unfiltered to be exact, but +// filtering is different fixed point implementations for SSSE3, Neon and C. +#define TEST_FACTOR(name, nom, denom) \ + TEST_FACTOR1(name, None, nom, denom, 0) \ + TEST_FACTOR1(name, Linear, nom, denom, 3) \ + TEST_FACTOR1(name, Bilinear, nom, denom, 3) \ + TEST_FACTOR1(name, Box, nom, denom, 3) +#else +// Test a scale factor with Bilinear. +#define TEST_FACTOR(name, nom, denom) \ + TEST_FACTOR1(name, Bilinear, nom, denom, 3) +#endif + +TEST_FACTOR(2, 1, 2) +#ifndef DISABLE_SLOW_TESTS +TEST_FACTOR(4, 1, 4) +// TEST_FACTOR(8, 1, 8) Disable for benchmark performance. +TEST_FACTOR(3by4, 3, 4) +TEST_FACTOR(3by8, 3, 8) +TEST_FACTOR(3, 1, 3) +#endif +#undef TEST_FACTOR1 +#undef TEST_FACTOR +#undef SX +#undef DX + +#define TEST_SCALETO1(name, width, height, filter, max_diff) \ + TEST_F(LibYUVScaleTest, name##To##width##x##height##_##filter) { \ + int diff = RGBTestFilter(benchmark_width_, benchmark_height_, width, \ + height, kFilter##filter, benchmark_iterations_, \ + disable_cpu_flags_, benchmark_cpu_info_); \ + EXPECT_LE(diff, max_diff); \ + } \ + TEST_F(LibYUVScaleTest, name##From##width##x##height##_##filter) { \ + int diff = RGBTestFilter(width, height, Abs(benchmark_width_), \ + Abs(benchmark_height_), kFilter##filter, \ + benchmark_iterations_, disable_cpu_flags_, \ + benchmark_cpu_info_); \ + EXPECT_LE(diff, max_diff); \ + } + +#if defined(ENABLE_FULL_TESTS) +/// Test scale to a specified size with all 4 filters. +#define TEST_SCALETO(name, width, height) \ + TEST_SCALETO1(name, width, height, None, 0) \ + TEST_SCALETO1(name, width, height, Linear, 3) \ + TEST_SCALETO1(name, width, height, Bilinear, 3) +#else +#define TEST_SCALETO(name, width, height) \ + TEST_SCALETO1(name, width, height, Bilinear, 3) +#endif + +TEST_SCALETO(RGBScale, 640, 360) +#ifndef DISABLE_SLOW_TESTS +TEST_SCALETO(RGBScale, 1, 1) +TEST_SCALETO(RGBScale, 256, 144) /* 128x72 * 3 */ +TEST_SCALETO(RGBScale, 320, 240) +TEST_SCALETO(RGBScale, 569, 480) +TEST_SCALETO(RGBScale, 1280, 720) +TEST_SCALETO(RGBScale, 1920, 1080) +#endif // DISABLE_SLOW_TESTS +#undef TEST_SCALETO1 +#undef TEST_SCALETO + +#define TEST_SCALESWAPXY1(name, filter, max_diff) \ + TEST_F(LibYUVScaleTest, name##SwapXY_##filter) { \ + int diff = RGBTestFilter(benchmark_width_, benchmark_height_, \ + benchmark_height_, benchmark_width_, \ + kFilter##filter, benchmark_iterations_, \ + disable_cpu_flags_, benchmark_cpu_info_); \ + EXPECT_LE(diff, max_diff); \ + } + +#if defined(ENABLE_FULL_TESTS) +// Test scale with swapped width and height with all 3 filters.
+TEST_SCALESWAPXY1(RGBScale, None, 0) +TEST_SCALESWAPXY1(RGBScale, Linear, 0) +TEST_SCALESWAPXY1(RGBScale, Bilinear, 0) +#else +TEST_SCALESWAPXY1(RGBScale, Bilinear, 0) +#endif +#undef TEST_SCALESWAPXY1 + +TEST_F(LibYUVScaleTest, RGBTest3x) { + const int kSrcStride = 480 * 3; + const int kDstStride = 160 * 3; + const int kSize = kSrcStride * 3; + align_buffer_page_end(orig_pixels, kSize); + for (int i = 0; i < 480 * 3; ++i) { + orig_pixels[i * 3 + 0] = i; + orig_pixels[i * 3 + 1] = 255 - i; + } + align_buffer_page_end(dest_pixels, kDstStride); + + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + RGBScale(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, + kFilterBilinear); + } + + EXPECT_EQ(225, dest_pixels[0]); + EXPECT_EQ(255 - 225, dest_pixels[1]); + + RGBScale(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, + kFilterNone); + + EXPECT_EQ(225, dest_pixels[0]); + EXPECT_EQ(255 - 225, dest_pixels[1]); + + free_aligned_buffer_page_end(dest_pixels); + free_aligned_buffer_page_end(orig_pixels); +} + +TEST_F(LibYUVScaleTest, RGBTest4x) { + const int kSrcStride = 640 * 3; + const int kDstStride = 160 * 3; + const int kSize = kSrcStride * 4; + align_buffer_page_end(orig_pixels, kSize); + for (int i = 0; i < 640 * 4; ++i) { + orig_pixels[i * 3 + 0] = i; + orig_pixels[i * 3 + 1] = 255 - i; + } + align_buffer_page_end(dest_pixels, kDstStride); + + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + RGBScale(orig_pixels, kSrcStride, 640, 4, dest_pixels, kDstStride, 160, 1, + kFilterBilinear); + } + + EXPECT_EQ(66, dest_pixels[0]); + EXPECT_EQ(190, dest_pixels[1]); + + RGBScale(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, + kFilterNone); + + EXPECT_EQ(2, dest_pixels[0]); // expect the 3rd pixel of the 3rd row + EXPECT_EQ(255 - 2, dest_pixels[1]); + + free_aligned_buffer_page_end(dest_pixels); + free_aligned_buffer_page_end(orig_pixels); +} + +} // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/unit_test/scale_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/scale_test.cc index 6da6b574d1..a8c95268dc 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/scale_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/scale_test.cc @@ -22,6 +22,12 @@ #define STRINGIZE(line) #line #define FILELINESTR(file, line) file ":" STRINGIZE(line) +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +// SLOW TESTS are those that are unoptimized C code. +// FULL TESTS are optimized but test many variations of the same code. +#define ENABLE_FULL_TESTS +#endif + namespace libyuv { // Test scaling with C vs Opt and return maximum pixel difference. 0 = exact. @@ -882,23 +888,31 @@ static int NV12TestFilter(int src_width, // Test a scale factor with all 4 filters. Expect unfiltered to be exact, but // filtering is different fixed point implementations for SSSE3, Neon and C. 
-#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS #define TEST_FACTOR(name, nom, denom, boxdiff) \ TEST_FACTOR1(, name, None, nom, denom, 0) \ TEST_FACTOR1(, name, Linear, nom, denom, 3) \ TEST_FACTOR1(, name, Bilinear, nom, denom, 3) \ TEST_FACTOR1(, name, Box, nom, denom, boxdiff) #else +#if defined(ENABLE_FULL_TESTS) #define TEST_FACTOR(name, nom, denom, boxdiff) \ TEST_FACTOR1(DISABLED_, name, None, nom, denom, 0) \ TEST_FACTOR1(DISABLED_, name, Linear, nom, denom, 3) \ TEST_FACTOR1(DISABLED_, name, Bilinear, nom, denom, 3) \ TEST_FACTOR1(DISABLED_, name, Box, nom, denom, boxdiff) +#else +#define TEST_FACTOR(name, nom, denom, boxdiff) \ + TEST_FACTOR1(DISABLED_, name, Bilinear, nom, denom, 3) \ + TEST_FACTOR1(DISABLED_, name, Box, nom, denom, boxdiff) +#endif #endif TEST_FACTOR(2, 1, 2, 0) TEST_FACTOR(4, 1, 4, 0) -// TEST_FACTOR(8, 1, 8, 0) Disable for benchmark performance. Takes 90 seconds. +#ifndef DISABLE_SLOW_TESTS +TEST_FACTOR(8, 1, 8, 0) +#endif TEST_FACTOR(3by4, 3, 4, 1) TEST_FACTOR(3by8, 3, 8, 1) TEST_FACTOR(3, 1, 3, 0) @@ -1008,7 +1022,7 @@ TEST_FACTOR(3, 1, 3, 0) EXPECT_LE(diff, max_diff); \ } -#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS // Test scale to a specified size with all 4 filters. #define TEST_SCALETO(name, width, height) \ TEST_SCALETO1(, name, width, height, None, 0) \ @@ -1016,23 +1030,28 @@ TEST_FACTOR(3, 1, 3, 0) TEST_SCALETO1(, name, width, height, Bilinear, 3) \ TEST_SCALETO1(, name, width, height, Box, 3) #else -// Test scale to a specified size with all 4 filters. +#if defined(ENABLE_FULL_TESTS) #define TEST_SCALETO(name, width, height) \ TEST_SCALETO1(DISABLED_, name, width, height, None, 0) \ TEST_SCALETO1(DISABLED_, name, width, height, Linear, 3) \ TEST_SCALETO1(DISABLED_, name, width, height, Bilinear, 3) \ TEST_SCALETO1(DISABLED_, name, width, height, Box, 3) +#else +#define TEST_SCALETO(name, width, height) \ + TEST_SCALETO1(DISABLED_, name, width, height, Bilinear, 3) \ + TEST_SCALETO1(DISABLED_, name, width, height, Box, 3) +#endif #endif TEST_SCALETO(Scale, 1, 1) -TEST_SCALETO(Scale, 256, 144) /* 128x72 * 2 */ -TEST_SCALETO(Scale, 320, 240) TEST_SCALETO(Scale, 569, 480) TEST_SCALETO(Scale, 640, 360) +#ifndef DISABLE_SLOW_TESTS +TEST_SCALETO(Scale, 256, 144) /* 128x72 * 2 */ +TEST_SCALETO(Scale, 320, 240) TEST_SCALETO(Scale, 1280, 720) -#ifdef ENABLE_SLOW_TESTS TEST_SCALETO(Scale, 1920, 1080) -#endif // ENABLE_SLOW_TESTS +#endif // DISABLE_SLOW_TESTS #undef TEST_SCALETO1 #undef TEST_SCALETO @@ -1088,16 +1107,21 @@ TEST_SCALETO(Scale, 1920, 1080) } // Test scale to a specified size with all 4 filters. 
-#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS TEST_SCALESWAPXY1(, Scale, None, 0) TEST_SCALESWAPXY1(, Scale, Linear, 3) TEST_SCALESWAPXY1(, Scale, Bilinear, 3) TEST_SCALESWAPXY1(, Scale, Box, 3) #else +#if defined(ENABLE_FULL_TESTS) TEST_SCALESWAPXY1(DISABLED_, Scale, None, 0) TEST_SCALESWAPXY1(DISABLED_, Scale, Linear, 3) TEST_SCALESWAPXY1(DISABLED_, Scale, Bilinear, 3) TEST_SCALESWAPXY1(DISABLED_, Scale, Box, 3) +#else +TEST_SCALESWAPXY1(DISABLED_, Scale, Bilinear, 3) +TEST_SCALESWAPXY1(DISABLED_, Scale, Box, 3) +#endif #endif #undef TEST_SCALESWAPXY1 @@ -1197,10 +1221,6 @@ extern "C" void ScaleRowUp2_16_NEON(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst, int dst_width); -extern "C" void ScaleRowUp2_16_MMI(const uint16_t* src_ptr, - ptrdiff_t src_stride, - uint16_t* dst, - int dst_width); extern "C" void ScaleRowUp2_16_C(const uint16_t* src_ptr, ptrdiff_t src_stride, uint16_t* dst, @@ -1227,13 +1247,6 @@ TEST_F(LibYUVScaleTest, TestScaleRowUp2_16) { } else { ScaleRowUp2_16_C(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280); } -#elif !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A) - int has_mmi = TestCpuFlag(kCpuHasMMI); - if (has_mmi) { - ScaleRowUp2_16_MMI(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280); - } else { - ScaleRowUp2_16_C(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280); - } #else ScaleRowUp2_16_C(&orig_pixels[0], 640, &dst_pixels_opt[0], 1280); #endif @@ -1385,56 +1398,56 @@ TEST_FACTOR(3, 1, 3, 0) #undef DX TEST_F(LibYUVScaleTest, PlaneTest3x) { - const int kSrcStride = 48; - const int kDstStride = 16; + const int kSrcStride = 480; + const int kDstStride = 160; const int kSize = kSrcStride * 3; align_buffer_page_end(orig_pixels, kSize); - for (int i = 0; i < 48 * 3; ++i) { + for (int i = 0; i < 480 * 3; ++i) { orig_pixels[i] = i; } align_buffer_page_end(dest_pixels, kDstStride); - int iterations16 = - benchmark_width_ * benchmark_height_ / (16 * 1) * benchmark_iterations_; - for (int i = 0; i < iterations16; ++i) { - ScalePlane(orig_pixels, kSrcStride, 48, 3, dest_pixels, kDstStride, 16, 1, + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + ScalePlane(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, kFilterBilinear); } - EXPECT_EQ(49, dest_pixels[0]); + EXPECT_EQ(225, dest_pixels[0]); - ScalePlane(orig_pixels, kSrcStride, 48, 3, dest_pixels, kDstStride, 16, 1, + ScalePlane(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, kFilterNone); - EXPECT_EQ(49, dest_pixels[0]); + EXPECT_EQ(225, dest_pixels[0]); free_aligned_buffer_page_end(dest_pixels); free_aligned_buffer_page_end(orig_pixels); } TEST_F(LibYUVScaleTest, PlaneTest4x) { - const int kSrcStride = 64; - const int kDstStride = 16; + const int kSrcStride = 640; + const int kDstStride = 160; const int kSize = kSrcStride * 4; align_buffer_page_end(orig_pixels, kSize); - for (int i = 0; i < 64 * 4; ++i) { + for (int i = 0; i < 640 * 4; ++i) { orig_pixels[i] = i; } align_buffer_page_end(dest_pixels, kDstStride); - int iterations16 = - benchmark_width_ * benchmark_height_ / (16 * 1) * benchmark_iterations_; - for (int i = 0; i < iterations16; ++i) { - ScalePlane(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + ScalePlane(orig_pixels, kSrcStride, 640, 4, dest_pixels, kDstStride, 160, 1, 
kFilterBilinear); } - EXPECT_EQ((65 + 66 + 129 + 130 + 2) / 4, dest_pixels[0]); + EXPECT_EQ(66, dest_pixels[0]); - ScalePlane(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, + ScalePlane(orig_pixels, kSrcStride, 640, 4, dest_pixels, kDstStride, 160, 1, kFilterNone); - EXPECT_EQ(130, dest_pixels[0]); // expect the 3rd pixel of the 3rd row + EXPECT_EQ(2, dest_pixels[0]); // expect the 3rd pixel of the 3rd row free_aligned_buffer_page_end(dest_pixels); free_aligned_buffer_page_end(orig_pixels); @@ -1532,4 +1545,57 @@ TEST_F(LibYUVScaleTest, PlaneTestRotate_Box) { free_aligned_buffer_page_end(orig_pixels); } +TEST_F(LibYUVScaleTest, PlaneTest1_Box) { + align_buffer_page_end(orig_pixels, 3); + align_buffer_page_end(dst_pixels, 3); + + // Pad the 1x1 byte image with invalid values before and after in case libyuv + // reads outside the memory boundaries. + orig_pixels[0] = 0; + orig_pixels[1] = 1; // scale this pixel + orig_pixels[2] = 2; + dst_pixels[0] = 3; + dst_pixels[1] = 3; + dst_pixels[2] = 3; + + libyuv::ScalePlane(orig_pixels + 1, /* src_stride= */ 1, /* src_width= */ 1, + /* src_height= */ 1, dst_pixels, /* dst_stride= */ 1, + /* dst_width= */ 1, /* dst_height= */ 2, + libyuv::kFilterBox); + + EXPECT_EQ(dst_pixels[0], 1); + EXPECT_EQ(dst_pixels[1], 1); + EXPECT_EQ(dst_pixels[2], 3); + + free_aligned_buffer_page_end(dst_pixels); + free_aligned_buffer_page_end(orig_pixels); +} + +TEST_F(LibYUVScaleTest, PlaneTest1_16_Box) { + align_buffer_page_end(orig_pixels_alloc, 3 * 2); + align_buffer_page_end(dst_pixels_alloc, 3 * 2); + uint16_t* orig_pixels = (uint16_t*)orig_pixels_alloc; + uint16_t* dst_pixels = (uint16_t*)dst_pixels_alloc; + + // Pad the 1x1 byte image with invalid values before and after in case libyuv + // reads outside the memory boundaries. + orig_pixels[0] = 0; + orig_pixels[1] = 1; // scale this pixel + orig_pixels[2] = 2; + dst_pixels[0] = 3; + dst_pixels[1] = 3; + dst_pixels[2] = 3; + + libyuv::ScalePlane_16( + orig_pixels + 1, /* src_stride= */ 1, /* src_width= */ 1, + /* src_height= */ 1, dst_pixels, /* dst_stride= */ 1, + /* dst_width= */ 1, /* dst_height= */ 2, libyuv::kFilterNone); + + EXPECT_EQ(dst_pixels[0], 1); + EXPECT_EQ(dst_pixels[1], 1); + EXPECT_EQ(dst_pixels[2], 3); + + free_aligned_buffer_page_end(dst_pixels_alloc); + free_aligned_buffer_page_end(orig_pixels_alloc); +} } // namespace libyuv diff --git a/third-party/libyuv/third_party/libyuv/unit_test/scale_uv_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/scale_uv_test.cc index 6e4649f84d..3d524bef1f 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/scale_uv_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/scale_uv_test.cc @@ -20,6 +20,12 @@ namespace libyuv { #define STRINGIZE(line) #line #define FILELINESTR(file, line) file ":" STRINGIZE(line) +#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__) +// SLOW TESTS are those that are unoptimized C code. +// FULL TESTS are optimized but test many variations of the same code. +#define ENABLE_FULL_TESTS +#endif + // Test scaling with C vs Opt and return maximum pixel difference. 0 = exact. static int UVTestFilter(int src_width, int src_height, @@ -125,6 +131,7 @@ static int UVTestFilter(int src_width, EXPECT_LE(diff, max_diff); \ } +#if defined(ENABLE_FULL_TESTS) // Test a scale factor with all 4 filters. Expect unfiltered to be exact, but // filtering is different fixed point implementations for SSSE3, Neon and C. 
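[Editor's note, not part of the patch] The new PlaneTest1_Box / PlaneTest1_16_Box cases above scale a single pixel up to 1x2 while surrounding both buffers with sentinel bytes, so any out-of-bounds read or write is caught. A self-contained sketch of that padding pattern, with a hypothetical Scale1x1To1x2 standing in for libyuv::ScalePlane(..., kFilterBox):

#include <cassert>
#include <cstdint>

// Stand-in for the scaler under test: replicate the single source pixel twice.
void Scale1x1To1x2(const uint8_t* src, uint8_t* dst) {
  dst[0] = src[0];
  dst[1] = src[0];
}

int main() {
  uint8_t src[3] = {0, 1, 2};  // only src[1] is the real 1x1 image
  uint8_t dst[3] = {3, 3, 3};  // dst[2] must remain untouched
  Scale1x1To1x2(src + 1, dst);
  assert(dst[0] == 1 && dst[1] == 1);  // scaled output
  assert(dst[2] == 3);                 // no out-of-bounds write
  return 0;
}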
#define TEST_FACTOR(name, nom, denom) \ @@ -132,6 +139,11 @@ static int UVTestFilter(int src_width, TEST_FACTOR1(name, Linear, nom, denom, 3) \ TEST_FACTOR1(name, Bilinear, nom, denom, 3) \ TEST_FACTOR1(name, Box, nom, denom, 3) +#else +// Test a scale factor with Bilinear. +#define TEST_FACTOR(name, nom, denom) \ + TEST_FACTOR1(name, Bilinear, nom, denom, 3) +#endif TEST_FACTOR(2, 1, 2) TEST_FACTOR(4, 1, 4) @@ -159,21 +171,26 @@ TEST_FACTOR(3, 1, 3) EXPECT_LE(diff, max_diff); \ } +#if defined(ENABLE_FULL_TESTS) /// Test scale to a specified size with all 4 filters. #define TEST_SCALETO(name, width, height) \ TEST_SCALETO1(name, width, height, None, 0) \ TEST_SCALETO1(name, width, height, Linear, 3) \ TEST_SCALETO1(name, width, height, Bilinear, 3) +#else +#define TEST_SCALETO(name, width, height) \ + TEST_SCALETO1(name, width, height, Bilinear, 3) +#endif TEST_SCALETO(UVScale, 1, 1) -TEST_SCALETO(UVScale, 256, 144) /* 128x72 * 2 */ -TEST_SCALETO(UVScale, 320, 240) TEST_SCALETO(UVScale, 569, 480) TEST_SCALETO(UVScale, 640, 360) -#ifdef ENABLE_SLOW_TESTS +#ifndef DISABLE_SLOW_TESTS +TEST_SCALETO(UVScale, 256, 144) /* 128x72 * 2 */ +TEST_SCALETO(UVScale, 320, 240) TEST_SCALETO(UVScale, 1280, 720) TEST_SCALETO(UVScale, 1920, 1080) -#endif // ENABLE_SLOW_TESTS +#endif // DISABLE_SLOW_TESTS #undef TEST_SCALETO1 #undef TEST_SCALETO @@ -186,70 +203,73 @@ TEST_SCALETO(UVScale, 1920, 1080) EXPECT_LE(diff, max_diff); \ } +#if defined(ENABLE_FULL_TESTS) // Test scale with swapped width and height with all 3 filters. TEST_SCALESWAPXY1(UVScale, None, 0) TEST_SCALESWAPXY1(UVScale, Linear, 0) TEST_SCALESWAPXY1(UVScale, Bilinear, 0) +#else +TEST_SCALESWAPXY1(UVScale, Bilinear, 0) +#endif #undef TEST_SCALESWAPXY1 TEST_F(LibYUVScaleTest, UVTest3x) { - const int kSrcStride = 48 * 2; - const int kDstStride = 16 * 2; + const int kSrcStride = 480 * 2; + const int kDstStride = 160 * 2; const int kSize = kSrcStride * 3; align_buffer_page_end(orig_pixels, kSize); - for (int i = 0; i < 48 * 3; ++i) { + for (int i = 0; i < 480 * 3; ++i) { orig_pixels[i * 2 + 0] = i; orig_pixels[i * 2 + 1] = 255 - i; } align_buffer_page_end(dest_pixels, kDstStride); - int iterations16 = - benchmark_width_ * benchmark_height_ / (16 * 1) * benchmark_iterations_; - for (int i = 0; i < iterations16; ++i) { - UVScale(orig_pixels, kSrcStride, 48, 3, dest_pixels, kDstStride, 16, 1, + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + UVScale(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, kFilterBilinear); } - EXPECT_EQ(49, dest_pixels[0]); - EXPECT_EQ(255 - 49, dest_pixels[1]); + EXPECT_EQ(225, dest_pixels[0]); + EXPECT_EQ(255 - 225, dest_pixels[1]); - UVScale(orig_pixels, kSrcStride, 48, 3, dest_pixels, kDstStride, 16, 1, + UVScale(orig_pixels, kSrcStride, 480, 3, dest_pixels, kDstStride, 160, 1, kFilterNone); - EXPECT_EQ(49, dest_pixels[0]); - EXPECT_EQ(255 - 49, dest_pixels[1]); + EXPECT_EQ(225, dest_pixels[0]); + EXPECT_EQ(255 - 225, dest_pixels[1]); free_aligned_buffer_page_end(dest_pixels); free_aligned_buffer_page_end(orig_pixels); } TEST_F(LibYUVScaleTest, UVTest4x) { - const int kSrcStride = 64 * 2; - const int kDstStride = 16 * 2; + const int kSrcStride = 640 * 2; + const int kDstStride = 160 * 2; const int kSize = kSrcStride * 4; align_buffer_page_end(orig_pixels, kSize); - for (int i = 0; i < 64 * 4; ++i) { + for (int i = 0; i < 640 * 4; ++i) { orig_pixels[i * 2 + 0] = i; orig_pixels[i * 2 + 1] = 255 - i; } 
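[Editor's note, not part of the patch] The 3x/4x benchmark loops above and below swap the truncating `w * h / 160` count for a rounded-up division, so small benchmark resolutions still execute at least one scaling pass per benchmark iteration. A standalone sketch of the arithmetic (function and parameter names are illustrative):

#include <cassert>

// Ceiling division as used for iterations160: (area + 159) / 160 never
// truncates to zero for a non-empty benchmark area.
int IterationCount(int benchmark_width, int benchmark_height, int benchmark_iterations) {
  int area = benchmark_width * benchmark_height;
  return (area + (160 - 1)) / 160 * benchmark_iterations;
}

int main() {
  assert(IterationCount(1280, 720, 1) == 5760);  // large area: roughly area/160
  assert(IterationCount(8, 8, 1) == 1);          // tiny area: still one pass
  return 0;
}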
align_buffer_page_end(dest_pixels, kDstStride); - int iterations16 = - benchmark_width_ * benchmark_height_ / (16 * 1) * benchmark_iterations_; - for (int i = 0; i < iterations16; ++i) { - UVScale(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, + int iterations160 = (benchmark_width_ * benchmark_height_ + (160 - 1)) / 160 * + benchmark_iterations_; + for (int i = 0; i < iterations160; ++i) { + UVScale(orig_pixels, kSrcStride, 640, 4, dest_pixels, kDstStride, 160, 1, kFilterBilinear); } - EXPECT_EQ((65 + 66 + 129 + 130 + 2) / 4, dest_pixels[0]); - EXPECT_EQ((255 - 65 + 255 - 66 + 255 - 129 + 255 - 130 + 2) / 4, - dest_pixels[1]); + EXPECT_EQ(66, dest_pixels[0]); + EXPECT_EQ(190, dest_pixels[1]); UVScale(orig_pixels, kSrcStride, 64, 4, dest_pixels, kDstStride, 16, 1, kFilterNone); - EXPECT_EQ(130, dest_pixels[0]); // expect the 3rd pixel of the 3rd row - EXPECT_EQ(255 - 130, dest_pixels[1]); + EXPECT_EQ(2, dest_pixels[0]); // expect the 3rd pixel of the 3rd row + EXPECT_EQ(255 - 2, dest_pixels[1]); free_aligned_buffer_page_end(dest_pixels); free_aligned_buffer_page_end(orig_pixels); diff --git a/third-party/libyuv/third_party/libyuv/unit_test/unit_test.cc b/third-party/libyuv/third_party/libyuv/unit_test/unit_test.cc index e6dbc3eed6..61145a4628 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/unit_test.cc +++ b/third-party/libyuv/third_party/libyuv/unit_test/unit_test.cc @@ -77,8 +77,15 @@ int TestCpuEnv(int cpu_info) { if (TestEnv("LIBYUV_DISABLE_MSA")) { cpu_info &= ~libyuv::kCpuHasMSA; } - if (TestEnv("LIBYUV_DISABLE_MMI")) { - cpu_info &= ~libyuv::kCpuHasMMI; +#endif +#if defined(__longarch__) && defined(__linux__) + if (TestEnv("LIBYUV_DISABLE_LSX")) { + cpu_info &= ~libyuv::kCpuHasLSX; + } +#endif +#if defined(__longarch__) && defined(__linux__) + if (TestEnv("LIBYUV_DISABLE_LASX")) { + cpu_info &= ~libyuv::kCpuHasLASX; } #endif #if !defined(__pnacl__) && !defined(__CLR_VER) && \ @@ -120,6 +127,9 @@ int TestCpuEnv(int cpu_info) { if (TestEnv("LIBYUV_DISABLE_AVX512VL")) { cpu_info &= ~libyuv::kCpuHasAVX512VL; } + if (TestEnv("LIBYUV_DISABLE_AVX512VNNI")) { + cpu_info &= ~libyuv::kCpuHasAVX512VNNI; + } if (TestEnv("LIBYUV_DISABLE_AVX512VBMI")) { cpu_info &= ~libyuv::kCpuHasAVX512VBMI; } diff --git a/third-party/libyuv/third_party/libyuv/unit_test/unit_test.h b/third-party/libyuv/third_party/libyuv/unit_test/unit_test.h index 580832addc..6cc99a654f 100644 --- a/third-party/libyuv/third_party/libyuv/unit_test/unit_test.h +++ b/third-party/libyuv/third_party/libyuv/unit_test/unit_test.h @@ -14,7 +14,6 @@ #ifdef _WIN32 #include #else -#include #include #endif @@ -111,10 +110,13 @@ inline int fastrand() { return static_cast((fastrand_seed >> 16) & 0xffff); } +// ubsan fails if dst is unaligned unless we use uint8 static inline void MemRandomize(uint8_t* dst, int64_t len) { int64_t i; for (i = 0; i < len - 1; i += 2) { - *reinterpret_cast(dst) = fastrand(); + int r = fastrand(); + dst[0] = static_cast(r); + dst[1] = static_cast(r >> 8); dst += 2; } for (; i < len; ++i) { diff --git a/third-party/libyuv/third_party/libyuv/util/cpuid.c b/third-party/libyuv/third_party/libyuv/util/cpuid.c index 46f9c1bfff..b618bb106e 100644 --- a/third-party/libyuv/third_party/libyuv/util/cpuid.c +++ b/third-party/libyuv/third_party/libyuv/util/cpuid.c @@ -23,6 +23,7 @@ int main(int argc, const char* argv[]) { int has_arm = TestCpuFlag(kCpuHasARM); int has_mips = TestCpuFlag(kCpuHasMIPS); int has_x86 = TestCpuFlag(kCpuHasX86); + int has_loongarch = TestCpuFlag(kCpuHasLOONGARCH); 
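[Editor's note, not part of the patch] The unit_test.h hunk above rewrites MemRandomize to store the random value one byte at a time, because a reinterpret-cast 16-bit store through an odd address is undefined behaviour and is flagged by UBSan. A self-contained sketch of the same pattern, with NextRandom standing in for fastrand():

#include <cstdint>
#include <cstdlib>

static int NextRandom() { return std::rand(); }

// Byte-wise stores keep the fill ubsan-clean even when dst is unaligned.
void FillRandom(uint8_t* dst, int64_t len) {
  int64_t i;
  for (i = 0; i < len - 1; i += 2) {
    int r = NextRandom();
    dst[0] = static_cast<uint8_t>(r);       // low byte
    dst[1] = static_cast<uint8_t>(r >> 8);  // high byte
    dst += 2;
  }
  for (; i < len; ++i) {
    *dst++ = static_cast<uint8_t>(NextRandom());
  }
}

int main() {
  uint8_t buf[33];
  FillRandom(buf + 1, 32);  // deliberately unaligned start address
  return 0;
}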
(void)argc; (void)argv; @@ -65,6 +66,7 @@ int main(int argc, const char* argv[]) { printf("Has ARM %x\n", has_arm); printf("Has MIPS %x\n", has_mips); printf("Has X86 %x\n", has_x86); + printf("Has LOONGARCH %x\n", has_loongarch); if (has_arm) { int has_neon = TestCpuFlag(kCpuHasNEON); printf("Has NEON %x\n", has_neon); @@ -72,8 +74,12 @@ int main(int argc, const char* argv[]) { if (has_mips) { int has_msa = TestCpuFlag(kCpuHasMSA); printf("Has MSA %x\n", has_msa); - int has_mmi = TestCpuFlag(kCpuHasMMI); - printf("Has MMI %x\n", has_mmi); + } + if (has_loongarch) { + int has_lsx = TestCpuFlag(kCpuHasLSX); + printf("Has LSX %x\n", has_lsx); + int has_lasx = TestCpuFlag(kCpuHasLASX); + printf("Has LASX %x\n", has_lasx); } if (has_x86) { int has_sse2 = TestCpuFlag(kCpuHasSSE2); @@ -88,6 +94,7 @@ int main(int argc, const char* argv[]) { int has_gfni = TestCpuFlag(kCpuHasGFNI); int has_avx512bw = TestCpuFlag(kCpuHasAVX512BW); int has_avx512vl = TestCpuFlag(kCpuHasAVX512VL); + int has_avx512vnni = TestCpuFlag(kCpuHasAVX512VNNI); int has_avx512vbmi = TestCpuFlag(kCpuHasAVX512VBMI); int has_avx512vbmi2 = TestCpuFlag(kCpuHasAVX512VBMI2); int has_avx512vbitalg = TestCpuFlag(kCpuHasAVX512VBITALG); @@ -104,6 +111,7 @@ int main(int argc, const char* argv[]) { printf("Has GFNI %x\n", has_gfni); printf("Has AVX512BW %x\n", has_avx512bw); printf("Has AVX512VL %x\n", has_avx512vl); + printf("Has AVX512VNNI %x\n", has_avx512vnni); printf("Has AVX512VBMI %x\n", has_avx512vbmi); printf("Has AVX512VBMI2 %x\n", has_avx512vbmi2); printf("Has AVX512VBITALG %x\n", has_avx512vbitalg); diff --git a/third-party/libyuv/third_party/libyuv/util/psnr_main.cc b/third-party/libyuv/third_party/libyuv/util/psnr_main.cc index a930b202ec..8b9fd97246 100644 --- a/third-party/libyuv/third_party/libyuv/util/psnr_main.cc +++ b/third-party/libyuv/third_party/libyuv/util/psnr_main.cc @@ -248,13 +248,13 @@ bool UpdateMetrics(uint8_t* ch_org, int number_of_frames, metric* cur_distortion_psnr, metric* distorted_frame, - bool do_psnr) { + bool compute_psnr) { const int uv_offset = (do_swap_uv ? uv_size : 0); const uint8_t* const u_org = ch_org + y_size + uv_offset; const uint8_t* const u_rec = ch_rec + y_size; const uint8_t* const v_org = ch_org + y_size + (uv_size - uv_offset); const uint8_t* const v_rec = ch_rec + y_size + uv_size; - if (do_psnr) { + if (compute_psnr) { #ifdef HAVE_JPEG double y_err = static_cast( libyuv::ComputeSumSquareError(ch_org, ch_rec, y_size)); diff --git a/third-party/libyuv/third_party/libyuv/util/yuvconvert.cc b/third-party/libyuv/third_party/libyuv/util/yuvconvert.cc index 27cdfe9e37..93b5266853 100644 --- a/third-party/libyuv/third_party/libyuv/util/yuvconvert.cc +++ b/third-party/libyuv/third_party/libyuv/util/yuvconvert.cc @@ -42,9 +42,9 @@ static __inline uint32_t Abs(int32_t v) { } // Parse PYUV format. ie name.1920x800_24Hz_P420.yuv -bool ExtractResolutionFromFilename(const char* name, - int* width_ptr, - int* height_ptr) { +static bool ExtractResolutionFromFilename(const char* name, + int* width_ptr, + int* height_ptr) { // Isolate the .width_height. section of the filename by searching for a // dot or underscore followed by a digit. for (int i = 0; name[i]; ++i) { @@ -59,7 +59,7 @@ bool ExtractResolutionFromFilename(const char* name, return false; } -void PrintHelp(const char* program) { +static void PrintHelp(const char* program) { printf("%s [-options] src_argb.raw dst_yuv.raw\n", program); printf( " -s .... specify source resolution. 
" @@ -78,7 +78,7 @@ void PrintHelp(const char* program) { exit(0); } -void ParseOptions(int argc, const char* argv[]) { +static void ParseOptions(int argc, const char* argv[]) { if (argc <= 1) { PrintHelp(argv[0]); } @@ -165,23 +165,23 @@ static int TileARGBScale(const uint8_t* src_argb, int src_height, uint8_t* dst_argb, int dst_stride_argb, - int dst_width, - int dst_height, + int destination_width, + int destination_height, libyuv::FilterMode filtering) { - for (int y = 0; y < dst_height; y += kTileY) { - for (int x = 0; x < dst_width; x += kTileX) { + for (int y = 0; y < destination_height; y += kTileY) { + for (int x = 0; x < destination_width; x += kTileX) { int clip_width = kTileX; - if (x + clip_width > dst_width) { - clip_width = dst_width - x; + if (x + clip_width > destination_width) { + clip_width = destination_width - x; } int clip_height = kTileY; - if (y + clip_height > dst_height) { - clip_height = dst_height - y; + if (y + clip_height > destination_height) { + clip_height = destination_height - y; } int r = libyuv::ARGBScaleClip(src_argb, src_stride_argb, src_width, src_height, dst_argb, dst_stride_argb, - dst_width, dst_height, x, y, clip_width, - clip_height, filtering); + destination_width, destination_height, x, y, + clip_width, clip_height, filtering); if (r) { return r; } diff --git a/third-party/opus/build-opus-bazel.sh b/third-party/opus/build-opus-bazel.sh index e78dd815af..e7a4d7af99 100755 --- a/third-party/opus/build-opus-bazel.sh +++ b/third-party/opus/build-opus-bazel.sh @@ -6,7 +6,7 @@ ARCH="$1" BUILD_DIR=$(echo "$(cd "$(dirname "$2")"; pwd -P)/$(basename "$2")") SOURCE_CODE_ARCHIVE="$3" -MINIOSVERSION="9.0" +MINIOSVERSION="11.0" OPT_CFLAGS="-Os -g" OPT_LDFLAGS="" diff --git a/third-party/webrtc/BUILD b/third-party/webrtc/BUILD index b5ef920b5d..f3f929830f 100644 --- a/third-party/webrtc/BUILD +++ b/third-party/webrtc/BUILD @@ -45,9 +45,7 @@ absl_sources = [ "dependencies/third_party/abseil-cpp/" + x for x in [ "absl/strings/internal/string_constant.h", "absl/base/internal/inline_variable.h", "absl/base/internal/cycleclock.cc", - "absl/base/internal/exponential_biased.cc", "absl/base/internal/low_level_alloc.cc", - "absl/base/internal/periodic_sampler.cc", "absl/base/internal/raw_logging.cc", "absl/base/internal/scoped_set_env.cc", "absl/base/internal/spinlock.cc", @@ -72,7 +70,6 @@ absl_sources = [ "dependencies/third_party/abseil-cpp/" + x for x in [ "absl/debugging/internal/examine_stack.cc", "absl/debugging/internal/stack_consumption.cc", "absl/debugging/internal/vdso_support.cc", - "absl/debugging/leak_check_disable.cc", "absl/debugging/stacktrace.cc", "absl/debugging/symbolize.cc", "absl/flags/flag.cc", @@ -241,10 +238,8 @@ absl_sources = [ "dependencies/third_party/abseil-cpp/" + x for x in [ "absl/base/log_severity.h", "absl/base/internal/sysinfo.h", "absl/base/internal/scoped_set_env.h", - "absl/base/internal/periodic_sampler.h", "absl/base/internal/spinlock.h", "absl/base/internal/raw_logging.h", - "absl/base/internal/exponential_biased.h", "absl/base/dynamic_annotations.h", "absl/strings/match.h", "absl/algorithm/container.h", @@ -315,7 +310,6 @@ absl_sources = [ "dependencies/third_party/abseil-cpp/" + x for x in [ "absl/base/internal/scheduling_mode.h", "absl/base/internal/tsan_mutex_interface.h", "absl/base/internal/unaligned_access.h", - "absl/container/internal/have_sse.h", "absl/container/internal/inlined_vector.h", "absl/debugging/internal/stacktrace_unimplemented-inl.inc", "absl/debugging/internal/stacktrace_generic-inl.inc", @@ -334,7 
+328,6 @@ absl_sources = [ "dependencies/third_party/abseil-cpp/" + x for x in [ "absl/strings/str_format.h", "absl/synchronization/internal/kernel_timeout.h", "absl/synchronization/internal/per_thread_sem.h", - "absl/time/internal/zoneinfo.inc", "absl/types/internal/optional.h", "absl/types/internal/span.h", "absl/types/variant.h", @@ -358,18 +351,31 @@ absl_sources = [ "dependencies/third_party/abseil-cpp/" + x for x in [ "absl/functional/internal/function_ref.h", "absl/functional/bind_front.h", "absl/functional/internal/front_binder.h", + "absl/base/internal/cycleclock_config.h", + "absl/base/internal/prefetch.h", + "absl/base/internal/unscaledcycleclock_config.h", + "absl/functional/any_invocable.h", + "absl/profiling/internal/exponential_biased.h", + "absl/strings/cord_analysis.h", + "absl/strings/internal/has_absl_stringify.h", + "absl/container/internal/common_policy_traits.h", + "absl/functional/internal/any_invocable.h", + "absl/strings/cord_buffer.h", + "absl/strings/internal/stringify_sink.h", + "absl/cleanup/cleanup.h", + "absl/strings/internal/cord_data_edge.h", + "absl/cleanup/internal/cleanup.h", + "absl/strings/internal/cord_rep_crc.h", ]] webrtc_sources = [ "rtc_base/socket_address.h", "rtc_base/arraysize.h", "rtc_base/strings/string_builder.h", - "rtc_base/atomic_ops.h", "rtc_base/weak_ptr.h", "rtc_base/zero_memory.h", "rtc_base/unique_id_generator.h", "rtc_base/numerics/safe_conversions.h", - "rtc_base/time/timestamp_extrapolator.h", "rtc_base/third_party/base64/base64.h", "rtc_base/platform_thread_types.h", "rtc_base/task_queue.h", @@ -411,9 +417,7 @@ webrtc_sources = [ "rtc_base/network_monitor.h", "rtc_base/message_digest.h", "rtc_base/network.h", - "rtc_base/message_handler.h", "rtc_base/memory/aligned_malloc.h", - "rtc_base/location.h", "rtc_base/byte_order.h", "rtc_base/ifaddrs_converter.h", "rtc_base/helpers.h", @@ -426,7 +430,6 @@ webrtc_sources = [ "rtc_base/experiments/min_video_bitrate_experiment.h", "rtc_base/experiments/field_trial_list.h", "rtc_base/experiments/field_trial_units.h", - "rtc_base/experiments/jitter_upper_bound_experiment.h", "rtc_base/experiments/field_trial_parser.h", "rtc_base/data_rate_limiter.h", "rtc_base/experiments/cpu_speed_experiment.h", @@ -441,7 +444,6 @@ webrtc_sources = [ "rtc_base/async_socket.h", "rtc_base/async_udp_socket.h", "rtc_base/async_resolver_interface.h", - "rtc_base/async_invoker.h", "rtc_base/async_tcp_socket.h", "rtc_base/byte_buffer.h", "rtc_base/crypt_string.h", @@ -472,7 +474,6 @@ webrtc_sources = [ "rtc_base/system/inline.h", "rtc_base/type_traits.h", "rtc_base/numerics/safe_compare.h", - "rtc_base/async_invoker.cc", "rtc_base/async_packet_socket.cc", "rtc_base/async_resolver_interface.cc", "rtc_base/async_socket.cc", @@ -494,7 +495,6 @@ webrtc_sources = [ "rtc_base/experiments/field_trial_list.cc", "rtc_base/experiments/field_trial_parser.cc", "rtc_base/experiments/field_trial_units.cc", - "rtc_base/experiments/jitter_upper_bound_experiment.cc", "rtc_base/experiments/keyframe_interval_settings.cc", "rtc_base/experiments/min_video_bitrate_experiment.cc", "rtc_base/experiments/normalize_simulcast_size_experiment.cc", @@ -510,13 +510,11 @@ webrtc_sources = [ "rtc_base/http_common.cc", "rtc_base/ifaddrs_converter.cc", "rtc_base/ip_address.cc", - "rtc_base/location.cc", "rtc_base/log_sinks.cc", "rtc_base/logging.cc", "rtc_base/memory/aligned_malloc.cc", "rtc_base/memory/fifo_buffer.cc", "rtc_base/message_digest.cc", - "rtc_base/message_handler.cc", "rtc_base/net_helper.cc", "rtc_base/net_helpers.cc", 
"rtc_base/network.cc", @@ -576,7 +574,6 @@ webrtc_sources = [ "rtc_base/third_party/base64/base64.cc", "rtc_base/third_party/sigslot/sigslot.cc", "rtc_base/thread.cc", - "rtc_base/time/timestamp_extrapolator.cc", "rtc_base/time_utils.cc", "rtc_base/timestamp_aligner.cc", "rtc_base/unique_id_generator.cc", @@ -585,7 +582,6 @@ webrtc_sources = [ "rtc_base/ref_count.h", "rtc_base/ref_counter.h", "rtc_base/logging.h", - "api/task_queue/queued_task.h", "api/audio/audio_frame.cc", "api/audio/channel_layout.h", "api/audio/channel_layout.cc", @@ -643,7 +639,6 @@ webrtc_sources = [ "api/rtp_packet_info.cc", "api/rtp_parameters.cc", "api/rtp_receiver_interface.cc", - "api/rtp_sender_interface.cc", "api/rtp_transceiver_interface.cc", "api/sctp_transport_interface.cc", "api/stats_types.cc", @@ -694,7 +689,6 @@ webrtc_sources = [ "pc/audio_rtp_receiver.cc", "pc/audio_track.cc", "pc/channel.cc", - "pc/channel_manager.cc", "pc/data_channel_controller.cc", "pc/dtls_srtp_transport.h", "pc/dtls_srtp_transport.cc", @@ -725,7 +719,6 @@ webrtc_sources = [ "pc/rtp_sender.cc", "pc/rtp_transceiver.cc", "pc/rtp_transport.cc", - "pc/sctp_data_channel_transport.cc", "pc/sctp_transport.cc", "pc/sctp_utils.cc", "pc/sdp_serializer.cc", @@ -735,7 +728,6 @@ webrtc_sources = [ "pc/srtp_filter.cc", "pc/srtp_session.cc", "pc/srtp_transport.cc", - "pc/stats_collector.cc", "pc/track_media_info_map.cc", "pc/transport_stats.cc", "pc/video_rtp_receiver.cc", @@ -764,7 +756,6 @@ webrtc_sources = [ "media/engine/internal_encoder_factory.cc", "media/engine/multiplex_codec_factory.cc", "media/engine/payload_type_mapper.cc", - "media/engine/simulcast.cc", "media/engine/simulcast_encoder_adapter.cc", "media/engine/unhandled_packets_buffer.cc", "media/engine/webrtc_media_engine.h", @@ -1129,7 +1120,6 @@ webrtc_sources = [ "modules/audio_processing/transient/transient_suppressor_impl.cc", "modules/audio_processing/transient/wpd_node.cc", "modules/audio_processing/transient/wpd_tree.cc", - "modules/audio_processing/typing_detection.cc", "modules/audio_processing/utility/cascaded_biquad_filter.cc", "modules/audio_processing/utility/delay_estimator.cc", "modules/audio_processing/utility/delay_estimator_wrapper.cc", @@ -1168,10 +1158,8 @@ webrtc_sources = [ "modules/congestion_controller/goog_cc/trendline_estimator.cc", "modules/pacing/bitrate_prober.cc", "modules/pacing/interval_budget.cc", - "modules/pacing/paced_sender.cc", "modules/pacing/pacing_controller.cc", "modules/pacing/packet_router.cc", - "modules/pacing/round_robin_packet_queue.cc", "modules/pacing/task_queue_paced_sender.cc", "modules/rtp_rtcp/include/report_block_data.cc", "modules/rtp_rtcp/include/rtp_rtcp_defines.cc", @@ -1195,7 +1183,6 @@ webrtc_sources = [ "modules/rtp_rtcp/source/rtcp_packet/common_header.cc", "modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc", "modules/rtp_rtcp/source/rtcp_packet/dlrr.cc", - "modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.cc", "modules/rtp_rtcp/source/rtcp_packet/extended_reports.cc", "modules/rtp_rtcp/source/rtcp_packet/fir.cc", "modules/rtp_rtcp/source/rtcp_packet/loss_notification.cc", @@ -1253,7 +1240,6 @@ webrtc_sources = [ "modules/rtp_rtcp/source/tmmbr_help.cc", "modules/rtp_rtcp/source/ulpfec_generator.cc", "modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc", - "modules/rtp_rtcp/source/ulpfec_receiver_impl.cc", "modules/rtp_rtcp/source/video_rtp_depacketizer.cc", "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.cc", "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.cc", @@ -1262,11 +1248,9 @@ 
webrtc_sources = [ "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.cc", "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc", "modules/rtp_rtcp/source/receive_statistics_impl.cc", - "modules/utility/source/process_thread_impl.cc", "modules/video_capture/device_info_impl.cc", "modules/video_capture/video_capture_factory.cc", "modules/video_capture/video_capture_impl.cc", - "modules/video_coding/codec_timer.cc", "modules/video_coding/codecs/h264/h264.cc", "modules/video_coding/codecs/h264/h264_color_space.cc", "modules/video_coding/codecs/h264/h264_decoder_impl.cc", @@ -1281,7 +1265,6 @@ webrtc_sources = [ "modules/video_coding/encoded_frame.cc", "modules/video_coding/fec_controller_default.cc", "modules/video_coding/frame_buffer2.cc", - "modules/video_coding/frame_buffer3.cc", "modules/video_coding/frame_dependencies_calculator.cc", "modules/video_coding/frame_object.cc", "modules/video_coding/generic_decoder.cc", @@ -1289,16 +1272,10 @@ webrtc_sources = [ "modules/video_coding/h264_sps_pps_tracker.cc", "modules/video_coding/histogram.cc", "modules/video_coding/include/video_codec_interface.cc", - "modules/video_coding/inter_frame_delay.cc", - "modules/video_coding/jitter_estimator.cc", "modules/video_coding/loss_notification_controller.cc", "modules/video_coding/media_opt_util.cc", "modules/video_coding/packet_buffer.cc", "modules/video_coding/rtp_frame_reference_finder.cc", - "modules/video_coding/rtt_filter.cc", - "modules/video_coding/timestamp_map.cc", - "modules/video_coding/timing.cc", - "modules/video_coding/unique_timestamp_counter.cc", "modules/video_coding/utility/decoded_frames_history.cc", "modules/video_coding/utility/frame_dropper.cc", "modules/video_coding/utility/ivf_file_reader.cc", @@ -1319,11 +1296,6 @@ webrtc_sources = [ "modules/video_coding/codecs/vp9/svc_config.cc", "modules/video_coding/codecs/vp9/vp9.cc", "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc", - "modules/video_processing/util/denoiser_filter.cc", - "modules/video_processing/util/denoiser_filter_c.cc", - "modules/video_processing/util/noise_estimation.cc", - "modules/video_processing/util/skin_detection.cc", - "modules/video_processing/video_denoiser.cc", "call/adaptation/encoder_settings.cc", "call/adaptation/resource_adaptation_processor_interface.cc", "call/adaptation/video_source_restrictions.cc", @@ -1414,10 +1386,8 @@ webrtc_sources = [ "common_video/h264/pps_parser.cc", "common_video/h264/sps_parser.cc", "common_video/h264/sps_vui_rewriter.cc", - "common_video/incoming_video_stream.cc", "common_video/libyuv/webrtc_libyuv.cc", "common_video/video_frame_buffer.cc", - "common_video/video_render_frames.cc", "p2p/base/async_stun_tcp_socket.cc", "p2p/base/basic_async_resolver_factory.cc", "p2p/base/basic_ice_controller.cc", @@ -1487,7 +1457,6 @@ webrtc_sources = [ "video/adaptation/overuse_frame_detector.cc", "video/adaptation/quality_scaler_resource.cc", "video/buffered_frame_decryptor.cc", - "video/call_stats.cc", "video/encoder_bitrate_adjuster.cc", "video/encoder_overshoot_detector.cc", "video/encoder_rtcp_feedback.cc", @@ -1495,20 +1464,16 @@ webrtc_sources = [ "video/frame_encode_metadata_writer.cc", "video/quality_limitation_reason_tracker.cc", "video/quality_threshold.cc", - "video/receive_statistics_proxy.cc", "video/report_block_stats.cc", - "video/rtp_video_stream_receiver.cc", "video/rtp_video_stream_receiver_frame_transformer_delegate.cc", "video/send_delay_stats.cc", "video/send_statistics_proxy.cc", "video/stats_counter.cc", "video/stream_synchronization.cc", 
"video/transport_adapter.cc", - "video/video_quality_observer.cc", "video/video_send_stream.cc", "video/video_send_stream_impl.cc", "video/video_source_sink_controller.cc", - "video/video_stream_decoder.cc", "video/video_stream_decoder_impl.cc", "video/video_stream_encoder.cc", "audio/audio_level.cc", @@ -1520,7 +1485,6 @@ webrtc_sources = [ "audio/channel_receive_frame_transformer_delegate.cc", "audio/channel_send.cc", "audio/channel_send_frame_transformer_delegate.cc", - "audio/null_audio_poller.cc", "audio/remix_resample.cc", "audio/utility/audio_frame_operations.cc", "audio/utility/channel_mixer.cc", @@ -1644,7 +1608,6 @@ webrtc_sources = [ "api/video_codecs/video_decoder_factory.h", "api/video_codecs/video_decoder_software_fallback_wrapper.h", "api/video_codecs/video_encoder.h", - "api/video_codecs/video_encoder_config.h", "api/video_codecs/video_encoder_factory.h", "api/video_codecs/video_encoder_software_fallback_wrapper.h", "api/video_codecs/vp8_frame_config.h", @@ -1717,10 +1680,8 @@ webrtc_sources = [ "common_video/h264/sps_parser.h", "common_video/h264/sps_vui_rewriter.h", "common_video/include/bitrate_adjuster.h", - "common_video/include/incoming_video_stream.h", "common_video/include/video_frame_buffer.h", "common_video/libyuv/include/webrtc_libyuv.h", - "common_video/video_render_frames.h", "logging/rtc_event_log/encoder/blob_encoding.h", "logging/rtc_event_log/encoder/delta_encoding.h", "logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h", @@ -1772,7 +1733,6 @@ webrtc_sources = [ "media/engine/internal_encoder_factory.h", "media/engine/multiplex_codec_factory.h", "media/engine/payload_type_mapper.h", - "media/engine/simulcast.h", "media/engine/simulcast_encoder_adapter.h", "media/engine/unhandled_packets_buffer.h", "media/engine/webrtc_media_engine_defaults.h", @@ -1861,7 +1821,6 @@ webrtc_sources = [ "modules/audio_coding/codecs/pcm16b/pcm16b_common.h", "modules/audio_coding/codecs/red/audio_encoder_copy_red.h", "modules/audio_coding/include/audio_coding_module.h", - "modules/audio_coding/neteq/relative_arrival_delay_tracker.h", "modules/audio_coding/neteq/reorder_optimizer.h", "modules/audio_coding/neteq/underrun_optimizer.h", "modules/audio_coding/neteq/accelerate.h", @@ -2034,7 +1993,6 @@ webrtc_sources = [ "modules/audio_processing/transient/transient_suppressor_impl.h", "modules/audio_processing/transient/wpd_node.h", "modules/audio_processing/transient/wpd_tree.h", - "modules/audio_processing/typing_detection.h", "modules/audio_processing/utility/cascaded_biquad_filter.h", "modules/audio_processing/utility/delay_estimator.h", "modules/audio_processing/utility/delay_estimator_wrapper.h", @@ -2074,10 +2032,8 @@ webrtc_sources = [ "modules/include/module_common_types.h", "modules/pacing/bitrate_prober.h", "modules/pacing/interval_budget.h", - "modules/pacing/paced_sender.h", "modules/pacing/pacing_controller.h", "modules/pacing/packet_router.h", - "modules/pacing/round_robin_packet_queue.h", "modules/pacing/task_queue_paced_sender.h", "modules/remote_bitrate_estimator/aimd_rate_control.h", "modules/remote_bitrate_estimator/include/bwe_defines.h", @@ -2112,7 +2068,6 @@ webrtc_sources = [ "modules/rtp_rtcp/source/rtcp_packet/common_header.h", "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h", "modules/rtp_rtcp/source/rtcp_packet/dlrr.h", - "modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h", "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h", "modules/rtp_rtcp/source/rtcp_packet/fir.h", 
"modules/rtp_rtcp/source/rtcp_packet/loss_notification.h", @@ -2169,7 +2124,6 @@ webrtc_sources = [ "modules/rtp_rtcp/source/tmmbr_help.h", "modules/rtp_rtcp/source/ulpfec_generator.h", "modules/rtp_rtcp/source/ulpfec_header_reader_writer.h", - "modules/rtp_rtcp/source/ulpfec_receiver_impl.h", "modules/rtp_rtcp/source/video_rtp_depacketizer.h", "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h", "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h", @@ -2180,14 +2134,11 @@ webrtc_sources = [ "modules/third_party/fft/fft.h", "modules/third_party/g711/g711.h", "modules/third_party/g722/g722_enc_dec.h", - "modules/utility/source/process_thread_impl.h", "modules/video_capture/device_info_impl.h", "modules/video_capture/video_capture_factory.h", "modules/video_capture/video_capture_impl.h", "modules/video_coding/utility/vp9_constants.h", "modules/video_coding/utility/bandwidth_quality_scaler.h", - "modules/video_coding/codec_timer.h", - "modules/video_coding/codecs/av1/libaom_av1_decoder.h", "modules/video_coding/codecs/av1/libaom_av1_encoder.h", "modules/video_coding/codecs/h264/include/h264.h", "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h", @@ -2219,20 +2170,14 @@ webrtc_sources = [ "modules/video_coding/include/video_codec_interface.h", "modules/video_coding/include/video_coding_defines.h", "modules/video_coding/include/video_error_codes.h", - "modules/video_coding/inter_frame_delay.h", "modules/video_coding/jitter_buffer.h", - "modules/video_coding/jitter_estimator.h", "modules/video_coding/loss_notification_controller.h", "modules/video_coding/media_opt_util.h", "modules/video_coding/packet.h", "modules/video_coding/packet_buffer.h", "modules/video_coding/receiver.h", "modules/video_coding/rtp_frame_reference_finder.h", - "modules/video_coding/rtt_filter.h", "modules/video_coding/session_info.h", - "modules/video_coding/timestamp_map.h", - "modules/video_coding/timing.h", - "modules/video_coding/unique_timestamp_counter.h", "modules/video_coding/utility/decoded_frames_history.h", "modules/video_coding/utility/frame_dropper.h", "modules/video_coding/utility/ivf_file_reader.h", @@ -2244,12 +2189,6 @@ webrtc_sources = [ "modules/video_coding/utility/vp9_uncompressed_header_parser.h", "modules/video_coding/video_coding_impl.h", "modules/video_coding/video_receiver2.h", - "modules/video_processing/util/denoiser_filter.h", - "modules/video_processing/util/denoiser_filter_c.h", - "modules/video_processing/util/denoiser_filter_neon.h", - "modules/video_processing/util/noise_estimation.h", - "modules/video_processing/util/skin_detection.h", - "modules/video_processing/video_denoiser.h", "p2p/base/async_stun_tcp_socket.h", "p2p/base/basic_async_resolver_factory.h", "p2p/base/basic_ice_controller.h", @@ -2285,7 +2224,6 @@ webrtc_sources = [ "pc/audio_rtp_receiver.h", "pc/audio_track.h", "pc/channel.h", - "pc/channel_manager.h", "pc/data_channel_controller.h", "pc/dtls_transport.h", "pc/dtmf_sender.h", @@ -2317,7 +2255,6 @@ webrtc_sources = [ "pc/rtp_sender_proxy.h", "pc/rtp_transceiver.h", "pc/rtp_transport.h", - "pc/sctp_data_channel_transport.h", "pc/sctp_transport.h", "pc/sctp_utils.h", "pc/sdp_serializer.h", @@ -2327,7 +2264,6 @@ webrtc_sources = [ "pc/srtp_filter.h", "pc/srtp_session.h", "pc/srtp_transport.h", - "pc/stats_collector.h", "pc/track_media_info_map.h", "pc/transport_stats.h", "pc/video_rtp_receiver.h", @@ -2354,7 +2290,6 @@ webrtc_sources = [ "modules/audio_processing/transient/transient_suppressor.h", 
"modules/audio_processing/utility/delay_estimator_internal.h", "modules/audio_processing/vad/common.h", - "modules/include/module.h", "modules/include/module_common_types_public.h", "modules/include/module_fec_types.h", "modules/rtp_rtcp/source/byte_io.h", @@ -2385,7 +2320,6 @@ webrtc_sources = [ "api/transport/data_channel_transport_interface.h", "api/transport/enums.h", "api/transport/network_control.h", - "api/transport/webrtc_key_value_config.h", "api/turn_customizer.h", "api/video/video_bitrate_allocator_factory.h", "api/video/video_codec_type.h", @@ -2397,13 +2331,11 @@ webrtc_sources = [ "audio/channel_receive.h", "audio/channel_send.h", "audio/channel_send_frame_transformer_delegate.h", - "audio/null_audio_poller.h", "audio/utility/channel_mixer.h", "audio/utility/channel_mixing_matrix.h", "audio/voip/audio_egress.h", "p2p/base/ice_controller_factory_interface.h", "rtc_base/bitstream_reader.h", - "rtc_base/async_invoker_inl.h", "rtc_base/buffer.h", "rtc_base/compile_assert_c.h", "rtc_base/dscp.h", @@ -2422,28 +2354,22 @@ webrtc_sources = [ "rtc_base/units/unit_base.h", "rtc_base/containers/flat_map.h", "rtc_base/containers/flat_tree.h", - "rtc_base/containers/as_const.h", - "rtc_base/containers/not_fn.h", "rtc_base/containers/invoke.h", - "rtc_base/containers/void_t.h", "rtc_base/containers/flat_set.h", "rtc_base/containers/identity.h", "video/adaptation/encode_usage_resource.h", "video/adaptation/overuse_frame_detector.h", - "video/call_stats.h", "video/encoder_bitrate_adjuster.h", "video/encoder_overshoot_detector.h", "video/encoder_rtcp_feedback.h", "video/frame_dumping_decoder.h", "video/frame_encode_metadata_writer.h", "video/quality_limitation_reason_tracker.h", - "video/receive_statistics_proxy.h", "video/rtp_video_stream_receiver_frame_transformer_delegate.h", "video/send_delay_stats.h", "video/send_statistics_proxy.h", "video/stream_synchronization.h", "video/transport_adapter.h", - "video/video_quality_observer.h", "video/video_send_stream_impl.h", "video/video_source_sink_controller.h", "video/video_stream_decoder_impl.h", @@ -2458,7 +2384,6 @@ webrtc_sources = [ "api/video/video_codec_constants.h", "api/video/video_frame_type.h", "api/video/video_sink_interface.h", - "api/video/video_stream_encoder_interface.h", "audio/audio_send_stream.h", "audio/channel_receive_frame_transformer_delegate.h", "audio/remix_resample.h", @@ -2509,7 +2434,6 @@ webrtc_sources = [ "modules/audio_processing/vad/noise_gmm_tables.h", "modules/congestion_controller/goog_cc/delay_increase_detector_interface.h", "modules/rtp_rtcp/include/receive_statistics.h", - "modules/utility/include/process_thread.h", "modules/video_coding/codecs/vp9/include/vp9_globals.h", "pc/rtp_transport_internal.h", "rtc_base/mdns_responder_interface.h", @@ -2517,15 +2441,12 @@ webrtc_sources = [ "rtc_base/numerics/sequence_number_util.h", "rtc_base/openssl.h", "rtc_base/socket_server.h", - "rtc_base/task_utils/to_queued_task.h", "video/adaptation/quality_scaler_resource.h", "video/buffered_frame_decryptor.h", "video/quality_threshold.h", "video/report_block_stats.h", - "video/rtp_video_stream_receiver.h", "video/stats_counter.h", "video/video_send_stream.h", - "video/video_stream_decoder.h", "video/video_stream_encoder.h", "rtc_base/socket_factory.h", "api/audio_codecs/audio_decoder_factory_template.h", @@ -2583,11 +2504,9 @@ webrtc_sources = [ "rtc_base/ignore_wundef.h", "rtc_base/numerics/math_utils.h", "rtc_base/numerics/mod_ops.h", - "rtc_base/numerics/moving_median_filter.h", "rtc_base/socket_adapters.h", 
"rtc_base/ssl_roots.h", "rtc_base/system/warn_current_thread_is_deadlocked.h", - "rtc_base/thread_message.h", "rtc_base/trace_event.h", "api/audio_codecs/isac/audio_decoder_isac.h", "api/audio_codecs/isac/audio_encoder_isac.h", @@ -2608,7 +2527,6 @@ webrtc_sources = [ "modules/audio_coding/codecs/ilbc/state_search.h", "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h", "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h", - "modules/audio_processing/agc/gain_map_internal.h", "modules/video_capture/video_capture_config.h", "modules/video_coding/codecs/vp8/include/vp8.h", "p2p/base/p2p_transport_channel_ice_field_trials.h", @@ -2634,7 +2552,6 @@ webrtc_sources = [ "pc/channel_interface.h", "call/packet_receiver.h", "p2p/base/transport_info.h", - "modules/rtp_rtcp/include/ulpfec_receiver.h", "modules/rtp_rtcp/include/rtp_packet_sender.h", "rtc_base/numerics/moving_max_counter.h", "modules/audio_coding/codecs/ilbc/enhancer_interface.h", @@ -2656,9 +2573,7 @@ webrtc_sources = [ "pc/peer_connection_proxy.h", "pc/used_ids.h", "rtc_base/numerics/divide_round.h", - "rtc_base/system/thread_registry.h", "rtc_base/one_time_event.h", - "rtc_base/format_macros.h", "audio/conversion.h", "modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h", "modules/audio_coding/codecs/ilbc/unpack_bits.h", @@ -2678,7 +2593,6 @@ webrtc_sources = [ "api/priority.h", "api/transport/sctp_transport_factory_interface.h", "api/video/video_adaptation_reason.h", - "api/video/video_stream_encoder_observer.h", "api/video_codecs/spatial_layer.h", "common_audio/third_party/ooura/fft_size_128/ooura_fft.h", "common_audio/third_party/ooura/fft_size_128/ooura_fft.cc", @@ -2702,13 +2616,10 @@ webrtc_sources = [ "rtc_base/openssl_key_pair.h", "rtc_base/openssl_key_pair.cc", "rtc_base/synchronization/mutex.h", - "rtc_base/synchronization/mutex.cc", "rtc_base/synchronization/mutex_critical_section.h", "rtc_base/synchronization/mutex_pthread.h", "rtc_base/deprecated/recursive_critical_section.h", "rtc_base/deprecated/recursive_critical_section.cc", - "rtc_base/task_utils/pending_task_safety_flag.h", - "rtc_base/task_utils/pending_task_safety_flag.cc", "api/video/video_frame_metadata.h", "api/video/video_frame_metadata.cc", "modules/rtp_rtcp/source/rtp_rtcp_impl2.h", @@ -2728,7 +2639,6 @@ webrtc_sources = [ "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc", "modules/video_coding/chain_diff_calculator.h", "modules/video_coding/chain_diff_calculator.cc", - "modules/video_coding/deprecated/nack_module.h", "modules/video_coding/rtp_frame_id_only_ref_finder.h", "modules/video_coding/rtp_frame_id_only_ref_finder.cc", "modules/video_coding/svc/scalable_video_controller.h", @@ -2764,7 +2674,6 @@ webrtc_sources = [ "modules/video_coding/rtp_generic_ref_finder.cc", "pc/sctp_data_channel.h", "pc/sctp_data_channel.cc", - "pc/stats_collector_interface.h", "rtc_base/callback_list.h", "rtc_base/callback_list.cc", "call/adaptation/broadcast_resource_listener.h", @@ -2885,8 +2794,6 @@ webrtc_sources = [ "api/video_track_source_proxy_factory.h", "modules/remote_bitrate_estimator/packet_arrival_map.h", "modules/remote_bitrate_estimator/packet_arrival_map.cc", - "modules/audio_processing/agc/clipping_predictor.h", - "modules/audio_processing/agc/clipping_predictor.cc", "modules/rtp_rtcp/source/capture_clock_offset_updater.h", "modules/rtp_rtcp/source/capture_clock_offset_updater.cc", "pc/video_track_source_proxy.h", @@ -2895,10 +2802,6 @@ webrtc_sources = [ 
"modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc", "media/base/sdp_video_format_utils.h", "media/base/sdp_video_format_utils.cc", - "modules/audio_processing/agc/clipping_predictor_evaluator.h", - "modules/audio_processing/agc/clipping_predictor_evaluator.cc", - "modules/audio_processing/agc/clipping_predictor_level_buffer.h", - "modules/audio_processing/agc/clipping_predictor_level_buffer.cc", "media/sctp/sctp_transport_factory.h", "media/sctp/sctp_transport_factory.cc", "media/sctp/dcsctp_transport.h", @@ -2908,7 +2811,6 @@ webrtc_sources = [ "modules/audio_coding/neteq/underrun_optimizer.cc", "common_video/framerate_controller.cc", "modules/audio_processing/agc/analog_gain_stats_reporter.cc", - "modules/audio_coding/neteq/relative_arrival_delay_tracker.cc", "modules/audio_processing/agc2/vad_wrapper.cc", "modules/audio_processing/agc2/adaptive_digital_gain_controller.cc", "modules/video_coding/utility/framerate_controller_deprecated.h", @@ -2927,14 +2829,12 @@ webrtc_sources = [ "modules/video_coding/h265_vps_sps_pps_tracker.cc", "modules/rtp_rtcp/source/video_rtp_depacketizer_h265.cc", "common_video/h265/h265_bitstream_parser.cc", - "api/video_codecs/video_encoder_config.cc", "video/frame_cadence_adapter.cc", "modules/video_coding/codecs/h265/include/h265_globals.h", "video/frame_cadence_adapter.h", "common_video/h265/h265_common.h", "modules/video_coding/h265_vps_sps_pps_tracker.h", "common_video/h265/h265_pps_parser.h", - "modules/video_coding/frame_buffer3.h", "common_video/h265/h265_bitstream_parser.h", "modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h", "common_video/h265/h265_sps_parser.h", @@ -2945,8 +2845,6 @@ webrtc_sources = [ "api/video/i444_buffer.cc", "logging/rtc_event_log/events/rtc_event_field_encoding_parser.cc", "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h", - "video/frame_buffer_proxy.cc", - "video/frame_buffer_proxy.h", "video/decode_synchronizer.cc", "video/decode_synchronizer.h", "logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.cc", @@ -2969,7 +2867,93 @@ webrtc_sources = [ "logging/rtc_event_log/events/rtc_event_definition.h", "modules/video_coding/utility/ivf_defines.h", "video/frame_decode_scheduler.h", - "modules/video_coding/codecs/av1/libaom_av1_encoder_supported.h", + "api/video/i210_buffer.cc", + "api/video/i422_buffer.cc", + "video/config/video_encoder_config.cc", + "modules/audio_processing/aec3/config_selector.cc", + "modules/rtp_rtcp/source/ulpfec_receiver.cc", + "api/video_codecs/av1_profile.cc", + "modules/utility/maybe_worker_thread.cc", + "pc/legacy_stats_collector.cc", + "modules/audio_coding/neteq/packet_arrival_history.cc", + "api/task_queue/pending_task_safety_flag.cc", + "video/render/incoming_video_stream.cc", + "modules/pacing/prioritized_packet_queue.cc", + "video/unique_timestamp_counter.cc", + "modules/audio_processing/agc2/clipping_predictor.cc", + "api/video_codecs/scalability_mode.cc", + "modules/video_coding/codecs/vp8/vp8_scalability.cc", + "video/render/video_render_frames.cc", + "modules/video_coding/svc/scalability_mode_util.cc", + "modules/audio_processing/transient/voice_probability_delay_unit.cc", + "modules/audio_processing/aec3/multi_channel_content_detector.cc", + "video/video_stream_buffer_controller.cc", + "modules/audio_processing/agc2/clipping_predictor_level_buffer.cc", + "api/video/frame_buffer.cc", + "modules/video_coding/timing/inter_frame_delay.cc", + "modules/video_coding/timing/jitter_estimator.cc", + "modules/video_coding/timing/timing.cc", + 
"video/config/encoder_stream_factory.cc", + "p2p/base/ice_switch_reason.cc", + "p2p/base/wrapping_active_ice_controller.cc", + "modules/video_coding/timing/codec_timer.cc", + "modules/video_coding/timing/timestamp_extrapolator.cc", + "modules/video_coding/timing/frame_delay_variation_kalman_filter.cc", + "modules/video_coding/timing/rtt_filter.cc", + "video/config/simulcast.cc", + "api/field_trials_view.h", + "api/make_ref_counted.h", + "api/task_queue/pending_task_safety_flag.h", + "api/video/frame_buffer.h", + "api/video/i210_buffer.h", + "api/video/i422_buffer.h", + "api/video/resolution.h", + "api/video_codecs/av1_profile.h", + "api/video_codecs/scalability_mode.h", + "modules/audio_coding/neteq/packet_arrival_history.h", + "modules/audio_processing/aec3/block.h", + "modules/audio_processing/aec3/config_selector.h", + "modules/audio_processing/aec3/multi_channel_content_detector.h", + "modules/audio_processing/agc2/clipping_predictor.h", + "modules/audio_processing/agc2/clipping_predictor_level_buffer.h", + "modules/audio_processing/transient/voice_probability_delay_unit.h", + "modules/pacing/prioritized_packet_queue.h", + "modules/rtp_rtcp/source/ulpfec_receiver.h", + "modules/utility/maybe_worker_thread.h", + "modules/video_coding/codecs/vp8/vp8_scalability.h", + "modules/video_coding/svc/scalability_mode_util.h", + "modules/video_coding/timing/codec_timer.h", + "modules/video_coding/timing/frame_delay_variation_kalman_filter.h", + "modules/video_coding/timing/inter_frame_delay.h", + "modules/video_coding/timing/jitter_estimator.h", + "modules/video_coding/timing/rtt_filter.h", + "modules/video_coding/timing/timestamp_extrapolator.h", + "modules/video_coding/timing/timing.h", + "video/config/encoder_stream_factory.h", + "video/config/simulcast.h", + "video/config/video_encoder_config.h", + "video/render/incoming_video_stream.h", + "video/render/video_render_frames.h", + "video/unique_timestamp_counter.h", + "video/video_stream_buffer_controller.h", + "api/video_codecs/simulcast_stream.h", + "api/video_codecs/video_encoder_factory_template.h", + "modules/audio_processing/agc2/gain_map_internal.h", + "p2p/base/ice_switch_reason.h", + "p2p/base/wrapping_active_ice_controller.h", + "pc/legacy_stats_collector.h", + "rtc_base/numerics/moving_percentile_filter.h", + "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h", + "modules/video_coding/utility/vp8_constants.h", + "p2p/base/active_ice_controller_factory_interface.h", + "p2p/base/active_ice_controller_interface.h", + "pc/legacy_stats_collector_interface.h", + "rtc_base/memory/always_valid_pointer.h", + "video/video_stream_encoder_interface.h", + "video/video_stream_encoder_observer.h", + "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h", + "p2p/base/ice_agent_interface.h", + "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h", ] ios_objc_sources = [ @@ -3150,7 +3134,6 @@ ios_sources = [ "objc/api/peerconnection/RTCRtpReceiver.mm", "objc/api/peerconnection/RTCMediaStream.mm", "objc/api/peerconnection/RTCRtpTransceiver.mm", - "objc/api/peerconnection/RTCPeerConnectionFactory.mm", "objc/api/peerconnection/RTCCertificate.mm", "objc/api/peerconnection/RTCDtmfSender.mm", "objc/api/peerconnection/RTCMediaStreamTrack.mm", @@ -3220,6 +3203,11 @@ ios_sources = [ "objc/api/peerconnection/RTCIceCandidateErrorEvent.h", "objc/api/peerconnection/RTCIceCandidateErrorEvent+Private.h", "objc/api/peerconnection/RTCIceCandidateErrorEvent.mm", + "objc/native/api/ssl_certificate_verifier.mm", + 
"objc/native/api/ssl_certificate_verifier.h", + "objc/base/RTCSSLCertificateVerifier.h", + "objc/native/api/objc_audio_device_module.h", + "objc/components/audio/RTCAudioDevice.h", ] common_arm_specific_sources = [webrtc_source_dir + "/" + path for path in [ @@ -3229,7 +3217,6 @@ common_arm_specific_sources = [webrtc_source_dir + "/" + path for path in [ "modules/audio_coding/codecs/isac/fix/source/lattice_neon.c", "modules/audio_coding/codecs/isac/fix/source/transform_neon.c", "modules/audio_processing/aecm/aecm_core_neon.cc", - "modules/video_processing/util/denoiser_filter_neon.cc", "common_audio/fir_filter_neon.cc", "common_audio/signal_processing/cross_correlation_neon.c", "common_audio/signal_processing/downsample_fast_neon.c", @@ -3325,105 +3312,6 @@ arch_specific_cflags = select({ "@build_bazel_rules_apple//apple:ios_x86_64": common_flags + x86_64_specific_flags, }) -'''cc_library( - name = "usrsctp", - srcs = [ "dependencies/third_party/usrsctp/" + path for path in [ - "usrsctplib/usrsctplib/netinet/sctp.h", - "usrsctplib/usrsctplib/netinet/sctp_asconf.c", - "usrsctplib/usrsctplib/netinet/sctp_asconf.h", - "usrsctplib/usrsctplib/netinet/sctp_auth.c", - "usrsctplib/usrsctplib/netinet/sctp_auth.h", - "usrsctplib/usrsctplib/netinet/sctp_bsd_addr.c", - "usrsctplib/usrsctplib/netinet/sctp_bsd_addr.h", - "usrsctplib/usrsctplib/netinet/sctp_callout.c", - "usrsctplib/usrsctplib/netinet/sctp_callout.h", - "usrsctplib/usrsctplib/netinet/sctp_cc_functions.c", - "usrsctplib/usrsctplib/netinet/sctp_constants.h", - "usrsctplib/usrsctplib/netinet/sctp_crc32.c", - "usrsctplib/usrsctplib/netinet/sctp_crc32.h", - "usrsctplib/usrsctplib/netinet/sctp_header.h", - "usrsctplib/usrsctplib/netinet/sctp_indata.c", - "usrsctplib/usrsctplib/netinet/sctp_indata.h", - "usrsctplib/usrsctplib/netinet/sctp_input.c", - "usrsctplib/usrsctplib/netinet/sctp_input.h", - "usrsctplib/usrsctplib/netinet/sctp_lock_userspace.h", - "usrsctplib/usrsctplib/netinet/sctp_os.h", - "usrsctplib/usrsctplib/netinet/sctp_os_userspace.h", - "usrsctplib/usrsctplib/netinet/sctp_output.c", - "usrsctplib/usrsctplib/netinet/sctp_output.h", - "usrsctplib/usrsctplib/netinet/sctp_pcb.c", - "usrsctplib/usrsctplib/netinet/sctp_pcb.h", - "usrsctplib/usrsctplib/netinet/sctp_peeloff.c", - "usrsctplib/usrsctplib/netinet/sctp_peeloff.h", - "usrsctplib/usrsctplib/netinet/sctp_process_lock.h", - "usrsctplib/usrsctplib/netinet/sctp_sha1.c", - "usrsctplib/usrsctplib/netinet/sctp_sha1.h", - "usrsctplib/usrsctplib/netinet/sctp_ss_functions.c", - "usrsctplib/usrsctplib/netinet/sctp_structs.h", - "usrsctplib/usrsctplib/netinet/sctp_sysctl.c", - "usrsctplib/usrsctplib/netinet/sctp_sysctl.h", - "usrsctplib/usrsctplib/netinet/sctp_timer.c", - "usrsctplib/usrsctplib/netinet/sctp_timer.h", - "usrsctplib/usrsctplib/netinet/sctp_uio.h", - "usrsctplib/usrsctplib/netinet/sctp_userspace.c", - "usrsctplib/usrsctplib/netinet/sctp_usrreq.c", - "usrsctplib/usrsctplib/netinet/sctp_var.h", - "usrsctplib/usrsctplib/netinet/sctputil.c", - "usrsctplib/usrsctplib/netinet/sctputil.h", - "usrsctplib/usrsctplib/netinet6/sctp6_usrreq.c", - "usrsctplib/usrsctplib/netinet6/sctp6_var.h", - "usrsctplib/usrsctplib/user_atomic.h", - "usrsctplib/usrsctplib/user_environment.c", - "usrsctplib/usrsctplib/user_environment.h", - "usrsctplib/usrsctplib/user_inpcb.h", - "usrsctplib/usrsctplib/user_ip6_var.h", - "usrsctplib/usrsctplib/user_ip_icmp.h", - "usrsctplib/usrsctplib/user_malloc.h", - "usrsctplib/usrsctplib/user_mbuf.c", - "usrsctplib/usrsctplib/user_mbuf.h", - 
"usrsctplib/usrsctplib/user_queue.h", - "usrsctplib/usrsctplib/user_recv_thread.c", - "usrsctplib/usrsctplib/user_recv_thread.h", - "usrsctplib/usrsctplib/user_route.h", - "usrsctplib/usrsctplib/user_socket.c", - "usrsctplib/usrsctplib/user_socketvar.h", - "usrsctplib/usrsctplib/user_uma.h", - "usrsctplib/usrsctplib/usrsctp.h", - ]], - copts = [ - "-Ithird-party/webrtc/dependencies/third_party/usrsctp/usrsctplib/usrsctplib", - "-DHAVE_SA_LEN", - "-DHAVE_SCONN_LEN", - "-D__APPLE_USE_RFC_2292", - "-D__Userspace_os_Darwin", - "-UINET", - "-UINET6", - "-U__APPLE__", - "-DWEBRTC_IOS", - "-DWEBRTC_MAC", - "-DWEBRTC_POSIX", - "-DRTC_ENABLE_VP9", - "-DBSD=1", - "-DUSE_KISS_FFT", - "-DHAVE_PTHREAD", - "-DWEBRTC_APM_DEBUG_DUMP=0", - "-DWEBRTC_USE_BUILTIN_ISAC_FLOAT", - "-DWEBRTC_OPUS_VARIABLE_COMPLEXITY=0", - "-DHAVE_NETINET_IN_H", - "-DWEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE", - "-DSCTP_SIMPLE_ALLOCATOR", - "-DSCTP_PROCESS_LEVEL_LOCKS", - "-D__Userspace__", - "-D__Userspace_os_Darwin", - "-DPACKAGE_VERSION=''", - "-DHAVE_SCTP", - "-DWEBRTC_HAVE_USRSCTP", - "-DWEBRTC_HAVE_SCTP", - "-DNON_WINDOWS_DEFINE", - ] + arch_specific_cflags + optimization_flags, - visibility = ["//visibility:public"], -)''' - arch_specific_crc32c_sources = select({ "@build_bazel_rules_apple//apple:ios_armv7": [ ], @@ -3596,6 +3484,8 @@ dcsctp_sources = [ "webrtc/net/dcsctp/" + path for path in [ "rx/data_tracker.h", "rx/reassembly_queue.h", "rx/reassembly_streams.h", + "rx/interleaved_reassembly_streams.cc", + "rx/interleaved_reassembly_streams.h", "tx/outstanding_data.cc", "tx/retransmission_error_counter.cc", @@ -3608,6 +3498,8 @@ dcsctp_sources = [ "webrtc/net/dcsctp/" + path for path in [ "tx/outstanding_data.h", "tx/rr_send_queue.h", "tx/send_queue.h", + "tx/stream_scheduler.h", + "tx/stream_scheduler.cc", "timer/task_queue_timeout.cc", "timer/timer.cc", @@ -3704,8 +3596,13 @@ fft4g_sources = [ "fft4g/fft4g.cc", ] +opus_headers = [ + "dependencies/third_party/opus/src/include/opus.h", + "dependencies/third_party/opus/src/include/opus_multistream.h", +] + raw_combined_sources = webrtc_sources -combined_sources = [webrtc_source_dir + "/" + path for path in raw_combined_sources] + arch_specific_sources + [ webrtc_source_dir + "/" + "sdk/" + path for path in ios_sources + ios_objc_sources] + absl_sources + fft4g_sources + rnnoise_sources + pffft_sources + crc32c_sources + dcsctp_sources +combined_sources = [webrtc_source_dir + "/" + path for path in raw_combined_sources] + arch_specific_sources + [ webrtc_source_dir + "/" + "sdk/" + path for path in ios_sources + ios_objc_sources] + absl_sources + fft4g_sources + rnnoise_sources + pffft_sources + crc32c_sources + dcsctp_sources + opus_headers objc_library( name = "webrtc_lib", @@ -3716,11 +3613,10 @@ objc_library( "-Ithird-party/webrtc/" + webrtc_source_dir + "/", "-Ithird-party/webrtc/dependencies", "-Ithird-party/webrtc/dependencies/third_party/abseil-cpp", - #"-Ithird-party/webrtc/dependencies/third_party/usrsctp/usrsctplib", - #"-Ithird-party/webrtc/dependencies/third_party/usrsctp/usrsctplib/usrsctplib", "-Ithird-party/webrtc/dependencies/third_party/crc32c/src/include", "-Ithird-party/webrtc/dependencies/third_party/libsrtp/include", "-Ithird-party/webrtc/dependencies/third_party/libsrtp/crypto/include", + "-Ithird-party/webrtc/dependencies/third_party/opus/src/include", "-Ithird-party/libyuv", "-Ithird-party/libyuv/third_party/libyuv/include", "-Ithird-party/webrtc/" + webrtc_source_dir + "/" + "testing/gtest/include", @@ -3734,7 +3630,6 @@ objc_library( 
"-DWEBRTC_USE_BUILTIN_ISAC_FLOAT", "-DWEBRTC_OPUS_VARIABLE_COMPLEXITY=0", "-DHAVE_NETINET_IN_H", - "-DWEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE", "-DSCTP_SIMPLE_ALLOCATOR", "-DSCTP_PROCESS_LEVEL_LOCKS", "-D__Userspace__", diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilDll.cmake b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilDll.cmake index 056343e59c..d53befd44e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilDll.cmake +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilDll.cmake @@ -13,12 +13,11 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/atomic_hook.h" "base/internal/cycleclock.cc" "base/internal/cycleclock.h" + "base/internal/cycleclock_config.h" "base/internal/direct_mmap.h" "base/internal/dynamic_annotations.h" "base/internal/endian.h" "base/internal/errno_saver.h" - "base/internal/exponential_biased.cc" - "base/internal/exponential_biased.h" "base/internal/fast_type_id.h" "base/internal/hide_ptr.h" "base/internal/identity.h" @@ -28,8 +27,7 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/low_level_alloc.h" "base/internal/low_level_scheduling.h" "base/internal/per_thread_tls.h" - "base/internal/periodic_sampler.cc" - "base/internal/periodic_sampler.h" + "base/internal/prefetch.h" "base/internal/pretty_function.h" "base/internal/raw_logging.cc" "base/internal/raw_logging.h" @@ -53,6 +51,7 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/unaligned_access.h" "base/internal/unscaledcycleclock.cc" "base/internal/unscaledcycleclock.h" + "base/internal/unscaledcycleclock_config.h" "base/log_severity.cc" "base/log_severity.h" "base/macros.h" @@ -72,6 +71,7 @@ set(ABSL_INTERNAL_DLL_FILES "container/internal/btree.h" "container/internal/btree_container.h" "container/internal/common.h" + "container/internal/common_policy_traits.h" "container/internal/compressed_tuple.h" "container/internal/container_memory.h" "container/internal/counting_allocator.h" @@ -82,20 +82,36 @@ set(ABSL_INTERNAL_DLL_FILES "container/internal/hashtablez_sampler.cc" "container/internal/hashtablez_sampler.h" "container/internal/hashtablez_sampler_force_weak_definition.cc" - "container/internal/have_sse.h" "container/internal/inlined_vector.h" "container/internal/layout.h" - "container/internal/node_hash_policy.h" + "container/internal/node_slot_policy.h" "container/internal/raw_hash_map.h" "container/internal/raw_hash_set.cc" "container/internal/raw_hash_set.h" "container/internal/tracked.h" "container/node_hash_map.h" "container/node_hash_set.h" + "crc/crc32c.cc" + "crc/crc32c.h" + "crc/internal/cpu_detect.cc" + "crc/internal/cpu_detect.h" + "crc/internal/crc32c.h" + "crc/internal/crc32c_inline.h" + "crc/internal/crc32_x86_arm_combined_simd.h" + "crc/internal/crc.cc" + "crc/internal/crc.h" + "crc/internal/crc_internal.h" + "crc/internal/crc_x86_arm_combined.cc" + "crc/internal/crc_memcpy_fallback.cc" + "crc/internal/crc_memcpy.h" + "crc/internal/crc_memcpy_x86_64.cc" + "crc/internal/crc_non_temporal_memcpy.cc" + "crc/internal/crc_x86_arm_combined.cc" + "crc/internal/non_temporal_arm_intrinsics.h" + "crc/internal/non_temporal_memcpy.h" "debugging/failure_signal_handler.cc" "debugging/failure_signal_handler.h" "debugging/leak_check.h" - "debugging/leak_check_disable.cc" "debugging/stacktrace.cc" "debugging/stacktrace.h" "debugging/symbolize.cc" @@ -114,9 +130,11 @@ set(ABSL_INTERNAL_DLL_FILES "debugging/internal/symbolize.h" "debugging/internal/vdso_support.cc" "debugging/internal/vdso_support.h" + "functional/any_invocable.h" 
"functional/internal/front_binder.h" "functional/bind_front.h" "functional/function_ref.h" + "functional/internal/any_invocable.h" "functional/internal/function_ref.h" "hash/hash.h" "hash/internal/city.h" @@ -133,6 +151,10 @@ set(ABSL_INTERNAL_DLL_FILES "numeric/int128.h" "numeric/internal/bits.h" "numeric/internal/representation.h" + "profiling/internal/exponential_biased.cc" + "profiling/internal/exponential_biased.h" + "profiling/internal/periodic_sampler.cc" + "profiling/internal/periodic_sampler.h" "profiling/internal/sample_recorder.h" "random/bernoulli_distribution.h" "random/beta_distribution.h" @@ -196,22 +218,29 @@ set(ABSL_INTERNAL_DLL_FILES "strings/charconv.h" "strings/cord.cc" "strings/cord.h" + "strings/cord_analysis.cc" + "strings/cord_analysis.h" + "strings/cord_buffer.cc" + "strings/cord_buffer.h" "strings/escaping.cc" "strings/escaping.h" "strings/internal/charconv_bigint.cc" "strings/internal/charconv_bigint.h" "strings/internal/charconv_parse.cc" "strings/internal/charconv_parse.h" + "strings/internal/cord_data_edge.h" "strings/internal/cord_internal.cc" "strings/internal/cord_internal.h" - "strings/internal/cord_rep_consume.h" - "strings/internal/cord_rep_consume.cc" "strings/internal/cord_rep_btree.cc" "strings/internal/cord_rep_btree.h" "strings/internal/cord_rep_btree_navigator.cc" "strings/internal/cord_rep_btree_navigator.h" "strings/internal/cord_rep_btree_reader.cc" "strings/internal/cord_rep_btree_reader.h" + "strings/internal/cord_rep_crc.cc" + "strings/internal/cord_rep_crc.h" + "strings/internal/cord_rep_consume.h" + "strings/internal/cord_rep_consume.cc" "strings/internal/cord_rep_flat.h" "strings/internal/cord_rep_ring.cc" "strings/internal/cord_rep_ring.h" @@ -227,8 +256,13 @@ set(ABSL_INTERNAL_DLL_FILES "strings/internal/cordz_statistics.h" "strings/internal/cordz_update_scope.h" "strings/internal/cordz_update_tracker.h" + "strings/internal/damerau_levenshtein_distance.h" + "strings/internal/damerau_levenshtein_distance.cc" "strings/internal/stl_type_traits.h" "strings/internal/string_constant.h" + "strings/internal/stringify_sink.h" + "strings/internal/stringify_sink.cc" + "strings/internal/has_absl_stringify.h" "strings/match.cc" "strings/match.h" "strings/numbers.cc" @@ -341,126 +375,160 @@ set(ABSL_INTERNAL_DLL_FILES "types/internal/span.h" "types/variant.h" "utility/utility.h" + "debugging/leak_check.cc" ) set(ABSL_INTERNAL_DLL_TARGETS - "stacktrace" - "symbolize" - "examine_stack" - "failure_signal_handler" - "debugging_internal" - "demangle_internal" - "leak_check" - "leak_check_disable" - "stack_consumption" - "debugging" - "hash" - "spy_hash_state" - "city" - "memory" - "strings" - "strings_internal" - "cord" - "str_format" - "str_format_internal" - "pow10_helper" - "int128" - "numeric" - "utility" - "any" - "bad_any_cast" - "bad_any_cast_impl" - "span" - "optional" - "bad_optional_access" - "bad_variant_access" - "variant" - "compare" "algorithm" "algorithm_container" - "graphcycles_internal" - "kernel_timeout_internal" - "synchronization" - "thread_pool" - "bind_front" - "function_ref" + "any" + "any_invocable" "atomic_hook" - "log_severity" - "raw_logging_internal" - "spinlock_wait" - "config" - "dynamic_annotations" - "core_headers" - "malloc_internal" - "base_internal" + "bad_any_cast" + "bad_any_cast_impl" + "bad_optional_access" + "bad_variant_access" "base" - "throw_delegate" - "pretty_function" - "endian" + "base_internal" + "bind_front" "bits" - "exponential_biased" - "periodic_sampler" - "scoped_set_env" - "type_traits" - "meta" - 
"random_random" - "random_bit_gen_ref" - "random_distributions" - "random_seed_gen_exception" - "random_seed_sequences" - "random_internal_traits" - "random_internal_distribution_caller" - "random_internal_distributions" - "random_internal_fast_uniform_bits" - "random_internal_seed_material" - "random_internal_pool_urbg" - "random_internal_explicit_seed_seq" - "random_internal_sequence_urbg" - "random_internal_salted_seed_seq" - "random_internal_iostream_state_saver" - "random_internal_generate_real" - "random_internal_wide_multiply" - "random_internal_fastmath" - "random_internal_nonsecure_base" - "random_internal_pcg_engine" - "random_internal_randen_engine" - "random_internal_platform" - "random_internal_randen" - "random_internal_randen_slow" - "random_internal_randen_hwaes" - "random_internal_randen_hwaes_impl" - "random_internal_uniform_helper" - "status" - "time" - "civil_time" - "time_zone" - "container" "btree" + "city" + "civil_time" + "compare" "compressed_tuple" - "fixed_array" - "inlined_vector_internal" - "inlined_vector" + "config" + "container" + "container_common" + "container_memory" + "cord" + "core_headers" "counting_allocator" + "crc_cpu_detect", + "crc_internal", + "crc32c", + "debugging" + "debugging_internal" + "demangle_internal" + "dynamic_annotations" + "endian" + "examine_stack" + "exponential_biased" + "failure_signal_handler" + "fixed_array" "flat_hash_map" "flat_hash_set" - "node_hash_map" - "node_hash_set" - "container_memory" + "function_ref" + "graphcycles_internal" + "hash" "hash_function_defaults" "hash_policy_traits" - "hashtablez_sampler" "hashtable_debug" "hashtable_debug_hooks" - "have_sse" - "node_hash_policy" - "raw_hash_map" - "container_common" - "raw_hash_set" + "hashtablez_sampler" + "inlined_vector" + "inlined_vector_internal" + "int128" + "kernel_timeout_internal" "layout" - "tracked" + "leak_check" + "log_severity" + "malloc_internal" + "memory" + "meta" + "node_hash_map" + "node_hash_set" + "node_slot_policy" + "non_temporal_arm_intrinsics", + "non_temporal_memcpy", + "numeric" + "optional" + "periodic_sampler" + "pow10_helper" + "pretty_function" + "random_bit_gen_ref" + "random_distributions" + "random_internal_distribution_caller" + "random_internal_distributions" + "random_internal_explicit_seed_seq" + "random_internal_fastmath" + "random_internal_fast_uniform_bits" + "random_internal_generate_real" + "random_internal_iostream_state_saver" + "random_internal_nonsecure_base" + "random_internal_pcg_engine" + "random_internal_platform" + "random_internal_pool_urbg" + "random_internal_randen" + "random_internal_randen_engine" + "random_internal_randen_hwaes" + "random_internal_randen_hwaes_impl" + "random_internal_randen_slow" + "random_internal_salted_seed_seq" + "random_internal_seed_material" + "random_internal_sequence_urbg" + "random_internal_traits" + "random_internal_uniform_helper" + "random_internal_wide_multiply" + "random_random" + "random_seed_gen_exception" + "random_seed_sequences" + "raw_hash_map" + "raw_hash_set" + "raw_logging_internal" "sample_recorder" + "scoped_set_env" + "span" + "spinlock_wait" + "spy_hash_state" + "stack_consumption" + "stacktrace" + "status" + "str_format" + "str_format_internal" + "strings" + "strings_internal" + "symbolize" + "synchronization" + "thread_pool" + "throw_delegate" + "time" + "time_zone" + "tracked" + "type_traits" + "utility" + "variant" ) +function(_absl_target_compile_features_if_available TARGET TYPE FEATURE) + if(FEATURE IN_LIST CMAKE_CXX_COMPILE_FEATURES) + 
target_compile_features(${TARGET} ${TYPE} ${FEATURE}) + else() + message(WARNING "Feature ${FEATURE} is unknown for the CXX compiler") + endif() +endfunction() + +include(CheckCXXSourceCompiles) + +check_cxx_source_compiles( + [==[ +#ifdef _MSC_VER +# if _MSVC_LANG < 201700L +# error "The compiler defaults or is configured for C++ < 17" +# endif +#elif __cplusplus < 201700L +# error "The compiler defaults or is configured for C++ < 17" +#endif +int main() { return 0; } +]==] + ABSL_INTERNAL_AT_LEAST_CXX17) + +if(ABSL_INTERNAL_AT_LEAST_CXX17) + set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17) +else() + set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_14) +endif() + function(absl_internal_dll_contains) cmake_parse_arguments(ABSL_INTERNAL_DLL "" @@ -538,7 +606,27 @@ function(absl_make_dll) NOMINMAX INTERFACE ${ABSL_CC_LIB_DEFINES} + ABSL_CONSUME_DLL ) + + if(ABSL_PROPAGATE_CXX_STD) + # Abseil libraries require C++14 as the current minimum standard. When + # compiled with C++17 (either because it is the compiler's default or + # explicitly requested), then Abseil requires C++17. + _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE}) + else() + # Note: This is legacy (before CMake 3.8) behavior. Setting the + # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is + # initialized by CMAKE_CXX_STANDARD) should have no real effect, since + # that is the default value anyway. + # + # CXX_STANDARD_REQUIRED does guard against the top-level CMake project + # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents + # "decaying" to an older standard if the requested one isn't available). + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) + endif() + install(TARGETS abseil_dll EXPORT ${PROJECT_NAME}Targets RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilHelpers.cmake b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilHelpers.cmake index 17c4f4499c..8e08d3fc8d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilHelpers.cmake +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/AbseilHelpers.cmake @@ -26,6 +26,12 @@ if(NOT DEFINED ABSL_IDE_FOLDER) set(ABSL_IDE_FOLDER Abseil) endif() +if(ABSL_USE_SYSTEM_INCLUDES) + set(ABSL_INTERNAL_INCLUDE_WARNING_GUARD SYSTEM) +else() + set(ABSL_INTERNAL_INCLUDE_WARNING_GUARD "") +endif() + # absl_cc_library() # # CMake function to imitate Bazel's cc_library rule. @@ -40,7 +46,8 @@ endif() # LINKOPTS: List of link options # PUBLIC: Add this so that this library will be exported under absl:: # Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal. -# TESTONLY: When added, this target will only be built if BUILD_TESTING=ON. +# TESTONLY: When added, this target will only be built if both +# BUILD_TESTING=ON and ABSL_BUILD_TESTING=ON. 
# # Note: # By default, absl_cc_library will always create a library named absl_${NAME}, @@ -82,7 +89,9 @@ function(absl_cc_library) ${ARGN} ) - if(ABSL_CC_LIB_TESTONLY AND NOT BUILD_TESTING) + if(ABSL_CC_LIB_TESTONLY AND + NOT ((BUILD_TESTING AND ABSL_BUILD_TESTING) OR + (ABSL_BUILD_TEST_HELPERS AND ABSL_CC_LIB_PUBLIC))) return() endif() @@ -164,10 +173,14 @@ function(absl_cc_library) set(PC_CFLAGS "${PC_CFLAGS} ${cflag}") elseif(${cflag} MATCHES "^(-W|/w[1234eo])") # Don't impose our warnings on others. + elseif(${cflag} MATCHES "^-m") + # Don't impose CPU instruction requirements on others, as + # the code performs feature detection on runtime. else() set(PC_CFLAGS "${PC_CFLAGS} ${cflag}") endif() endforeach() + string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}") FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\ prefix=${CMAKE_INSTALL_PREFIX}\n\ exec_prefix=\${prefix}\n\ @@ -179,7 +192,7 @@ Description: Abseil ${_NAME} library\n\ URL: https://abseil.io/\n\ Version: ${PC_VERSION}\n\ Requires:${PC_DEPS}\n\ -Libs: -L\${libdir} $ $<$>:-labsl_${_NAME}>\n\ +Libs: -L\${libdir} ${PC_LINKOPTS} $<$>:-labsl_${_NAME}>\n\ Cflags: -I\${includedir}${PC_CFLAGS}\n") INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") @@ -236,7 +249,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") # unconditionally. set_property(TARGET ${_NAME} PROPERTY LINKER_LANGUAGE "CXX") - target_include_directories(${_NAME} + target_include_directories(${_NAME} ${ABSL_INTERNAL_INCLUDE_WARNING_GUARD} PUBLIC "$" $ @@ -255,10 +268,10 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") endif() if(ABSL_PROPAGATE_CXX_STD) - # Abseil libraries require C++11 as the current minimum standard. - # Top-level application CMake projects should ensure a consistent C++ - # standard for all compiled sources by setting CMAKE_CXX_STANDARD. - target_compile_features(${_NAME} PUBLIC cxx_std_11) + # Abseil libraries require C++14 as the current minimum standard. When + # compiled with C++17 (either because it is the compiler's default or + # explicitly requested), then Abseil requires C++17. + _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE}) else() # Note: This is legacy (before CMake 3.8) behavior. Setting the # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is @@ -284,7 +297,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") else() # Generating header-only library add_library(${_NAME} INTERFACE) - target_include_directories(${_NAME} + target_include_directories(${_NAME} ${ABSL_INTERNAL_INCLUDE_WARNING_GUARD} INTERFACE "$" $ @@ -303,10 +316,10 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES}) if(ABSL_PROPAGATE_CXX_STD) - # Abseil libraries require C++11 as the current minimum standard. + # Abseil libraries require C++14 as the current minimum standard. # Top-level application CMake projects should ensure a consistent C++ # standard for all compiled sources by setting CMAKE_CXX_STANDARD. - target_compile_features(${_NAME} INTERFACE cxx_std_11) + _absl_target_compile_features_if_available(${_NAME} INTERFACE ${ABSL_INTERNAL_CXX_STD_FEATURE}) # (INTERFACE libraries can't have the CXX_STANDARD property set, so there # is no legacy behavior else case). 
@@ -364,7 +377,7 @@ endfunction() # GTest::gtest_main # ) function(absl_cc_test) - if(NOT BUILD_TESTING) + if(NOT (BUILD_TESTING AND ABSL_BUILD_TESTING)) return() endif() @@ -415,10 +428,10 @@ function(absl_cc_test) set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) if(ABSL_PROPAGATE_CXX_STD) - # Abseil libraries require C++11 as the current minimum standard. + # Abseil libraries require C++14 as the current minimum standard. # Top-level application CMake projects should ensure a consistent C++ # standard for all compiled sources by setting CMAKE_CXX_STANDARD. - target_compile_features(${_NAME} PUBLIC cxx_std_11) + _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE}) else() # Note: This is legacy (before CMake 3.8) behavior. Setting the # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is @@ -434,11 +447,3 @@ function(absl_cc_test) add_test(NAME ${_NAME} COMMAND ${_NAME}) endfunction() - - -function(check_target my_target) - if(NOT TARGET ${my_target}) - message(FATAL_ERROR " ABSL: compiling absl requires a ${my_target} CMake target in your project, - see CMake/README.md for more details") - endif(NOT TARGET ${my_target}) -endfunction() diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/Googletest/CMakeLists.txt.in b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/Googletest/CMakeLists.txt.in index 5769e3a97b..75691b1117 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/Googletest/CMakeLists.txt.in +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/Googletest/CMakeLists.txt.in @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 2.8.2) +cmake_minimum_required(VERSION 3.10) project(googletest-external NONE) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/README.md b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/README.md index f8b27e63f6..19fb327cfe 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/README.md +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/README.md @@ -20,8 +20,10 @@ googletest framework ### Step-by-Step Instructions 1. If you want to build the Abseil tests, integrate the Abseil dependency -[Google Test](https://github.com/google/googletest) into your CMake project. To disable Abseil tests, you have to pass -`-DBUILD_TESTING=OFF` when configuring your project with CMake. +[Google Test](https://github.com/google/googletest) into your CMake +project. To disable Abseil tests, you have to pass either +`-DBUILD_TESTING=OFF` or `-DABSL_BUILD_TESTING=OFF` when configuring your +project with CMake. 2. Download Abseil and copy it into a subdirectory in your CMake project or add Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your @@ -37,12 +39,12 @@ section of your executable or of your library.
Here is a short CMakeLists.txt example of an application project using Abseil. ```cmake -cmake_minimum_required(VERSION 3.8.2) +cmake_minimum_required(VERSION 3.10) project(my_app_project) # Pick the C++ standard to compile with. -# Abseil currently supports C++11, C++14, and C++17. -set(CMAKE_CXX_STANDARD 11) +# Abseil currently supports C++14, C++17, and C++20. +set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) add_subdirectory(abseil-cpp) @@ -60,7 +62,7 @@ will control Abseil library targets) is set to at least that minimum. For example: ```cmake -cmake_minimum_required(VERSION 3.8.2) +cmake_minimum_required(VERSION 3.10) project(my_lib_project) # Leave C++ standard up to the root application, so set it only if this is the @@ -91,7 +93,8 @@ setting a consistent `CMAKE_CXX_STANDARD` that is sufficiently high. ### Running Abseil Tests with CMake -Use the `-DBUILD_TESTING=ON` flag to run Abseil tests. +Use the `-DABSL_BUILD_TESTING=ON` flag to run Abseil tests. Note that +BUILD_TESTING must also be on (the default). You will need to provide Abseil with a Googletest dependency. There are two options for how to do this: @@ -109,7 +112,7 @@ For example, to run just the Abseil tests, you could use this script: cd path/to/abseil-cpp mkdir build cd build -cmake -DBUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON .. +cmake -DABSL_BUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON .. make -j ctest ``` @@ -175,7 +178,7 @@ cmake --build /temporary/build/abseil-cpp --target install ## Google Test Options -`-DBUILD_TESTING=ON` must be set to enable testing +`-DABSL_BUILD_TESTING=ON` must be set to enable testing - Have Abseil download and build Google Test for you: `-DABSL_USE_EXTERNAL_GOOGLETEST=OFF` (default) - Download and build latest Google Test: `-DABSL_USE_GOOGLETEST_HEAD=ON` diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/CMakeLists.txt index b865b2ec50..30c23b2c6b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/CMakeLists.txt @@ -15,7 +15,7 @@ # A simple CMakeLists.txt for testing cmake installation -cmake_minimum_required(VERSION 3.5) +cmake_minimum_required(VERSION 3.10) project(absl_cmake_testing CXX) add_executable(simple simple.cc) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/test.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/test.sh index 5a78c92cd1..cc028bac86 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/test.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMake/install_test_project/test.sh @@ -55,10 +55,10 @@ cmake "${absl_dir}" \ -DABSL_USE_EXTERNAL_GOOGLETEST=ON \ -DABSL_FIND_GOOGLETEST=ON \ -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_TESTING=ON \ + -DABSL_BUILD_TESTING=ON \ -DBUILD_SHARED_LIBS="${build_shared_libs}" make -j $(nproc) -ctest -j $(nproc) +ctest -j $(nproc) --output-on-failure make install ldconfig popd diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMakeLists.txt index 7c8bfff4c5..9e10257851 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/CMakeLists.txt +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/CMakeLists.txt @@ -14,12 +14,9 @@ # limitations under the License. # -# Most widely used distributions have cmake 3.5 or greater available as of March -# 2019. A notable exception is RHEL-7 (CentOS7). You can install a current -# version of CMake by first installing Extra Packages for Enterprise Linux -# (https://fedoraproject.org/wiki/EPEL#Extra_Packages_for_Enterprise_Linux_.28EPEL.29) -# and then issuing `yum install cmake3` on the command line. -cmake_minimum_required(VERSION 3.5) +# https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md +# As of 2022-09-06, CMake 3.10 is the minimum supported version. +cmake_minimum_required(VERSION 3.10) # Compiler id for Apple Clang is now AppleClang. if (POLICY CMP0025) @@ -46,9 +43,10 @@ if (POLICY CMP0091) cmake_policy(SET CMP0091 NEW) endif (POLICY CMP0091) -# Set BUILD_TESTING to OFF by default. -# This must come before the project() and include(CTest) lines. -OPTION(BUILD_TESTING "Build tests" OFF) +# try_compile() honors the CMAKE_CXX_STANDARD value +if (POLICY CMP0067) + cmake_policy(SET CMP0067 NEW) +endif (POLICY CMP0067) project(absl LANGUAGES CXX) include(CTest) @@ -68,12 +66,16 @@ else() endif() option(ABSL_PROPAGATE_CXX_STD - "Use CMake C++ standard meta features (e.g. cxx_std_11) that propagate to targets that link to Abseil" + "Use CMake C++ standard meta features (e.g. cxx_std_14) that propagate to targets that link to Abseil" OFF) # TODO: Default to ON for CMake 3.8 and greater. if((${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.8) AND (NOT ABSL_PROPAGATE_CXX_STD)) message(WARNING "A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON for CMake 3.8 and up. We recommend enabling this option to ensure your project still builds correctly.") endif() +option(ABSL_USE_SYSTEM_INCLUDES + "Silence warnings in Abseil headers by marking them as SYSTEM includes" + OFF) + list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/CMake ${CMAKE_CURRENT_LIST_DIR}/absl/copts @@ -111,8 +113,15 @@ find_package(Threads REQUIRED) include(CMakeDependentOption) +option(ABSL_BUILD_TESTING + "If ON, Abseil will build all of Abseil's own tests." OFF) + +option(ABSL_BUILD_TEST_HELPERS + "If ON, Abseil will build libraries that you can use to write tests against Abseil code. This option requires that Abseil is configured to use GoogleTest." + OFF) + option(ABSL_USE_EXTERNAL_GOOGLETEST - "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF) + "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subdirectory." OFF) cmake_dependent_option(ABSL_FIND_GOOGLETEST "If ON, Abseil will use find_package(GTest) rather than assuming that GoogleTest is already provided by the including project." @@ -130,13 +139,18 @@ set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout." 
) -if(BUILD_TESTING) - ## check targets +if((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS) if (ABSL_USE_EXTERNAL_GOOGLETEST) if (ABSL_FIND_GOOGLETEST) find_package(GTest REQUIRED) - else() - if (NOT TARGET gtest AND NOT TARGET GTest::gtest) + elseif(NOT TARGET GTest::gtest) + if(TARGET gtest) + # When Google Test is included directly rather than through find_package, the aliases are missing. + add_library(GTest::gtest ALIAS gtest) + add_library(GTest::gtest_main ALIAS gtest_main) + add_library(GTest::gmock ALIAS gmock) + add_library(GTest::gmock_main ALIAS gmock_main) + else() message(FATAL_ERROR "ABSL_USE_EXTERNAL_GOOGLETEST is ON and ABSL_FIND_GOOGLETEST is OFF, which means that the top-level project must build the Google Test project. However, the target gtest was not found.") endif() endif() @@ -146,7 +160,7 @@ if(BUILD_TESTING) message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL") endif() if(ABSL_USE_GOOGLETEST_HEAD) - set(absl_gtest_download_url "https://github.com/google/googletest/archive/master.zip") + set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip") elseif(ABSL_GOOGLETEST_DOWNLOAD_URL) set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL}) endif() @@ -157,25 +171,6 @@ if(BUILD_TESTING) endif() include(CMake/Googletest/DownloadGTest.cmake) endif() - - if (NOT ABSL_FIND_GOOGLETEST) - # When Google Test is included directly rather than through find_package, the aliases are missing. - add_library(GTest::gtest_main ALIAS gtest_main) - add_library(GTest::gtest ALIAS gtest) - add_library(GTest::gmock ALIAS gmock) - endif() - - check_target(GTest::gtest) - check_target(GTest::gtest_main) - check_target(GTest::gmock) - check_target(GTest::gmock_main) - - list(APPEND ABSL_TEST_COMMON_LIBRARIES - GTest::gtest_main - GTest::gtest - GTest::gmock - ${CMAKE_THREAD_LIBS_INIT} - ) endif() add_subdirectory(absl) @@ -230,4 +225,25 @@ if(ABSL_ENABLE_INSTALL) PATTERN "copts" EXCLUDE PATTERN "testdata" EXCLUDE ) + + file(READ "absl/base/options.h" ABSL_INTERNAL_OPTIONS_H_CONTENTS) + if (ABSL_INTERNAL_AT_LEAST_CXX17) + string(REGEX REPLACE + "#define ABSL_OPTION_USE_STD_([^ ]*) 2" + "#define ABSL_OPTION_USE_STD_\\1 1" + ABSL_INTERNAL_OPTIONS_H_PINNED + "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}") + else() + string(REGEX REPLACE + "#define ABSL_OPTION_USE_STD_([^ ]*) 2" + "#define ABSL_OPTION_USE_STD_\\1 0" + ABSL_INTERNAL_OPTIONS_H_PINNED + "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}") + endif() + file(WRITE "${CMAKE_BINARY_DIR}/options-pinned.h" "${ABSL_INTERNAL_OPTIONS_H_PINNED}") + + install(FILES "${CMAKE_BINARY_DIR}/options-pinned.h" + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/absl/base + RENAME "options.h") + endif() # ABSL_ENABLE_INSTALL diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/README.md b/third-party/webrtc/dependencies/third_party/abseil-cpp/README.md index db3a7b447a..0816692e13 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/README.md +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/README.md @@ -1,7 +1,7 @@ # Abseil - C++ Common Libraries The repository contains the Abseil C++ library code. Abseil is an open-source -collection of C++ code (compliant to C++11) designed to augment the C++ +collection of C++ code (compliant to C++14) designed to augment the C++ standard library. ## Table of Contents @@ -46,26 +46,28 @@ the Abseil code, running tests, and getting a simple binary working. 
[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official build systems for Abseil. - See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information on building Abseil using the Bazel build system. - If you require CMake support, please check the [CMake build instructions](CMake/README.md) and [CMake Quickstart](https://abseil.io/docs/cpp/quickstart-cmake). + ## Support -Abseil is officially supported on many platforms. See the [Abseil -platform support -guide](https://abseil.io/docs/cpp/platforms/platforms) for details on -supported operating systems, compilers, CPUs, etc. +Abseil follows Google's [Foundational C++ Support +Policy](https://opensource.google/documentation/policies/cplusplus-support). See +[this +table](https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md) +for a list of currently supported versions compilers, platforms, and build +tools. + ## Codemap Abseil contains the following C++ library components: -* [`base`](absl/base/) Abseil Fundamentals +* [`base`](absl/base/)
The `base` library contains initialization code and other code which all other Abseil code depends on. Code within `base` may not depend on any other code (other than the C++ standard library). @@ -78,29 +80,45 @@ Abseil contains the following C++ library components: * [`container`](absl/container/)
The `container` library contains additional STL-style containers, including Abseil's unordered "Swiss table" containers. +* [`crc`](absl/crc/) The `crc` library contains code for + computing error-detecting cyclic redundancy checks on data. * [`debugging`](absl/debugging/)
The `debugging` library contains code useful for enabling leak checks, and stacktrace and symbolization utilities. +* [`flags`](absl/flags/) +
The `flags` library contains code for handling command line flags for + libraries and binaries built with Abseil. * [`hash`](absl/hash/)
The `hash` library contains the hashing framework and default hash functor implementations for hashable types in Abseil. +* [`iterator`](absl/iterator/) +
The `iterator` library contains utilities for augmenting ranges in + range-based for loops. +* [`log`](absl/log/) +
The `log` library contains `LOG` and `CHECK` macros and facilities + for writing logged messages out to disk, `stderr`, or user-extensible + destinations. * [`memory`](absl/memory/) -
The `memory` library contains C++11-compatible versions of - `std::make_unique()` and related memory management facilities. +
The `memory` library contains memory management facilities that augment + C++'s `<memory>` library. * [`meta`](absl/meta/) -
The `meta` library contains C++11-compatible versions of type checks +
The `meta` library contains compatible versions of type checks + available within C++14 and C++17 versions of the C++ `<type_traits>` library. * [`numeric`](absl/numeric/) -
The `numeric` library contains C++11-compatible 128-bit integers. +
The `numeric` library contains 128-bit integer types as well as + implementations of C++20's bitwise math functions. * [`profiling`](absl/profiling/)
The `profiling` library contains utility code for profiling C++ entities. It is currently a private dependency of other Abseil libraries. +* [`random`](absl/random/) +
The `random` library contains functions for generating pseudorandom + values. * [`status`](absl/status/) -
The `status` contains abstractions for error handling, specifically - `absl::Status` and `absl::StatusOr`. +
The `status` library contains abstractions for error handling, + specifically `absl::Status` and `absl::StatusOr`. * [`strings`](absl/strings/)
The `strings` library contains a variety of string routines and - utilities, including a C++11-compatible version of the C++17 + utilities, including a C++14-compatible version of the C++17 `std::string_view` type. * [`synchronization`](absl/synchronization/)
The `synchronization` library contains concurrency primitives (Abseil's @@ -112,10 +130,11 @@ Abseil contains the following C++ library components: time zones. * [`types`](absl/types/)
The `types` library contains non-container utility types, like a - C++11-compatible version of the C++17 `std::optional` type. + C++14-compatible version of the C++17 `std::optional` type. * [`utility`](absl/utility/)
The `utility` library contains utility and helper code. + ## Releases Abseil recommends users "live-at-head" (update to the latest commit from the @@ -125,11 +144,13 @@ Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport fixes for severe bugs. See our [release management](https://abseil.io/about/releases) document for more details. + ## License The Abseil C++ library is licensed under the terms of the Apache license. See [LICENSE](LICENSE) for more information. + ## Links For more information about Abseil: diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/CMakeLists.txt index b1715846f0..19a91c62b8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/CMakeLists.txt @@ -18,10 +18,12 @@ add_subdirectory(base) add_subdirectory(algorithm) add_subdirectory(cleanup) add_subdirectory(container) +add_subdirectory(crc) add_subdirectory(debugging) add_subdirectory(flags) add_subdirectory(functional) add_subdirectory(hash) +add_subdirectory(log) add_subdirectory(memory) add_subdirectory(meta) add_subdirectory(numeric) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/CMakeLists.txt index 609d858946..181b49ca02 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/CMakeLists.txt @@ -35,6 +35,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::algorithm + absl::config GTest::gmock_main ) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/algorithm_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/algorithm_test.cc index 81fccb6135..d18df0240f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/algorithm_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/algorithm_test.cc @@ -20,6 +20,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/base/config.h" namespace { @@ -50,7 +51,15 @@ TEST(EqualTest, EmptyRange) { std::vector empty1; std::vector empty2; + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105705 +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnonnull" +#endif EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end())); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end())); EXPECT_TRUE( absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end())); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container.h index c38a4a63db..26b1952923 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container.h @@ -166,7 +166,7 @@ container_algorithm_internal::ContainerDifferenceType c_distance( // c_all_of() // // Container-based version of the `std::all_of()` function to -// test a condition on all elements within a container. 
+// test if all elements within a container satisfy a condition. template bool c_all_of(const C& c, Pred&& pred) { return std::all_of(container_algorithm_internal::c_begin(c), diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container_test.cc index 605afc8040..0fbc7773e8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/container_test.cc @@ -67,13 +67,16 @@ bool Equals(int v1, int v2) { return v1 == v2; } bool IsOdd(int x) { return x % 2 != 0; } TEST_F(NonMutatingTest, Distance) { - EXPECT_EQ(container_.size(), absl::c_distance(container_)); - EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_)); - EXPECT_EQ(vector_.size(), absl::c_distance(vector_)); - EXPECT_EQ(ABSL_ARRAYSIZE(array_), absl::c_distance(array_)); + EXPECT_EQ(container_.size(), + static_cast(absl::c_distance(container_))); + EXPECT_EQ(sequence_.size(), static_cast(absl::c_distance(sequence_))); + EXPECT_EQ(vector_.size(), static_cast(absl::c_distance(vector_))); + EXPECT_EQ(ABSL_ARRAYSIZE(array_), + static_cast(absl::c_distance(array_))); // Works with a temporary argument. - EXPECT_EQ(vector_.size(), absl::c_distance(std::vector(vector_))); + EXPECT_EQ(vector_.size(), + static_cast(absl::c_distance(std::vector(vector_)))); } TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/equal_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/equal_benchmark.cc index 7bf62c9a7f..948cd65c54 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/equal_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/algorithm/equal_benchmark.cc @@ -15,8 +15,8 @@ #include #include -#include "benchmark/benchmark.h" #include "absl/algorithm/algorithm.h" +#include "benchmark/benchmark.h" namespace { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/CMakeLists.txt index 7d56aa1346..26e2b48a87 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/CMakeLists.txt @@ -16,6 +16,7 @@ find_library(LIBRT rt) +# Internal-only target, do not depend on directly. absl_cc_library( NAME atomic_hook @@ -28,6 +29,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME errno_saver @@ -52,6 +54,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME raw_logging_internal @@ -63,11 +66,13 @@ absl_cc_library( absl::atomic_hook absl::config absl::core_headers + absl::errno_saver absl::log_severity COPTS ${ABSL_DEFAULT_COPTS} ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME spinlock_wait @@ -131,6 +136,7 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME malloc_internal @@ -151,6 +157,7 @@ absl_cc_library( Threads::Threads ) +# Internal-only target, do not depend on directly. 
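As context for the reworded `c_all_of()` comment in the container.h hunk above, here is a minimal usage sketch; the container and predicate are illustrative and not taken from the patch, only the `absl::c_all_of()` call itself is the documented API.

```cpp
// Sketch: container-based counterpart of std::all_of(), per the comment above.
#include <iostream>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  std::vector<int> v = {2, 4, 6, 8};
  // True only if every element satisfies the predicate.
  bool all_even = absl::c_all_of(v, [](int x) { return x % 2 == 0; });
  std::cout << (all_even ? "all even" : "not all even") << "\n";  // "all even"
  return 0;
}
```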
absl_cc_library( NAME base_internal @@ -174,6 +181,7 @@ absl_cc_library( "call_once.h" "casts.h" "internal/cycleclock.h" + "internal/cycleclock_config.h" "internal/low_level_scheduling.h" "internal/per_thread_tls.h" "internal/spinlock.h" @@ -181,6 +189,7 @@ absl_cc_library( "internal/thread_identity.h" "internal/tsan_mutex_interface.h" "internal/unscaledcycleclock.h" + "internal/unscaledcycleclock_config.h" SRCS "internal/cycleclock.cc" "internal/spinlock.cc" @@ -192,7 +201,7 @@ absl_cc_library( LINKOPTS ${ABSL_DEFAULT_LINKOPTS} $<$:-lrt> - $<$:"advapi32"> + $<$:-ladvapi32> DEPS absl::atomic_hook absl::base_internal @@ -207,6 +216,7 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME throw_delegate @@ -221,6 +231,7 @@ absl_cc_library( absl::raw_logging_internal ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME exception_testing @@ -234,6 +245,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME pretty_function @@ -243,6 +255,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME exception_safety_testing @@ -276,6 +289,7 @@ absl_cc_test( GTest::gtest_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME atomic_hook_test_helper @@ -375,6 +389,7 @@ absl_cc_test( GTest::gtest_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME spinlock_test_common @@ -409,6 +424,7 @@ absl_cc_test( GTest::gtest_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME endian @@ -519,60 +535,7 @@ absl_cc_test( GTest::gtest_main ) -absl_cc_library( - NAME - exponential_biased - SRCS - "internal/exponential_biased.cc" - HDRS - "internal/exponential_biased.h" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::config - absl::core_headers -) - -absl_cc_test( - NAME - exponential_biased_test - SRCS - "internal/exponential_biased_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::exponential_biased - absl::strings - GTest::gmock_main -) - -absl_cc_library( - NAME - periodic_sampler - SRCS - "internal/periodic_sampler.cc" - HDRS - "internal/periodic_sampler.h" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::core_headers - absl::exponential_biased -) - -absl_cc_test( - NAME - periodic_sampler_test - SRCS - "internal/periodic_sampler_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::core_headers - absl::periodic_sampler - GTest::gmock_main -) - +# Internal-only target, do not depend on directly. absl_cc_library( NAME scoped_set_env @@ -624,6 +587,7 @@ absl_cc_test( GTest::gtest_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME strerror @@ -655,6 +619,7 @@ absl_cc_test( GTest::gtest_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME fast_type_id @@ -680,6 +645,32 @@ absl_cc_test( GTest::gtest_main ) +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + prefetch + HDRS + "internal/prefetch.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_test( + NAME + prefetch_test + SRCS + "internal/prefetch_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::prefetch + GTest::gtest_main +) + absl_cc_test( NAME optimization_test diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/attributes.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/attributes.h index 2665d8f387..e11a064add 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/attributes.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/attributes.h @@ -136,9 +136,10 @@ // for further information. // The MinGW compiler doesn't complain about the weak attribute until the link // step, presumably because Windows doesn't use ELF binaries. -#if (ABSL_HAVE_ATTRIBUTE(weak) || \ - (defined(__GNUC__) && !defined(__clang__))) && \ - (!defined(_WIN32) || __clang_major__ < 9) && !defined(__MINGW32__) +#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ + !defined(__MINGW32__) #undef ABSL_ATTRIBUTE_WEAK #define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) #define ABSL_HAVE_ATTRIBUTE_WEAK 1 @@ -212,6 +213,9 @@ // https://gcc.gnu.org/gcc-4.8/changes.html #if ABSL_HAVE_ATTRIBUTE(no_sanitize_address) #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#elif defined(_MSC_VER) && _MSC_VER >= 1928 +// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS #endif @@ -311,15 +315,22 @@ __attribute__((section(#name))) __attribute__((noinline)) #endif - // ABSL_ATTRIBUTE_SECTION_VARIABLE // // Tells the compiler/linker to put a given variable into a section and define // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. // This functionality is supported by GNU linker. #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE +#ifdef _AIX +// __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo +// op which includes an additional integer as part of its syntax indcating +// alignment. If data fall under different alignments then you might get a +// compilation error indicating a `Section type conflict`. +#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) +#else #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) #endif +#endif // ABSL_DECLARE_ATTRIBUTE_SECTION_VARS // @@ -330,8 +341,8 @@ // a no-op on ELF but not on Mach-O. // #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS -#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ - extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ + extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK #endif #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS @@ -392,6 +403,9 @@ // // Tells the compiler to warn about unused results. // +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard `[[nodiscard]]` directly over this macro. +// // When annotating a function, it must appear as the first part of the // declaration or definition. 
The compiler will warn if the return value from // such a function is unused: @@ -418,9 +432,10 @@ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 // // Note: past advice was to place the macro after the argument list. -#if ABSL_HAVE_ATTRIBUTE(nodiscard) -#define ABSL_MUST_USE_RESULT [[nodiscard]] -#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) +// +// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is +// compliant with the stricter [[nodiscard]]. +#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) #define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) #else #define ABSL_MUST_USE_RESULT @@ -490,7 +505,7 @@ #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) #define ABSL_XRAY_LOG_ARGS(N) \ - [[clang::xray_always_instrument, clang::xray_log_args(N)]] + [[clang::xray_always_instrument, clang::xray_log_args(N)]] #else #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] #endif @@ -634,6 +649,9 @@ // declarations. The macro argument is used as a custom diagnostic message (e.g. // suggestion of a better alternative). // +// For code or headers that are assured to only build with C++14 and up, prefer +// just using the standard `[[deprecated("message")]]` directly over this macro. +// // Examples: // // class ABSL_DEPRECATED("Use Bar instead") Foo {...}; @@ -644,14 +662,17 @@ // ABSL_DEPRECATED("Use DoThat() instead") // void DoThis(); // +// enum FooEnum { +// kBar ABSL_DEPRECATED("Use kBaz instead"), +// }; +// // Every usage of a deprecated entity will trigger a warning when compiled with -// clang's `-Wdeprecated-declarations` option. This option is turned off by -// default, but the warnings will be reported by clang-tidy. -#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L +// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain +// turns this warning off by default, instead relying on clang-tidy to report +// new uses of deprecated code. +#if ABSL_HAVE_ATTRIBUTE(deprecated) #define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) -#endif - -#ifndef ABSL_DEPRECATED +#else #define ABSL_DEPRECATED(message) #endif @@ -661,9 +682,18 @@ // not compile (on supported platforms) unless the variable has a constant // initializer. This is useful for variables with static and thread storage // duration, because it guarantees that they will not suffer from the so-called -// "static init order fiasco". Prefer to put this attribute on the most visible -// declaration of the variable, if there's more than one, because code that -// accesses the variable can then use the attribute for optimization. +// "static init order fiasco". +// +// This attribute must be placed on the initializing declaration of the +// variable. Some compilers will give a -Wmissing-constinit warning when this +// attribute is placed on some other declaration but missing from the +// initializing declaration. +// +// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can +// also be used in a non-initializing declaration to tell the compiler that a +// variable is already initialized, reducing overhead that would otherwise be +// incurred by a hidden guard variable. Thus annotating all declarations with +// this attribute is recommended to potentially enhance optimization. 
// // Example: // @@ -672,14 +702,19 @@ // ABSL_CONST_INIT static MyType my_var; // }; // -// MyType MyClass::my_var = MakeMyType(...); +// ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...); +// +// For code or headers that are assured to only build with C++20 and up, prefer +// just using the standard `constinit` keyword directly over this macro. // // Note that this attribute is redundant if the variable is declared constexpr. -#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#if defined(__cpp_constinit) && __cpp_constinit >= 201907L +#define ABSL_CONST_INIT constinit +#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) #define ABSL_CONST_INIT [[clang::require_constant_initialization]] #else #define ABSL_CONST_INIT -#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#endif // ABSL_ATTRIBUTE_PURE_FUNCTION // @@ -724,4 +759,41 @@ #define ABSL_ATTRIBUTE_LIFETIME_BOUND #endif +// ABSL_ATTRIBUTE_TRIVIAL_ABI +// Indicates that a type is "trivially relocatable" -- meaning it can be +// relocated without invoking the constructor/destructor, using a form of move +// elision. +// +// From a memory safety point of view, putting aside destructor ordering, it's +// safe to apply ABSL_ATTRIBUTE_TRIVIAL_ABI if an object's location +// can change over the course of its lifetime: if a constructor can be run one +// place, and then the object magically teleports to another place where some +// methods are run, and then the object teleports to yet another place where it +// is destroyed. This is notably not true for self-referential types, where the +// move-constructor must keep the self-reference up to date. If the type changed +// location without invoking the move constructor, it would have a dangling +// self-reference. +// +// The use of this teleporting machinery means that the number of paired +// move/destroy operations can change, and so it is a bad idea to apply this to +// a type meant to count the number of moves. +// +// Warning: applying this can, rarely, break callers. Objects passed by value +// will be destroyed at the end of the call, instead of the end of the +// full-expression containing the call. In addition, it changes the ABI +// of functions accepting this type by value (e.g. to pass in registers). +// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi +// +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::trivial_abi) +#define ABSL_ATTRIBUTE_TRIVIAL_ABI [[clang::trivial_abi]] +#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1 +#elif ABSL_HAVE_ATTRIBUTE(trivial_abi) +#define ABSL_ATTRIBUTE_TRIVIAL_ABI __attribute__((trivial_abi)) +#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1 +#else +#define ABSL_ATTRIBUTE_TRIVIAL_ABI +#endif + #endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/casts.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/casts.h index 83c691265f..b99adb0699 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/casts.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/casts.h @@ -29,6 +29,10 @@ #include #include +#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L +#include // For std::bit_cast. 
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + #include "absl/base/internal/identity.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" @@ -36,19 +40,6 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace internal_casts { - -template -struct is_bitcastable - : std::integral_constant< - bool, - sizeof(Dest) == sizeof(Source) && - type_traits_internal::is_trivially_copyable::value && - type_traits_internal::is_trivially_copyable::value && - std::is_default_constructible::value> {}; - -} // namespace internal_casts - // implicit_cast() // // Performs an implicit conversion between types following the language @@ -105,81 +96,83 @@ constexpr To implicit_cast(typename absl::internal::identity_t to) { // bit_cast() // -// Performs a bitwise cast on a type without changing the underlying bit -// representation of that type's value. The two types must be of the same size -// and both types must be trivially copyable. As with most casts, use with -// caution. A `bit_cast()` might be needed when you need to temporarily treat a -// type as some other type, such as in the following cases: +// Creates a value of the new type `Dest` whose representation is the same as +// that of the argument, which is of (deduced) type `Source` (a "bitwise cast"; +// every bit in the value representation of the result is equal to the +// corresponding bit in the object representation of the source). Source and +// destination types must be of the same size, and both types must be trivially +// copyable. // -// * Serialization (casting temporarily to `char *` for those purposes is -// always allowed by the C++ standard) -// * Managing the individual bits of a type within mathematical operations -// that are not normally accessible through that type -// * Casting non-pointer types to pointer types (casting the other way is -// allowed by `reinterpret_cast()` but round-trips cannot occur the other -// way). -// -// Example: +// As with most casts, use with caution. A `bit_cast()` might be needed when you +// need to treat a value as the value of some other type, for example, to access +// the individual bits of an object which are not normally accessible through +// the object's type, such as for working with the binary representation of a +// floating point value: // // float f = 3.14159265358979; -// int i = bit_cast(f); +// int i = bit_cast(f); // // i = 0x40490fdb // -// Casting non-pointer types to pointer types and then dereferencing them -// traditionally produces undefined behavior. +// Reinterpreting and accessing a value directly as a different type (as shown +// below) usually results in undefined behavior. // // Example: // // // WRONG -// float f = 3.14159265358979; // WRONG -// int i = * reinterpret_cast(&f); // WRONG +// float f = 3.14159265358979; +// int i = reinterpret_cast(f); // Wrong +// int j = *reinterpret_cast(&f); // Equally wrong +// int k = *bit_cast(&f); // Equally wrong // -// The address-casting method produces undefined behavior according to the ISO -// C++ specification section [basic.lval]. Roughly, this section says: if an -// object in memory has one type, and a program accesses it with a different -// type, the result is undefined behavior for most values of "different type". +// Reinterpret-casting results in undefined behavior according to the ISO C++ +// specification, section [basic.lval]. 
Roughly, this section says: if an object +// in memory has one type, and a program accesses it with a different type, the +// result is undefined behavior for most "different type". +// +// Using bit_cast on a pointer and then dereferencing it is no better than using +// reinterpret_cast. You should only use bit_cast on the value itself. // // Such casting results in type punning: holding an object in memory of one type // and reading its bits back using a different type. A `bit_cast()` avoids this -// issue by implementing its casts using `memcpy()`, which avoids introducing -// this undefined behavior. +// issue by copying the object representation to a new value, which avoids +// introducing this undefined behavior (since the original value is never +// accessed in the wrong way). // -// NOTE: The requirements here are more strict than the bit_cast of standard -// proposal p0476 due to the need for workarounds and lack of intrinsics. -// Specifically, this implementation also requires `Dest` to be -// default-constructible. -template < - typename Dest, typename Source, - typename std::enable_if::value, - int>::type = 0> +// The requirements of `absl::bit_cast` are more strict than that of +// `std::bit_cast` unless compiler support is available. Specifically, without +// compiler support, this implementation also requires `Dest` to be +// default-constructible. In C++20, `absl::bit_cast` is replaced by +// `std::bit_cast`. +#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + +using std::bit_cast; + +#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + +template ::value && + type_traits_internal::is_trivially_copyable::value +#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast) + && std::is_default_constructible::value +#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast) + , + int>::type = 0> +#if ABSL_HAVE_BUILTIN(__builtin_bit_cast) +inline constexpr Dest bit_cast(const Source& source) { + return __builtin_bit_cast(Dest, source); +} +#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) inline Dest bit_cast(const Source& source) { Dest dest; memcpy(static_cast(std::addressof(dest)), static_cast(std::addressof(source)), sizeof(dest)); return dest; } +#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast) -// NOTE: This overload is only picked if the requirements of bit_cast are -// not met. It is therefore UB, but is provided temporarily as previous -// versions of this function template were unchecked. Do not use this in -// new code. -template < - typename Dest, typename Source, - typename std::enable_if< - !internal_casts::is_bitcastable::value, - int>::type = 0> -ABSL_DEPRECATED( - "absl::bit_cast type requirements were violated. 
Update the types " - "being used such that they are the same size and are both " - "TriviallyCopyable.") -inline Dest bit_cast(const Source& source) { - static_assert(sizeof(Dest) == sizeof(Source), - "Source and destination types should have equal sizes."); - - Dest dest; - memcpy(&dest, &source, sizeof(dest)); - return dest; -} +#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/config.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/config.h index c7b2e64de8..1058ce74b7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/config.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/config.h @@ -56,6 +56,25 @@ #include #endif // __cplusplus +// ABSL_INTERNAL_CPLUSPLUS_LANG +// +// MSVC does not set the value of __cplusplus correctly, but instead uses +// _MSVC_LANG as a stand-in. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +// +// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at +// times, for example: +// https://github.com/microsoft/vscode-cpptools/issues/1770 +// https://reviews.llvm.org/D70996 +// +// For this reason, this symbol is considered INTERNAL and code outside of +// Abseil must not use it. +#if defined(_MSVC_LANG) +#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG +#elif defined(__cplusplus) +#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus +#endif + #if defined(__APPLE__) // Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, // __IPHONE_8_0. @@ -183,12 +202,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_HAVE_BUILTIN(x) 0 #endif -#if defined(__is_identifier) -#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x)) -#else -#define ABSL_INTERNAL_HAS_KEYWORD(x) 0 -#endif - #ifdef __has_feature #define ABSL_HAVE_FEATURE(f) __has_feature(f) #else @@ -212,11 +225,12 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif // ABSL_HAVE_TLS is defined to 1 when __thread should be supported. -// We assume __thread is supported on Linux when compiled with Clang or compiled -// against libstdc++ with _GLIBCXX_HAVE_TLS defined. +// We assume __thread is supported on Linux or Asylo when compiled with Clang or +// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined. #ifdef ABSL_HAVE_TLS #error ABSL_HAVE_TLS cannot be directly set -#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#elif (defined(__linux__) || defined(__ASYLO__)) && \ + (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) #define ABSL_HAVE_TLS 1 #endif @@ -229,6 +243,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE #error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set #elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \ + (defined(__clang__) && __clang_major__ >= 15) || \ (!defined(__clang__) && defined(__GLIBCXX__) && \ ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8)) #define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 @@ -243,31 +258,32 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // // Checks whether `std::is_trivially_copy_assignable` is supported. -// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with -// either libc++ or libstdc++, and Visual Studio (but not NVCC). 
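For reference, the casts.h hunk above makes absl::bit_cast resolve to std::bit_cast in C++20, to __builtin_bit_cast where the compiler provides it, and otherwise to a memcpy-based fallback. A minimal self-contained sketch of that fallback pattern, assuming only that the two types have equal size and are trivially copyable (BitCastSketch is an illustrative name, not Abseil's):

// A minimal sketch (not Abseil code) of the memcpy-based fallback used when
// neither std::bit_cast nor __builtin_bit_cast is available. The real
// implementation also enforces the trait requirements via enable_if.
#include <cstdint>
#include <cstring>

template <typename Dest, typename Source>
Dest BitCastSketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source),
                "Source and destination types must have equal sizes");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));  // copy the object representation
  return dest;
}

// Usage: read the bits of a float without the undefined behavior of
// reinterpret_cast-and-dereference.
//   float f = 3.14159265358979f;
//   uint32_t bits = BitCastSketch<uint32_t>(f);  // 0x40490fdb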
+// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with +// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC). #if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set #elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set -#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ - (!defined(__clang__) && ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && \ - (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \ - (defined(_MSC_VER) && !defined(__NVCC__)) +#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ + (defined(__clang__) && __clang_major__ >= 15) || \ + (!defined(__clang__) && \ + ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \ + (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \ + defined(_LIBCPP_VERSION)))) || \ + (defined(_MSC_VER) && !defined(__NVCC__) && !defined(__clang__)) #define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 #define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 #endif -// ABSL_HAVE_SOURCE_LOCATION_CURRENT +// ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE // -// Indicates whether `absl::SourceLocation::current()` will return useful -// information in some contexts. -#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT -#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \ - ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE) -#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1 -#elif ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) -#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1 -#endif +// Checks whether `std::is_trivially_copyable` is supported. +// +// Notes: Clang 15+ with libc++ supports these features, GCC hasn't been tested. +#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE) +#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set +#elif defined(__clang__) && (__clang_major__ >= 15) +#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1 #endif // ABSL_HAVE_THREAD_LOCAL @@ -408,10 +424,12 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // POSIX.1-2001. #ifdef ABSL_HAVE_MMAP #error ABSL_HAVE_MMAP cannot be directly set -#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ - defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ - defined(__ASYLO__) || defined(__myriad2__) +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ + defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ + defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ + defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ + defined(__QNX__) #define ABSL_HAVE_MMAP 1 #endif @@ -422,7 +440,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM #error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__ros__) + defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \ + defined(__NetBSD__) #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 #endif @@ -517,22 +536,41 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error "absl endian detection needs to be set up for your compiler" #endif -// macOS 10.13 and iOS 10.11 don't let you use , , or -// even though the headers exist and are publicly noted to work. 
See -// https://github.com/abseil/abseil-cpp/issues/207 and +// macOS < 10.13 and iOS < 11 don't let you use , , or +// even though the headers exist and are publicly noted to work, because the +// libc++ shared library shipped on the system doesn't have the requisite +// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and // https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes +// // libc++ spells out the availability requirements in the file // llvm-project/libcxx/include/__config via the #define // _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. -#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ - ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)) +// +// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14 +// and iOS < 12 in the libc++ headers. This was corrected by +// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953 +// which subsequently made it into the XCode 12.5 release. We need to match the +// old (incorrect) conditions when built with old XCode, but can use the +// corrected earlier versions with new XCode. +#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ + ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \ + (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)))) #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 #else #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 @@ -702,8 +740,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #endif -#undef ABSL_INTERNAL_HAS_KEYWORD - // ABSL_DLL // // When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` @@ -729,8 +765,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // a compiler instrumentation module and a run-time library. #ifdef ABSL_HAVE_MEMORY_SANITIZER #error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." 
-#elif defined(__SANITIZE_MEMORY__) -#define ABSL_HAVE_MEMORY_SANITIZER 1 #elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) #define ABSL_HAVE_MEMORY_SANITIZER 1 #endif @@ -757,6 +791,45 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_HAVE_ADDRESS_SANITIZER 1 #endif +// ABSL_HAVE_HWADDRESS_SANITIZER +// +// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan +// memory error detector which can use CPU features like ARM TBI, Intel LAM or +// AMD UAI. +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_HWADDRESS__) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_LEAK_SANITIZER +// +// LeakSanitizer (or lsan) is a detector of memory leaks. +// https://clang.llvm.org/docs/LeakSanitizer.html +// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time +// whether the LeakSanitizer is potentially available. However, just because the +// LeakSanitizer is available does not mean it is active. Use the +// always-available run-time interface in //absl/debugging/leak_check.h for +// interacting with LeakSanitizer. +#ifdef ABSL_HAVE_LEAK_SANITIZER +#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." +#elif defined(LEAK_SANITIZER) +// GCC provides no method for detecting the presense of the standalone +// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also +// use -DLEAK_SANITIZER. +#define ABSL_HAVE_LEAK_SANITIZER 1 +// Clang standalone LeakSanitizer (-fsanitize=leak) +#elif ABSL_HAVE_FEATURE(leak_sanitizer) +#define ABSL_HAVE_LEAK_SANITIZER 1 +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) +// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer. +#define ABSL_HAVE_LEAK_SANITIZER 1 +#endif + // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION // // Class template argument deduction is a language feature added in C++17. @@ -766,4 +839,93 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 #endif +// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +// +// Prior to C++17, static constexpr variables defined in classes required a +// separate definition outside of the class body, for example: +// +// class Foo { +// static constexpr int kBar = 0; +// }; +// constexpr int Foo::kBar; +// +// In C++17, these variables defined in classes are considered inline variables, +// and the extra declaration is redundant. Since some compilers warn on the +// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used +// conditionally ignore them: +// +// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +// constexpr int Foo::kBar; +// #endif +#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L +#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1 +#endif + +// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with +// RTTI support. +#ifdef ABSL_INTERNAL_HAS_RTTI +#error ABSL_INTERNAL_HAS_RTTI cannot be directly set +#elif !defined(__GNUC__) || defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_RTTI 1 +#endif // !defined(__GNUC__) || defined(__GXX_RTTI) + +// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support. 
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +#ifdef ABSL_INTERNAL_HAVE_SSE +#error ABSL_INTERNAL_HAVE_SSE cannot be directly set +#elif defined(__SSE__) +#define ABSL_INTERNAL_HAVE_SSE 1 +#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1) +// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1 +// indicates that at least SSE was targeted with the /arch:SSE option. +// All x86-64 processors support SSE, so support can be assumed. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +#define ABSL_INTERNAL_HAVE_SSE 1 +#endif + +// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +#ifdef ABSL_INTERNAL_HAVE_SSE2 +#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set +#elif defined(__SSE2__) +#define ABSL_INTERNAL_HAVE_SSE2 1 +#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) +// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2 +// indicates that at least SSE2 was targeted with the /arch:SSE2 option. +// All x86-64 processors support SSE2, so support can be assumed. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +#define ABSL_INTERNAL_HAVE_SSE2 1 +#endif + +// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +// +// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3 +// with MSVC requires either assuming that the code will only every run on CPUs +// that support SSSE3, otherwise __cpuid() can be used to detect support at +// runtime and fallback to a non-SSSE3 implementation when SSSE3 is unsupported +// by the CPU. +#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set +#elif defined(__SSSE3__) +#define ABSL_INTERNAL_HAVE_SSSE3 1 +#endif + +// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM +// SIMD). +// +// If __CUDA_ARCH__ is defined, then we are compiling CUDA code in device mode. +// In device mode, NEON intrinsics are not available, regardless of host +// platform. 
+// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code +#ifdef ABSL_INTERNAL_HAVE_ARM_NEON +#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set +#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__) +#define ABSL_INTERNAL_HAVE_ARM_NEON 1 +#endif + #endif // ABSL_BASE_CONFIG_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/exception_safety_testing_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/exception_safety_testing_test.cc index a59be29e91..a87fd6a99e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/exception_safety_testing_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/exception_safety_testing_test.cc @@ -701,7 +701,10 @@ struct BasicGuaranteeWithExtraContracts : public NonNegative { static constexpr int kExceptionSentinel = 9999; }; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; +#endif TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { auto tester_with_val = diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h index 3e72b4977d..c72015ef96 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ -#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ #include "absl/base/internal/atomic_hook.h" @@ -31,4 +31,4 @@ void RegisterFunc(VoidF func); ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.cc index 0e65005b89..902e3f5ef1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.cc @@ -25,6 +25,8 @@ #include #include // NOLINT(build/c++11) +#include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/unscaledcycleclock.h" namespace absl { @@ -33,44 +35,20 @@ namespace base_internal { #if ABSL_USE_UNSCALED_CYCLECLOCK -namespace { - -#ifdef NDEBUG -#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY -// Not debug mode and the UnscaledCycleClock frequency is the CPU -// frequency. Scale the CycleClock to prevent overflow if someone -// tries to represent the time as cycles since the Unix epoch. -static constexpr int32_t kShift = 1; -#else -// Not debug mode and the UnscaledCycleClock isn't operating at the -// raw CPU frequency. There is no need to do any scaling, so don't -// needlessly sacrifice precision. -static constexpr int32_t kShift = 0; -#endif -#else -// In debug mode use a different shift to discourage depending on a -// particular shift value. 
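The ABSL_INTERNAL_HAVE_SSE, ABSL_INTERNAL_HAVE_SSE2, ABSL_INTERNAL_HAVE_SSSE3, and ABSL_INTERNAL_HAVE_ARM_NEON macros added to config.h above are plain compile-time switches. A hedged sketch of how a caller might select a vectorized path with one of them; FillZeroSketch is an invented helper, not Abseil API:

// Illustrative only: use an SSE2 path when the build targets it, with a
// portable scalar fallback. Assumes "absl/base/config.h" provides the macro.
#include <cstddef>
#include <cstdint>
#include "absl/base/config.h"
#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

void FillZeroSketch(uint8_t* dst, size_t n) {
#ifdef ABSL_INTERNAL_HAVE_SSE2
  const __m128i zero = _mm_setzero_si128();
  for (; n >= 16; n -= 16, dst += 16) {
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), zero);  // 16 bytes per store
  }
#endif
  for (; n > 0; --n) *dst++ = 0;  // scalar tail (or whole buffer without SSE2)
}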
-static constexpr int32_t kShift = 2; +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr int32_t CycleClock::kShift; +constexpr double CycleClock::kFrequencyScale; #endif -static constexpr double kFrequencyScale = 1.0 / (1 << kShift); -static std::atomic cycle_clock_source; +ABSL_CONST_INIT std::atomic + CycleClock::cycle_clock_source_{nullptr}; -CycleClockSourceFunc LoadCycleClockSource() { - // Optimize for the common case (no callback) by first doing a relaxed load; - // this is significantly faster on non-x86 platforms. - if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) { - return nullptr; - } - // This corresponds to the store(std::memory_order_release) in - // CycleClockSource::Register, and makes sure that any updates made prior to - // registering the callback are visible to this thread before the callback is - // invoked. - return cycle_clock_source.load(std::memory_order_acquire); +void CycleClockSource::Register(CycleClockSourceFunc source) { + // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. + CycleClock::cycle_clock_source_.store(source, std::memory_order_release); } -} // namespace - +#ifdef _WIN32 int64_t CycleClock::Now() { auto fn = LoadCycleClockSource(); if (fn == nullptr) { @@ -78,15 +56,7 @@ int64_t CycleClock::Now() { } return fn() >> kShift; } - -double CycleClock::Frequency() { - return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); -} - -void CycleClockSource::Register(CycleClockSourceFunc source) { - // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. - cycle_clock_source.store(source, std::memory_order_release); -} +#endif #else diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.h index a18b584445..cbfdf57998 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock.h @@ -42,14 +42,20 @@ #ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ #define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#include #include +#include "absl/base/attributes.h" #include "absl/base/config.h" +#include "absl/base/internal/cycleclock_config.h" +#include "absl/base/internal/unscaledcycleclock.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { +using CycleClockSourceFunc = int64_t (*)(); + // ----------------------------------------------------------------------------- // CycleClock // ----------------------------------------------------------------------------- @@ -68,12 +74,21 @@ class CycleClock { static double Frequency(); private: +#if ABSL_USE_UNSCALED_CYCLECLOCK + static CycleClockSourceFunc LoadCycleClockSource(); + + static constexpr int32_t kShift = kCycleClockShift; + static constexpr double kFrequencyScale = kCycleClockFrequencyScale; + + ABSL_CONST_INIT static std::atomic cycle_clock_source_; +#endif // ABSL_USE_UNSCALED_CYCLECLOC + CycleClock() = delete; // no instances CycleClock(const CycleClock&) = delete; CycleClock& operator=(const CycleClock&) = delete; -}; -using CycleClockSourceFunc = int64_t (*)(); + friend class CycleClockSource; +}; class CycleClockSource { private: @@ -87,6 +102,41 @@ class CycleClockSource { static void Register(CycleClockSourceFunc source); }; +#if ABSL_USE_UNSCALED_CYCLECLOCK + +inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() { +#if !defined(__x86_64__) + // 
Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) { + return nullptr; + } +#endif // !defined(__x86_64__) + + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback + // is invoked. + return cycle_clock_source_.load(std::memory_order_acquire); +} + +// Accessing globals in inlined code in Window DLLs is problematic. +#ifndef _WIN32 +inline int64_t CycleClock::Now() { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; +} +#endif + +inline double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock_config.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock_config.h new file mode 100644 index 0000000000..191112b58e --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/cycleclock_config.h @@ -0,0 +1,55 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/unscaledcycleclock_config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if ABSL_USE_UNSCALED_CYCLECLOCK +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1); +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. +ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0); +#endif +#else // NDEBUG +// In debug mode use a different shift to discourage depending on a +// particular shift value. 
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2); +#endif // NDEBUG + +ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale, + 1.0 / (1 << kCycleClockShift)); +#endif // ABSL_USE_UNSCALED_CYCLECLOC + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/direct_mmap.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/direct_mmap.h index 274054cd5a..815b8d23ba 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/direct_mmap.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/direct_mmap.h @@ -20,7 +20,7 @@ #include "absl/base/config.h" -#if ABSL_HAVE_MMAP +#ifdef ABSL_HAVE_MMAP #include @@ -41,13 +41,13 @@ #ifdef __mips__ // Include definitions of the ABI currently in use. -#ifdef __BIONIC__ +#if defined(__BIONIC__) || !defined(__GLIBC__) // Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the // definitions we need. #include #else #include -#endif // __BIONIC__ +#endif // __BIONIC__ || !__GLIBC__ #endif // __mips__ // SYS_mmap and SYS_munmap are not defined in Android. @@ -97,7 +97,8 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, #ifdef __BIONIC__ // SYS_mmap2 has problems on Android API level <= 16. // Workaround by invoking __mmap2() instead. - return __mmap2(start, length, prot, flags, fd, offset / pagesize); + return __mmap2(start, length, prot, flags, fd, + static_cast(offset / pagesize)); #else return reinterpret_cast( syscall(SYS_mmap2, start, length, prot, flags, fd, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/endian.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/endian.h index dad0e9aeb0..50747d75ec 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/endian.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/endian.h @@ -16,16 +16,9 @@ #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ #define ABSL_BASE_INTERNAL_ENDIAN_H_ -// The following guarantees declaration of the byte swap functions -#ifdef _MSC_VER -#include // NOLINT(build/include) -#elif defined(__FreeBSD__) -#include -#elif defined(__GLIBC__) -#include // IWYU pragma: export -#endif - #include +#include + #include "absl/base/casts.h" #include "absl/base/config.h" #include "absl/base/internal/unaligned_access.h" @@ -34,47 +27,11 @@ namespace absl { ABSL_NAMESPACE_BEGIN -// Use compiler byte-swapping intrinsics if they are available. 32-bit -// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0. -// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0. -// For simplicity, we enable them all only for GCC 4.8.0 or later. 
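The constants moved into cycleclock_config.h above keep Now() and Frequency() consistent under scaling: Now() is shifted right by kCycleClockShift while Frequency() is multiplied by kCycleClockFrequencyScale = 1 / 2^kShift, so durations computed as Now() / Frequency() are unchanged while overflow headroom grows. A small illustration with invented clock values (the 3 GHz figure and readings are made up):

// Illustrative arithmetic only; not Abseil code.
#include <cassert>
#include <cstdint>

int main() {
  const int32_t kShift = 1;  // e.g. when the unscaled clock runs at CPU frequency
  const double kFrequencyScale = 1.0 / (1 << kShift);
  const int64_t unscaled_now = 6000000000;          // hypothetical raw cycle count
  const double unscaled_hz = 3.0e9;                 // hypothetical 3 GHz counter
  const int64_t now = unscaled_now >> kShift;       // what Now() would report
  const double hz = kFrequencyScale * unscaled_hz;  // what Frequency() would report
  // Elapsed seconds agree either way: 2.0 in both cases.
  assert(static_cast<double>(now) / hz ==
         static_cast<double>(unscaled_now) / unscaled_hz);
  return 0;
}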
-#if defined(__clang__) || \ - (defined(__GNUC__) && \ - ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5)) inline uint64_t gbswap_64(uint64_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) return __builtin_bswap64(host_int); -} -inline uint32_t gbswap_32(uint32_t host_int) { - return __builtin_bswap32(host_int); -} -inline uint16_t gbswap_16(uint16_t host_int) { - return __builtin_bswap16(host_int); -} - #elif defined(_MSC_VER) -inline uint64_t gbswap_64(uint64_t host_int) { return _byteswap_uint64(host_int); -} -inline uint32_t gbswap_32(uint32_t host_int) { - return _byteswap_ulong(host_int); -} -inline uint16_t gbswap_16(uint16_t host_int) { - return _byteswap_ushort(host_int); -} - -#else -inline uint64_t gbswap_64(uint64_t host_int) { -#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__) - // Adapted from /usr/include/byteswap.h. Not available on Mac. - if (__builtin_constant_p(host_int)) { - return __bswap_constant_64(host_int); - } else { - uint64_t result; - __asm__("bswap %0" : "=r"(result) : "0"(host_int)); - return result; - } -#elif defined(__GLIBC__) - return bswap_64(host_int); #else return (((host_int & uint64_t{0xFF}) << 56) | ((host_int & uint64_t{0xFF00}) << 40) | @@ -84,12 +41,14 @@ inline uint64_t gbswap_64(uint64_t host_int) { ((host_int & uint64_t{0xFF0000000000}) >> 24) | ((host_int & uint64_t{0xFF000000000000}) >> 40) | ((host_int & uint64_t{0xFF00000000000000}) >> 56)); -#endif // bswap_64 +#endif } inline uint32_t gbswap_32(uint32_t host_int) { -#if defined(__GLIBC__) - return bswap_32(host_int); +#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) + return __builtin_bswap32(host_int); +#elif defined(_MSC_VER) + return _byteswap_ulong(host_int); #else return (((host_int & uint32_t{0xFF}) << 24) | ((host_int & uint32_t{0xFF00}) << 8) | @@ -99,33 +58,29 @@ inline uint32_t gbswap_32(uint32_t host_int) { } inline uint16_t gbswap_16(uint16_t host_int) { -#if defined(__GLIBC__) - return bswap_16(host_int); +#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) + return __builtin_bswap16(host_int); +#elif defined(_MSC_VER) + return _byteswap_ushort(host_int); #else return (((host_int & uint16_t{0xFF}) << 8) | ((host_int & uint16_t{0xFF00}) >> 8)); #endif } -#endif // intrinsics available - #ifdef ABSL_IS_LITTLE_ENDIAN -// Definitions for ntohl etc. that don't require us to include -// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather -// than just #defining them because in debug mode, gcc doesn't -// correctly handle the (rather involved) definitions of bswap_32. -// gcc guarantees that inline functions are as fast as macros, so -// this isn't a performance hit. +// Portable definitions for htonl (host-to-network) and friends on little-endian +// architectures. inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } #elif defined ABSL_IS_BIG_ENDIAN -// These definitions are simpler on big-endian machines -// These are functions instead of macros to avoid self-assignment warnings -// on calls such as "i = ghtnol(i);". This also provides type checking. +// Portable definitions for htonl (host-to-network) etc on big-endian +// architectures. These definitions are simpler since the host byte order is the +// same as network byte order. 
inline uint16_t ghtons(uint16_t x) { return x; } inline uint32_t ghtonl(uint32_t x) { return x; } inline uint64_t ghtonll(uint64_t x) { return x; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/fast_type_id.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/fast_type_id.h index 3db59e8374..a547b3a8bc 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/fast_type_id.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/fast_type_id.h @@ -28,8 +28,10 @@ struct FastTypeTag { constexpr static char dummy_var = 0; }; +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template constexpr char FastTypeTag::dummy_var; +#endif // FastTypeId() evaluates at compile/link-time to a unique pointer for the // passed-in type. These are meant to be good match for keys into maps or diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable.h index 130d8c2476..df933faff5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ -#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_ #include @@ -104,4 +104,4 @@ #endif // __cpp_inline_variables -#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable_testing.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable_testing.h index 3856b9f80f..f3c81459fa 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable_testing.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/inline_variable_testing.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_ -#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_ +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ #include "absl/base/internal/inline_variable.h" @@ -43,4 +43,4 @@ const int& get_int_b(); ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_ +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/invoke.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/invoke.h index 5c71f32823..643c2a42f0 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/invoke.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/invoke.h @@ -14,6 +14,8 @@ // // absl::base_internal::invoke(f, args...) is an implementation of // INVOKE(f, args...) from section [func.require] of the C++ standard. +// When compiled as C++17 and later versions, it is implemented as an alias of +// std::invoke. 
// // [func.require] // Define INVOKE (f, t1, t2, ..., tN) as follows: @@ -35,6 +37,26 @@ #ifndef ABSL_BASE_INTERNAL_INVOKE_H_ #define ABSL_BASE_INTERNAL_INVOKE_H_ +#include "absl/base/config.h" + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +using std::invoke; +using std::invoke_result_t; +using std::is_invocable_r; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + #include #include #include @@ -80,8 +102,18 @@ struct MemFunAndRef : StrippedAccept { static decltype((std::declval().* std::declval())(std::declval()...)) Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { +// Ignore bogus GCC warnings on this line. +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif return (std::forward(obj).* std::forward(mem_fun))(std::forward(args)...); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic pop +#endif } }; @@ -180,8 +212,30 @@ invoke_result_t invoke(F&& f, Args&&... args) { return Invoker::type::Invoke(std::forward(f), std::forward(args)...); } + +template +struct IsInvocableRImpl : std::false_type {}; + +template +struct IsInvocableRImpl< + absl::void_t >, R, F, + Args...> + : std::integral_constant< + bool, + std::is_convertible, + R>::value || + std::is_void::value> {}; + +// Type trait whose member `value` is true if invoking `F` with `Args` is valid, +// and either the return type is convertible to `R`, or `R` is void. +// C++11-compatible version of `std::is_invocable_r`. +template +using is_invocable_r = IsInvocableRImpl; + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + #endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc index 229ab9162d..662167b08a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc @@ -332,7 +332,7 @@ size_t GetPageSize() { #elif defined(__wasm__) || defined(__asmjs__) return getpagesize(); #else - return sysconf(_SC_PAGESIZE); + return static_cast(sysconf(_SC_PAGESIZE)); #endif } @@ -364,7 +364,7 @@ LowLevelAlloc::Arena::Arena(uint32_t flags_value) } // L < meta_data_arena->mu -LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) { +LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) { Arena *meta_data_arena = DefaultArena(); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.h index db91951c82..eabb14a9b4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc.h @@ -103,7 +103,7 @@ class LowLevelAlloc { // the provided flags. 
For example, the call NewArena(kAsyncSignalSafe) // is itself async-signal-safe, as well as generatating an arena that provides // async-signal-safe Alloc/Free. - static Arena *NewArena(int32_t flags); + static Arena *NewArena(uint32_t flags); // Destroys an arena allocated by NewArena and returns true, // provided no allocated blocks remain in the arena. diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc_test.cc index 31abb888a6..8fdec09e88 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/low_level_alloc_test.cc @@ -86,7 +86,7 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) { AllocMap::iterator it; BlockDesc block_desc; int rnd; - LowLevelAlloc::Arena *arena = 0; + LowLevelAlloc::Arena *arena = nullptr; if (use_new_arena) { int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0; arena = LowLevelAlloc::NewArena(flags); @@ -101,11 +101,10 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) { case 0: // coin came up heads: add a block using_low_level_alloc = true; block_desc.len = rand() & 0x3fff; - block_desc.ptr = - reinterpret_cast( - arena == 0 - ? LowLevelAlloc::Alloc(block_desc.len) - : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); + block_desc.ptr = reinterpret_cast( + arena == nullptr + ? LowLevelAlloc::Alloc(block_desc.len) + : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); using_low_level_alloc = false; RandomizeBlockDesc(&block_desc); rnd = rand(); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/prefetch.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/prefetch.h new file mode 100644 index 0000000000..06419283ba --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/prefetch.h @@ -0,0 +1,138 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_ +#define ABSL_BASE_INTERNAL_PREFETCH_H_ + +#include "absl/base/config.h" + +#ifdef __SSE__ +#include +#endif + +#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE) +#include +#pragma intrinsic(_mm_prefetch) +#endif + +// Compatibility wrappers around __builtin_prefetch, to prefetch data +// for read if supported by the toolchain. + +// Move data into the cache before it is read, or "prefetch" it. +// +// The value of `addr` is the address of the memory to prefetch. If +// the target and compiler support it, data prefetch instructions are +// generated. If the prefetch is done some time before the memory is +// read, it may be in the cache by the time the read occurs. 
+// +// The function names specify the temporal locality heuristic applied, +// using the names of Intel prefetch instructions: +// +// T0 - high degree of temporal locality; data should be left in as +// many levels of the cache possible +// T1 - moderate degree of temporal locality +// T2 - low degree of temporal locality +// Nta - no temporal locality, data need not be left in the cache +// after the read +// +// Incorrect or gratuitous use of these functions can degrade +// performance, so use them only when representative benchmarks show +// an improvement. +// +// Example usage: +// +// absl::base_internal::PrefetchT0(addr); +// +// Currently, the different prefetch calls behave on some Intel +// architectures as follows: +// +// SNB..SKL SKX +// PrefetchT0() L1/L2/L3 L1/L2 +// PrefetchT1() L2/L3 L2 +// PrefetchT2() L2/L3 L2 +// PrefetchNta() L1/--/L3 L1* +// +// * On SKX PrefetchNta() will bring the line into L1 but will evict +// from L3 cache. This might result in surprising behavior. +// +// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon. +// +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +void PrefetchT0(const void* addr); +void PrefetchT1(const void* addr); +void PrefetchT2(const void* addr); +void PrefetchNta(const void* addr); + +// Implementation details follow. + +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + +#define ABSL_INTERNAL_HAVE_PREFETCH 1 + +// See __builtin_prefetch: +// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. +// +// These functions speculatively load for read only. This is +// safe for all currently supported platforms. However, prefetch for +// store may have problems depending on the target platform. +// +inline void PrefetchT0(const void* addr) { + // Note: this uses prefetcht0 on Intel. + __builtin_prefetch(addr, 0, 3); +} +inline void PrefetchT1(const void* addr) { + // Note: this uses prefetcht1 on Intel. + __builtin_prefetch(addr, 0, 2); +} +inline void PrefetchT2(const void* addr) { + // Note: this uses prefetcht2 on Intel. + __builtin_prefetch(addr, 0, 1); +} +inline void PrefetchNta(const void* addr) { + // Note: this uses prefetchtnta on Intel. + __builtin_prefetch(addr, 0, 0); +} + +#elif defined(ABSL_INTERNAL_HAVE_SSE) + +#define ABSL_INTERNAL_HAVE_PREFETCH 1 + +inline void PrefetchT0(const void* addr) { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); +} +inline void PrefetchT1(const void* addr) { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T1); +} +inline void PrefetchT2(const void* addr) { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T2); +} +inline void PrefetchNta(const void* addr) { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); +} + +#else +inline void PrefetchT0(const void*) {} +inline void PrefetchT1(const void*) {} +inline void PrefetchT2(const void*) {} +inline void PrefetchNta(const void*) {} +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_PREFETCH_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/prefetch_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/prefetch_test.cc new file mode 100644 index 0000000000..7c1dae462c --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/prefetch_test.cc @@ -0,0 +1,43 @@ +// Copyright 2022 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/prefetch.h" + +#include "gtest/gtest.h" + +namespace { + +int number = 42; + +TEST(Prefetch, TemporalLocalityNone) { + absl::base_internal::PrefetchNta(&number); + EXPECT_EQ(number, 42); +} + +TEST(Prefetch, TemporalLocalityLow) { + absl::base_internal::PrefetchT2(&number); + EXPECT_EQ(number, 42); +} + +TEST(Prefetch, TemporalLocalityMedium) { + absl::base_internal::PrefetchT1(&number); + EXPECT_EQ(number, 42); +} + +TEST(Prefetch, TemporalLocalityHigh) { + absl::base_internal::PrefetchT0(&number); + EXPECT_EQ(number, 42); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.cc index 074e026adb..6273e8471b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.cc @@ -14,15 +14,17 @@ #include "absl/base/internal/raw_logging.h" -#include #include +#include #include #include #include +#include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" +#include "absl/base/internal/errno_saver.h" #include "absl/base/log_severity.h" // We know how to perform low-level writes to stderr in POSIX and Windows. For @@ -36,8 +38,8 @@ // This preprocessor token is also defined in raw_io.cc. If you need to copy // this, consider moving both to config.h instead. #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__Fuchsia__) || defined(__native_client__) || \ - defined(__EMSCRIPTEN__) || defined(__ASYLO__) + defined(__Fuchsia__) || defined(__native_client__) || \ + defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__) #include @@ -50,7 +52,8 @@ // ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall // syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len); // for low level operations that want to avoid libc. -#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__) +#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \ + !defined(__ANDROID__) #include #define ABSL_HAVE_SYSCALL_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 @@ -69,20 +72,13 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace raw_logging_internal { +namespace raw_log_internal { namespace { // TODO(gfalcon): We want raw-logging to work on as many platforms as possible. // Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for // a selected set of platforms for which we expect not to be able to raw log. -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES - absl::base_internal::AtomicHook - log_prefix_hook; -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES - absl::base_internal::AtomicHook - abort_hook; - #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED constexpr char kTruncated[] = " ... 
(message truncated)\n"; @@ -93,12 +89,14 @@ constexpr char kTruncated[] = " ... (message truncated)\n"; bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { - int n = vsnprintf(*buf, *size, format, ap); + if (*size < 0) + return false; + int n = vsnprintf(*buf, static_cast(*size), format, ap); bool result = true; if (n < 0 || n > *size) { result = false; if (static_cast(*size) > sizeof(kTruncated)) { - n = *size - sizeof(kTruncated); // room for truncation message + n = *size - static_cast(sizeof(kTruncated)); } else { n = 0; // no room for truncation message } @@ -120,9 +118,11 @@ constexpr int kLogBufSize = 3000; bool DoRawLog(char** buf, int* size, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(3, 4); bool DoRawLog(char** buf, int* size, const char* format, ...) { + if (*size < 0) + return false; va_list ap; va_start(ap, format); - int n = vsnprintf(*buf, *size, format, ap); + int n = vsnprintf(*buf, static_cast(*size), format, ap); va_end(ap); if (n < 0 || n > *size) return false; *size -= n; @@ -130,6 +130,18 @@ bool DoRawLog(char** buf, int* size, const char* format, ...) { return true; } +bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line, + char** buf, int* buf_size) { + DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line); + return true; +} + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES +absl::base_internal::AtomicHook + log_filter_and_prefix_hook(DefaultLogFilterAndPrefix); +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES +absl::base_internal::AtomicHook abort_hook; + void RawLogVA(absl::LogSeverity severity, const char* file, int line, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0); void RawLogVA(absl::LogSeverity severity, const char* file, int line, @@ -150,14 +162,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } #endif - auto log_prefix_hook_ptr = log_prefix_hook.Load(); - if (log_prefix_hook_ptr) { - enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size); - } else { - if (enabled) { - DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line); - } - } + enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size); const char* const prefix_end = buf; #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED @@ -168,11 +173,12 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } else { DoRawLog(&buf, &size, "%s", kTruncated); } - SafeWriteToStderr(buffer, strlen(buffer)); + AsyncSignalSafeWriteToStderr(buffer, strlen(buffer)); } #else static_cast(format); static_cast(ap); + static_cast(enabled); #endif // Abort the process after logging a FATAL message, even if the output itself @@ -195,13 +201,16 @@ void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line, } // namespace -void SafeWriteToStderr(const char *s, size_t len) { +void AsyncSignalSafeWriteToStderr(const char* s, size_t len) { + absl::base_internal::ErrnoSaver errno_saver; #if defined(ABSL_HAVE_SYSCALL_WRITE) + // We prefer calling write via `syscall` to minimize the risk of libc doing + // something "helpful". 
syscall(SYS_write, STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_POSIX_WRITE) write(STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_RAW_IO) - _write(/* stderr */ 2, s, len); + _write(/* stderr */ 2, s, static_cast(len)); #else // stderr logging unsupported on this platform (void) s; @@ -229,7 +238,9 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL absl::base_internal::AtomicHook internal_log_function(DefaultInternalLog); -void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); } +void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) { + log_filter_and_prefix_hook.Store(func); +} void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); } @@ -237,6 +248,6 @@ void RegisterInternalLogFunction(InternalLogFunction func) { internal_log_function.Store(func); } -} // namespace raw_logging_internal +} // namespace raw_log_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.h index 2bf7aabac1..db2ef38e07 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/raw_logging.h @@ -43,12 +43,11 @@ #define ABSL_RAW_LOG(severity, ...) \ do { \ - constexpr const char* absl_raw_logging_internal_basename = \ - ::absl::raw_logging_internal::Basename(__FILE__, \ - sizeof(__FILE__) - 1); \ - ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \ - absl_raw_logging_internal_basename, \ - __LINE__, __VA_ARGS__); \ + constexpr const char* absl_raw_log_internal_basename = \ + ::absl::raw_log_internal::Basename(__FILE__, sizeof(__FILE__) - 1); \ + ::absl::raw_log_internal::RawLog(ABSL_RAW_LOG_INTERNAL_##severity, \ + absl_raw_log_internal_basename, __LINE__, \ + __VA_ARGS__); \ } while (0) // Similar to CHECK(condition) << message, but for low-level modules: @@ -72,14 +71,14 @@ // // The API is a subset of the above: each macro only takes two arguments. Use // StrCat if you need to build a richer message. 
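The renamed raw-logging macros keep the same printf-style call pattern. A hedged usage sketch (the function and message text are invented; raw logging does not allocate or take locks, which is why it is suitable for early-initialization and low-level code):

// Illustrative only.
#include "absl/base/internal/raw_logging.h"

void CheckDescriptorSketch(int fd) {
  if (fd < 0) {
    // Formats like printf; the process aborts because the severity is FATAL.
    ABSL_RAW_LOG(FATAL, "bad file descriptor: %d", fd);
  }
  ABSL_RAW_LOG(INFO, "descriptor %d ready", fd);
}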
-#define ABSL_INTERNAL_LOG(severity, message) \ - do { \ - constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ - ::absl::raw_logging_internal::internal_log_function( \ - ABSL_RAW_LOGGING_INTERNAL_##severity, \ - absl_raw_logging_internal_filename, __LINE__, message); \ - if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ - ABSL_INTERNAL_UNREACHABLE; \ +#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + constexpr const char* absl_raw_log_internal_filename = __FILE__; \ + ::absl::raw_log_internal::internal_log_function( \ + ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, \ + __LINE__, message); \ + if (ABSL_RAW_LOG_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ + ABSL_INTERNAL_UNREACHABLE; \ } while (0) #define ABSL_INTERNAL_CHECK(condition, message) \ @@ -91,16 +90,16 @@ } \ } while (0) -#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo -#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning -#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError -#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal -#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ +#define ABSL_RAW_LOG_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOG_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \ ::absl::NormalizeLogSeverity(severity) namespace absl { ABSL_NAMESPACE_BEGIN -namespace raw_logging_internal { +namespace raw_log_internal { // Helper function to implement ABSL_RAW_LOG // Logs format... at "severity" level, reporting it @@ -109,12 +108,9 @@ namespace raw_logging_internal { void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); -// Writes the provided buffer directly to stderr, in a safe, low-level manner. -// -// In POSIX this means calling write(), which is async-signal safe and does -// not malloc. If the platform supports the SYS_write syscall, we invoke that -// directly to side-step any libc interception. -void SafeWriteToStderr(const char *s, size_t len); +// Writes the provided buffer directly to stderr, in a signal-safe, low-level +// manner. +void AsyncSignalSafeWriteToStderr(const char* s, size_t len); // compile-time function to get the "base" filename, that is, the part of // a filename after the last "/" or "\" path separator. The search starts at @@ -133,7 +129,7 @@ constexpr const char* Basename(const char* fname, int offset) { // TODO(gfalcon): Come up with a better name for this method. bool RawLoggingFullySupported(); -// Function type for a raw_logging customization hook for suppressing messages +// Function type for a raw_log customization hook for suppressing messages // by severity, and for writing custom prefixes on non-suppressed messages. // // The installed hook is called for every raw log invocation. The message will @@ -142,19 +138,20 @@ bool RawLoggingFullySupported(); // also provided with an output buffer, where it can write a custom log message // prefix. // -// The raw_logging system does not allocate memory or grab locks. User-provided +// The raw_log system does not allocate memory or grab locks. User-provided // hooks must avoid these operations, and must not throw exceptions. // // 'severity' is the severity level of the message being written. 
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro // was located. -// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the -// hook writes a prefix, it must increment *buffer and decrement *buf_size +// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buf and decrement *buf_size // accordingly. -using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, - int line, char** buffer, int* buf_size); +using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, + const char* file, int line, char** buf, + int* buf_size); -// Function type for a raw_logging customization hook called to abort a process +// Function type for a raw_log customization hook called to abort a process // when a FATAL message is logged. If the provided AbortHook() returns, the // logging system will call abort(). // @@ -162,7 +159,10 @@ using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, // was located. // The NUL-terminated logged message lives in the buffer between 'buf_start' // and 'buf_end'. 'prefix_end' points to the first non-prefix character of the -// buffer (as written by the LogPrefixHook.) +// buffer (as written by the LogFilterAndPrefixHook.) +// +// The lifetime of the filename and message buffers will not end while the +// process remains alive. using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end); @@ -184,11 +184,11 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< // // These functions are safe to call at any point during initialization; they do // not block or malloc, and are async-signal safe. -void RegisterLogPrefixHook(LogPrefixHook func); +void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func); void RegisterAbortHook(AbortHook func); void RegisterInternalLogFunction(InternalLogFunction func); -} // namespace raw_logging_internal +} // namespace raw_log_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.cc index 35c0696a34..381b913b29 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.cc @@ -19,6 +19,7 @@ #include #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/spinlock_wait.h" @@ -66,12 +67,14 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, submit_profile_data.Store(fn); } +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL // Static member variable definitions. constexpr uint32_t SpinLock::kSpinLockHeld; constexpr uint32_t SpinLock::kSpinLockCooperative; constexpr uint32_t SpinLock::kSpinLockDisabledScheduling; constexpr uint32_t SpinLock::kSpinLockSleeper; constexpr uint32_t SpinLock::kWaitTimeMask; +#endif // Uncommon constructors. SpinLock::SpinLock(base_internal::SchedulingMode mode) @@ -175,7 +178,7 @@ void SpinLock::SlowUnlock(uint32_t lock_value) { // reserve a unitary wait time to represent that a waiter exists without our // own acquisition having been contended. 
if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) { - const uint64_t wait_cycles = DecodeWaitCycles(lock_value); + const int64_t wait_cycles = DecodeWaitCycles(lock_value); ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); submit_profile_data(this, wait_cycles); ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); @@ -217,9 +220,9 @@ uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time, return clamped; } -uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { +int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { // Cast to uint32_t first to ensure bits [63:32] are cleared. - const uint64_t scaled_wait_time = + const int64_t scaled_wait_time = static_cast(lock_value & kWaitTimeMask); return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.h index c73b5e0967..09ba5824b1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock.h @@ -16,21 +16,21 @@ // Most users requiring mutual exclusion should use Mutex. // SpinLock is provided for use in two situations: -// - for use in code that Mutex itself depends on +// - for use by Abseil internal code that Mutex itself depends on // - for async signal safety (see below) // SpinLock is async signal safe. If a spinlock is used within a signal // handler, all code that acquires the lock must ensure that the signal cannot // arrive while they are holding the lock. Typically, this is done by blocking // the signal. +// +// Threads waiting on a SpinLock may be woken in an arbitrary order. #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ #define ABSL_BASE_INTERNAL_SPINLOCK_H_ -#include -#include - #include +#include #include "absl/base/attributes.h" #include "absl/base/const_init.h" @@ -39,8 +39,6 @@ #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/tsan_mutex_interface.h" -#include "absl/base/macros.h" -#include "absl/base/port.h" #include "absl/base/thread_annotations.h" namespace absl { @@ -118,6 +116,14 @@ class ABSL_LOCKABLE SpinLock { return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; } + // Return immediately if this thread holds the SpinLock exclusively. + // Otherwise, report an error by crashing with a diagnostic. + inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() { + if (!IsHeld()) { + ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); + } + } + protected: // These should not be exported except for testing. @@ -127,7 +133,7 @@ class ABSL_LOCKABLE SpinLock { int64_t wait_end_time); // Extract number of wait cycles in a lock value. - static uint64_t DecodeWaitCycles(uint32_t lock_value); + static int64_t DecodeWaitCycles(uint32_t lock_value); // Provide access to protected method above. Use for testing only. 
friend struct SpinLockTest; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_linux.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_linux.inc index 202f7cdfc8..fe8ba674f5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_linux.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_linux.inc @@ -57,13 +57,10 @@ static_assert(sizeof(std::atomic) == sizeof(int), extern "C" { ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( - std::atomic *w, uint32_t value, int loop, + std::atomic *w, uint32_t value, int, absl::base_internal::SchedulingMode) { absl::base_internal::ErrnoSaver errno_saver; - struct timespec tm; - tm.tv_sec = 0; - tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); - syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm); + syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr); } ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_wait.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_wait.h index 579bd09fa0..9a1adcda5e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_wait.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_wait.h @@ -39,6 +39,8 @@ struct SpinLockWaitTransition { // satisfying 0<=i *w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_win32.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_win32.inc index 9d224813a5..934c2016fb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_win32.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/spinlock_win32.inc @@ -27,7 +27,10 @@ void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( } else if (loop == 1) { Sleep(0); } else { - Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000); + // SpinLockSuggestedDelayNS() always returns a positive integer, so this + // static_cast is safe. 
+ Sleep(static_cast( + absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000)); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/strerror.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/strerror.cc index 0d6226fd0a..de91c05e07 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/strerror.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/strerror.cc @@ -66,8 +66,8 @@ constexpr int kSysNerr = 135; std::array* NewStrErrorTable() { auto* table = new std::array; - for (int i = 0; i < static_cast(table->size()); ++i) { - (*table)[i] = StrErrorInternal(i); + for (size_t i = 0; i < table->size(); ++i) { + (*table)[i] = StrErrorInternal(static_cast(i)); } return table; } @@ -77,8 +77,8 @@ std::array* NewStrErrorTable() { std::string StrError(int errnum) { absl::base_internal::ErrnoSaver errno_saver; static const auto* table = NewStrErrorTable(); - if (errnum >= 0 && errnum < static_cast(table->size())) { - return (*table)[errnum]; + if (errnum >= 0 && static_cast(errnum) < table->size()) { + return (*table)[static_cast(errnum)]; } return StrErrorInternal(errnum); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo.cc index 08a1e28894..da499d3a77 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo.cc @@ -117,25 +117,26 @@ int Win32NumCPUs() { } } free(info); - return logicalProcessorCount; + return static_cast(logicalProcessorCount); } #endif } // namespace - static int GetNumCPUs() { #if defined(__myriad2__) return 1; #elif defined(_WIN32) - const unsigned hardware_concurrency = Win32NumCPUs(); + const int hardware_concurrency = Win32NumCPUs(); return hardware_concurrency ? hardware_concurrency : 1; +#elif defined(_AIX) + return sysconf(_SC_NPROCESSORS_ONLN); #else // Other possibilities: // - Read /sys/devices/system/cpu/online and use cpumask_parse() // - sysconf(_SC_NPROCESSORS_ONLN) - return std::thread::hardware_concurrency(); + return static_cast(std::thread::hardware_concurrency()); #endif } @@ -188,12 +189,15 @@ static double GetNominalCPUFrequency() { // and the memory location pointed to by value is set to the value read. static bool ReadLongFromFile(const char *file, long *value) { bool ret = false; - int fd = open(file, O_RDONLY); + int fd = open(file, O_RDONLY | O_CLOEXEC); if (fd != -1) { char line[1024]; char *err; memset(line, '\0', sizeof(line)); - int len = read(fd, line, sizeof(line) - 1); + ssize_t len; + do { + len = read(fd, line, sizeof(line) - 1); + } while (len < 0 && errno == EINTR); if (len <= 0) { ret = false; } else { @@ -375,7 +379,7 @@ pid_t GetTID() { #endif pid_t GetTID() { - return syscall(SYS_gettid); + return static_cast(syscall(SYS_gettid)); } #elif defined(__akaros__) @@ -428,11 +432,11 @@ static constexpr int kBitsPerWord = 32; // tid_array is uint32_t. // Returns the TID to tid_array. 
static void FreeTID(void *v) { intptr_t tid = reinterpret_cast(v); - int word = tid / kBitsPerWord; + intptr_t word = tid / kBitsPerWord; uint32_t mask = ~(1u << (tid % kBitsPerWord)); absl::base_internal::SpinLockHolder lock(&tid_lock); assert(0 <= word && static_cast(word) < tid_array->size()); - (*tid_array)[word] &= mask; + (*tid_array)[static_cast(word)] &= mask; } static void InitGetTID() { @@ -454,7 +458,7 @@ pid_t GetTID() { intptr_t tid = reinterpret_cast(pthread_getspecific(tid_key)); if (tid != 0) { - return tid; + return static_cast(tid); } int bit; // tid_array[word] = 1u << bit; @@ -475,7 +479,8 @@ pid_t GetTID() { while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) { ++bit; } - tid = (word * kBitsPerWord) + bit; + tid = + static_cast((word * kBitsPerWord) + static_cast(bit)); (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated. } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo_test.cc index 5f9e45f6aa..f305b6c532 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/sysinfo_test.cc @@ -37,29 +37,6 @@ TEST(SysinfoTest, NumCPUs) { << "NumCPUs() should not have the default value of 0"; } -// Ensure that NominalCPUFrequency returns a reasonable value, or 1.00 on -// platforms where the CPU frequency is not available through sysfs. -// -// POWER is particularly problematic here; some Linux kernels expose the CPU -// frequency, while others do not. Since we can't predict a priori what a given -// machine is going to do, just disable this test on POWER on Linux. -#if !(defined(__linux) && (defined(__ppc64__) || defined(__PPC64__))) -TEST(SysinfoTest, NominalCPUFrequency) { - // Linux only exposes the CPU frequency on certain architectures, and - // Emscripten doesn't expose it at all. -#if defined(__linux__) && \ - (defined(__aarch64__) || defined(__hppa__) || defined(__mips__) || \ - defined(__riscv) || defined(__s390x__)) || \ - defined(__EMSCRIPTEN__) - EXPECT_EQ(NominalCPUFrequency(), 1.0) - << "CPU frequency detection was fixed! Please update unittest."; -#else - EXPECT_GE(NominalCPUFrequency(), 1000.0) - << "NominalCPUFrequency() did not return a reasonable value"; -#endif -} -#endif - TEST(SysinfoTest, GetTID) { EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test. #ifdef __native_client__ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_annotations.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_annotations.h index 4dab6a9c15..8c5c67e0df 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_annotations.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_annotations.h @@ -38,6 +38,13 @@ #ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ #define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ +// ABSL_LEGACY_THREAD_ANNOTATIONS is a *temporary* compatibility macro that can +// be defined on the compile command-line to restore the legacy spellings of the +// thread annotations macros/functions. The macros in this file are available +// under ABSL_ prefixed spellings in absl/base/thread_annotations.h. This macro +// and the legacy spellings will be removed in the future. 
+#ifdef ABSL_LEGACY_THREAD_ANNOTATIONS + #if defined(__clang__) #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else @@ -268,4 +275,6 @@ inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS { } // namespace thread_safety_analysis +#endif // defined(ABSL_LEGACY_THREAD_ANNOTATIONS) + #endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_identity.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_identity.cc index 9950e63a79..79853f09f5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_identity.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/thread_identity.cc @@ -14,7 +14,7 @@ #include "absl/base/internal/thread_identity.h" -#ifndef _WIN32 +#if !defined(_WIN32) || defined(__MINGW32__) #include #include #endif @@ -56,6 +56,7 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { // *different* instances of this ptr. // Apple platforms have the visibility attribute, but issue a compile warning // that protected visibility is unsupported. +ABSL_CONST_INIT // Must come before __attribute__((visibility("protected"))) #if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) __attribute__((visibility("protected"))) #endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc index fc07e30057..b1c396c69c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc @@ -24,8 +24,13 @@ #ifdef __GLIBC__ #include #elif defined(__FreeBSD__) -#include +// clang-format off +// This order does actually matter =(. #include +#include +// clang-format on + +#include "absl/base/call_once.h" #endif #endif @@ -49,12 +54,6 @@ double UnscaledCycleClock::Frequency() { #elif defined(__x86_64__) -int64_t UnscaledCycleClock::Now() { - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -} - double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); } @@ -87,6 +86,10 @@ int64_t UnscaledCycleClock::Now() { double UnscaledCycleClock::Frequency() { #ifdef __GLIBC__ return __ppc_get_timebase_freq(); +#elif defined(_AIX) + // This is the same constant value as returned by + // __ppc_get_timebase_freq(). + return static_cast(512000000); #elif defined(__FreeBSD__) static once_flag init_timebase_frequency_once; static double timebase_frequency = 0.0; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.h index 681ff8f996..cc1276ba08 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.h @@ -42,49 +42,11 @@ #include #endif -#include "absl/base/port.h" - -// The following platforms have an implementation of a hardware counter. 
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ - defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \ - defined(_M_IX86) || defined(_M_X64) -#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 -#else -#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 -#endif - -// The following platforms often disable access to the hardware -// counter (through a sandbox) even if the underlying hardware has a -// usable counter. The CycleTimer interface also requires a *scaled* -// CycleClock that runs at atleast 1 MHz. We've found some Android -// ARM64 devices where this is not the case, so we disable it by -// default on Android ARM64. -#if defined(__native_client__) || \ - (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \ - (defined(__ANDROID__) && defined(__aarch64__)) -#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 -#else -#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1 -#endif - -// UnscaledCycleClock is an optional internal feature. -// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. -// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 -#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) -#define ABSL_USE_UNSCALED_CYCLECLOCK \ - (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ - ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) -#endif +#include "absl/base/config.h" +#include "absl/base/internal/unscaledcycleclock_config.h" #if ABSL_USE_UNSCALED_CYCLECLOCK -// This macro can be used to test if UnscaledCycleClock::Frequency() -// is NominalCPUFrequency() on a particular platform. -#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \ - defined(_M_IX86) || defined(_M_X64)) -#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace time_internal { @@ -115,6 +77,16 @@ class UnscaledCycleClock { friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; }; +#if defined(__x86_64__) + +inline int64_t UnscaledCycleClock::Now() { + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return static_cast((high << 32) | low); +} + +#endif + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h new file mode 100644 index 0000000000..24b324ac99 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h @@ -0,0 +1,62 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ + +#if defined(__APPLE__) +#include +#endif + +// The following platforms have an implementation of a hardware counter. 
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \ + defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC)) +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 +#else +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 +#endif + +// The following platforms often disable access to the hardware +// counter (through a sandbox) even if the underlying hardware has a +// usable counter. The CycleTimer interface also requires a *scaled* +// CycleClock that runs at atleast 1 MHz. We've found some Android +// ARM64 devices where this is not the case, so we disable it by +// default on Android ARM64. +#if defined(__native_client__) || (defined(__APPLE__)) || \ + (defined(__ANDROID__) && defined(__aarch64__)) +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 +#else +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1 +#endif + +// UnscaledCycleClock is an optional internal feature. +// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. +// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 +#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) +#define ABSL_USE_UNSCALED_CYCLECLOCK \ + (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ + ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) +#endif + +#if ABSL_USE_UNSCALED_CYCLECLOCK +// This macro can be used to test if UnscaledCycleClock::Frequency() +// is NominalCPUFrequency() on a particular platform. +#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \ + defined(_M_IX86) || defined(_M_X64)) +#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +#endif +#endif + +#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/invoke_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/invoke_test.cc index bcdef36c3b..7be26f649f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/invoke_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/invoke_test.cc @@ -31,6 +31,14 @@ namespace { int Function(int a, int b) { return a - b; } +void VoidFunction(int& a, int& b) { + a += b; + b = a - b; + a -= b; +} + +int ZeroArgFunction() { return -1937; } + int Sink(std::unique_ptr p) { return *p; } @@ -223,6 +231,100 @@ TEST(InvokeTest, SfinaeFriendly) { EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); } +TEST(IsInvocableRTest, CallableExactMatch) { + static_assert( + base_internal::is_invocable_r::value, + "Should be true for exact match of types on a free function"); +} + +TEST(IsInvocableRTest, CallableArgumentConversionMatch) { + static_assert( + base_internal::is_invocable_r::value, + "Should be true for convertible argument type"); +} + +TEST(IsInvocableRTest, CallableReturnConversionMatch) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for convertible return type"); +} + +TEST(IsInvocableRTest, CallableReturnVoid) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for void expected and actual return types"); + static_assert( + base_internal::is_invocable_r::value, + "Should be true for void expected and non-void actual return types"); +} + +TEST(IsInvocableRTest, CallableRefQualifierMismatch) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for reference constness mismatch"); + static_assert(!base_internal::is_invocable_r::value, + "Should be false for reference value category 
mismatch"); +} + +TEST(IsInvocableRTest, CallableArgumentTypeMismatch) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for argument type mismatch"); +} + +TEST(IsInvocableRTest, CallableReturnTypeMismatch) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for return type mismatch"); +} + +TEST(IsInvocableRTest, CallableTooFewArgs) { + static_assert( + !base_internal::is_invocable_r::value, + "Should be false for too few arguments"); +} + +TEST(IsInvocableRTest, CallableTooManyArgs) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for too many arguments"); +} + +TEST(IsInvocableRTest, MemberFunctionAndReference) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a member function " + "and class reference"); +} + +TEST(IsInvocableRTest, MemberFunctionAndPointer) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a member function " + "and class pointer"); +} + +TEST(IsInvocableRTest, DataMemberAndReference) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a data member and " + "class reference"); +} + +TEST(IsInvocableRTest, DataMemberAndPointer) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a data member and " + "class pointer"); +} + +TEST(IsInvocableRTest, CallableZeroArgs) { + static_assert( + base_internal::is_invocable_r::value, + "Should be true for exact match for a zero-arg free function"); +} + } // namespace } // namespace base_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.cc index 72312afd36..60a8fc1f89 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.cc @@ -16,6 +16,8 @@ #include +#include "absl/base/attributes.h" + namespace absl { ABSL_NAMESPACE_BEGIN @@ -23,5 +25,31 @@ std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); return os << "absl::LogSeverity(" << static_cast(s) << ")"; } + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) { + switch (s) { + case absl::LogSeverityAtLeast::kInfo: + case absl::LogSeverityAtLeast::kWarning: + case absl::LogSeverityAtLeast::kError: + case absl::LogSeverityAtLeast::kFatal: + return os << ">=" << static_cast(s); + case absl::LogSeverityAtLeast::kInfinity: + return os << "INFINITY"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) { + switch (s) { + case absl::LogSeverityAtMost::kInfo: + case absl::LogSeverityAtMost::kWarning: + case absl::LogSeverityAtMost::kError: + case absl::LogSeverityAtMost::kFatal: + return os << "<=" << static_cast(s); + case absl::LogSeverityAtMost::kNegativeInfinity: + return os << "NEGATIVE_INFINITY"; + } + return os; +} ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.h index 2236422462..8bdca38b5f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.h +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity.h @@ -115,6 +115,57 @@ constexpr absl::LogSeverity NormalizeLogSeverity(int s) { // unspecified; do not rely on it. std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); +// Enums representing a lower bound for LogSeverity. APIs that only operate on +// messages of at least a certain level (for example, `SetMinLogLevel()`) use +// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is +// a level above all threshold levels and therefore no log message will +// ever meet this threshold. +enum class LogSeverityAtLeast : int { + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), + kInfinity = 1000, +}; + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); + +// Enums representing an upper bound for LogSeverity. APIs that only operate on +// messages of at most a certain level (for example, buffer all messages at or +// below a certain level) use this type to specify that level. +// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold +// levels and therefore will exclude all log messages. +enum class LogSeverityAtMost : int { + kNegativeInfinity = -1000, + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), +}; + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); + +#define COMPOP(op1, op2, T) \ + constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \ + return static_cast(lhs) op1 rhs; \ + } \ + constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \ + return lhs op2 static_cast(rhs); \ + } + +// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ +// `LogSeverityAtMost` are only supported in one direction. 
+// Valid checks are: +// LogSeverity >= LogSeverityAtLeast +// LogSeverity < LogSeverityAtLeast +// LogSeverity <= LogSeverityAtMost +// LogSeverity > LogSeverityAtMost +COMPOP(>, <, LogSeverityAtLeast) +COMPOP(<=, >=, LogSeverityAtLeast) +COMPOP(<, >, LogSeverityAtMost) +COMPOP(>=, <=, LogSeverityAtMost) +#undef COMPOP + ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity_test.cc index 55b26d1774..16091a5bea 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/log_severity_test.cc @@ -35,7 +35,8 @@ using ::testing::IsTrue; using ::testing::TestWithParam; using ::testing::Values; -std::string StreamHelper(absl::LogSeverity value) { +template +std::string StreamHelper(T value) { std::ostringstream stream; stream << value; return stream.str(); @@ -201,4 +202,44 @@ TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) { IsTrue()); EXPECT_THAT(reparsed_value, Eq(to_unparse)); } + +TEST(LogThresholdTest, LogSeverityAtLeastTest) { + EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal); + EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo); + + EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError); + EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo); +} + +TEST(LogThresholdTest, LogSeverityAtMostTest) { + EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning); + EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal); + + EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError); + EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError); +} + +TEST(LogThresholdTest, Extremes) { + EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity); + EXPECT_GT(absl::LogSeverity::kInfo, + absl::LogSeverityAtMost::kNegativeInfinity); +} + +TEST(LogThresholdTest, Output) { + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning), + Eq(">=WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity), + Eq("INFINITY")); + + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity), + Eq("NEGATIVE_INFINITY")); +} + } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/optimization.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/optimization.h index d090be1286..d706100cca 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/optimization.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/optimization.h @@ -91,6 +91,7 @@ #define ABSL_CACHELINE_SIZE 64 #endif #endif +#endif #ifndef ABSL_CACHELINE_SIZE // A reasonable default guess. 
Note that overestimates tend to waste more @@ -141,12 +142,11 @@ // the generated machine code. // 3) Prefer applying this attribute to individual variables. Avoid // applying it to types. This tends to localize the effect. +#if defined(__clang__) || defined(__GNUC__) #define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE))) #elif defined(_MSC_VER) -#define ABSL_CACHELINE_SIZE 64 #define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE)) #else -#define ABSL_CACHELINE_SIZE 64 #define ABSL_CACHELINE_ALIGNED #endif @@ -181,35 +181,43 @@ #define ABSL_PREDICT_TRUE(x) (x) #endif -// ABSL_INTERNAL_ASSUME(cond) +// ABSL_ASSUME(cond) +// // Informs the compiler that a condition is always true and that it can assume -// it to be true for optimization purposes. The call has undefined behavior if -// the condition is false. +// it to be true for optimization purposes. +// +// WARNING: If the condition is false, the program can produce undefined and +// potentially dangerous behavior. +// // In !NDEBUG mode, the condition is checked with an assert(). -// NOTE: The expression must not have side effects, as it will only be evaluated -// in some compilation modes and not others. +// +// NOTE: The expression must not have side effects, as it may only be evaluated +// in some compilation modes and not others. Some compilers may issue a warning +// if the compiler cannot prove the expression has no side effects. For example, +// the expression should not use a function call since the compiler cannot prove +// that a function call does not have side effects. // // Example: // // int x = ...; -// ABSL_INTERNAL_ASSUME(x >= 0); +// ABSL_ASSUME(x >= 0); // // The compiler can optimize the division to a simple right shift using the // // assumption specified above. // int y = x / 16; // #if !defined(NDEBUG) -#define ABSL_INTERNAL_ASSUME(cond) assert(cond) +#define ABSL_ASSUME(cond) assert(cond) #elif ABSL_HAVE_BUILTIN(__builtin_assume) -#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond) +#define ABSL_ASSUME(cond) __builtin_assume(cond) #elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) -#define ABSL_INTERNAL_ASSUME(cond) \ +#define ABSL_ASSUME(cond) \ do { \ if (!(cond)) __builtin_unreachable(); \ } while (0) #elif defined(_MSC_VER) -#define ABSL_INTERNAL_ASSUME(cond) __assume(cond) +#define ABSL_ASSUME(cond) __assume(cond) #else -#define ABSL_INTERNAL_ASSUME(cond) \ +#define ABSL_ASSUME(cond) \ do { \ static_cast(false && (cond)); \ } while (0) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/options.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/options.h index 230bf1eecc..5c162a3891 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/options.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/options.h @@ -67,12 +67,6 @@ #ifndef ABSL_BASE_OPTIONS_H_ #define ABSL_BASE_OPTIONS_H_ -// Include a standard library header to allow configuration based on the -// standard library in use. 
-#ifdef __cplusplus -#include -#endif - // ----------------------------------------------------------------------------- // Type Compatibility Options // ----------------------------------------------------------------------------- diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/policy_checks.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/policy_checks.h index 06b3243916..2626fb6a36 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/policy_checks.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/policy_checks.h @@ -44,17 +44,17 @@ // Toolchain Check // ----------------------------------------------------------------------------- -// We support MSVC++ 14.0 update 2 and later. +// We support Visual Studio 2017 (MSVC++ 15.0) and later. // This minimum will go up. -#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__) -#error "This package requires Visual Studio 2015 Update 2 or higher." +#if defined(_MSC_VER) && _MSC_VER < 1910 && !defined(__clang__) +#error "This package requires Visual Studio 2017 (MSVC++ 15.0) or higher." #endif -// We support gcc 4.7 and later. +// We support gcc 5 and later. // This minimum will go up. #if defined(__GNUC__) && !defined(__clang__) -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7) -#error "This package requires gcc 4.7 or higher." +#if __GNUC__ < 5 +#error "This package requires gcc 5 or higher." #endif #endif @@ -69,13 +69,15 @@ // C++ Version Check // ----------------------------------------------------------------------------- -// Enforce C++11 as the minimum. Note that Visual Studio has not -// advanced __cplusplus despite being good enough for our purposes, so -// so we exempt it from the check. -#if defined(__cplusplus) && !defined(_MSC_VER) -#if __cplusplus < 201103L -#error "C++ versions less than C++11 are not supported." -#endif +// Enforce C++14 as the minimum. +#if defined(_MSVC_LANG) +#if _MSVC_LANG < 201402L +#error "C++ versions less than C++14 are not supported." +#endif // _MSVC_LANG < 201402L +#elif defined(__cplusplus) +#if __cplusplus < 201402L +#error "C++ versions less than C++14 are not supported." 
+#endif // __cplusplus < 201402L #endif // ----------------------------------------------------------------------------- diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/spinlock_test_common.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/spinlock_test_common.cc index 2b572c5b3f..52ecf58012 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/spinlock_test_common.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/spinlock_test_common.cc @@ -34,7 +34,7 @@ #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/notification.h" -constexpr int32_t kNumThreads = 10; +constexpr uint32_t kNumThreads = 10; constexpr int32_t kIters = 1000; namespace absl { @@ -48,14 +48,14 @@ struct SpinLockTest { int64_t wait_end_time) { return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time); } - static uint64_t DecodeWaitCycles(uint32_t lock_value) { + static int64_t DecodeWaitCycles(uint32_t lock_value) { return SpinLock::DecodeWaitCycles(lock_value); } }; namespace { -static constexpr int kArrayLength = 10; +static constexpr size_t kArrayLength = 10; static uint32_t values[kArrayLength]; ABSL_CONST_INIT static SpinLock static_cooperative_spinlock( @@ -79,11 +79,11 @@ static uint32_t Hash32(uint32_t a, uint32_t c) { return c; } -static void TestFunction(int thread_salt, SpinLock* spinlock) { +static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) { for (int i = 0; i < kIters; i++) { SpinLockHolder h(spinlock); - for (int j = 0; j < kArrayLength; j++) { - const int index = (j + thread_salt) % kArrayLength; + for (size_t j = 0; j < kArrayLength; j++) { + const size_t index = (j + thread_salt) % kArrayLength; values[index] = Hash32(values[index], thread_salt); std::this_thread::yield(); } @@ -93,7 +93,7 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) { static void ThreadedTest(SpinLock* spinlock) { std::vector threads; threads.reserve(kNumThreads); - for (int i = 0; i < kNumThreads; ++i) { + for (uint32_t i = 0; i < kNumThreads; ++i) { threads.push_back(std::thread(TestFunction, i, spinlock)); } for (auto& thread : threads) { @@ -101,7 +101,7 @@ static void ThreadedTest(SpinLock* spinlock) { } SpinLockHolder h(spinlock); - for (int i = 1; i < kArrayLength; i++) { + for (size_t i = 1; i < kArrayLength; i++) { EXPECT_EQ(values[0], values[i]); } } @@ -133,28 +133,28 @@ TEST(SpinLock, WaitCyclesEncoding) { // but the lower kProfileTimestampShift will be dropped. const int kMaxCyclesShift = 32 - kLockwordReservedShift + kProfileTimestampShift; - const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1; + const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1; // These bits should be zero after encoding. const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1; // These bits are dropped when wait cycles are encoded. - const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1; + const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1; // Test a bunch of random values std::default_random_engine generator; // Shift to avoid overflow below. 
- std::uniform_int_distribution time_distribution( - 0, std::numeric_limits::max() >> 4); - std::uniform_int_distribution cycle_distribution(0, kMaxCycles); + std::uniform_int_distribution time_distribution( + 0, std::numeric_limits::max() >> 3); + std::uniform_int_distribution cycle_distribution(0, kMaxCycles); for (int i = 0; i < 100; i++) { int64_t start_time = time_distribution(generator); int64_t cycles = cycle_distribution(generator); int64_t end_time = start_time + cycles; uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time); - EXPECT_EQ(0, lock_value & kLockwordReservedMask); - uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value); + EXPECT_EQ(0u, lock_value & kLockwordReservedMask); + int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value); EXPECT_EQ(0, decoded & kProfileTimestampMask); EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded); } @@ -178,21 +178,21 @@ TEST(SpinLock, WaitCyclesEncoding) { // Test clamping uint32_t max_value = SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles); - uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value); - uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask; + int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value); + int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask; EXPECT_EQ(expected_max_value_decoded, max_value_decoded); const int64_t step = (1 << kProfileTimestampShift); uint32_t after_max_value = SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step); - uint64_t after_max_value_decoded = + int64_t after_max_value_decoded = SpinLockTest::DecodeWaitCycles(after_max_value); EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded); uint32_t before_max_value = SpinLockTest::EncodeWaitCycles( start_time, start_time + kMaxCycles - step); - uint64_t before_max_value_decoded = - SpinLockTest::DecodeWaitCycles(before_max_value); + int64_t before_max_value_decoded = + SpinLockTest::DecodeWaitCycles(before_max_value); EXPECT_GT(expected_max_value_decoded, before_max_value_decoded); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/thread_annotations.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/thread_annotations.h index 9695f6de67..bc8a620347 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/thread_annotations.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/thread_annotations.h @@ -154,8 +154,8 @@ // ABSL_LOCKS_EXCLUDED() // -// Documents the locks acquired in the body of the function. These locks -// cannot be held when calling this function (as Abseil's `Mutex` locks are +// Documents the locks that cannot be held by callers of this function, as they +// might be acquired by this function (Abseil's `Mutex` locks are // non-reentrant). #if ABSL_HAVE_ATTRIBUTE(locks_excluded) #define ABSL_LOCKS_EXCLUDED(...) 
__attribute__((locks_excluded(__VA_ARGS__))) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/throw_delegate_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/throw_delegate_test.cc index 5ba4ce55e6..e74362b701 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/throw_delegate_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/throw_delegate_test.cc @@ -78,29 +78,97 @@ void ExpectThrowNoWhat(void (*f)()) { #endif } -TEST(ThrowHelper, Test) { - // Not using EXPECT_THROW because we want to check the .what() message too. +TEST(ThrowDelegate, ThrowStdLogicErrorChar) { ExpectThrowChar(ThrowStdLogicError); +} + +TEST(ThrowDelegate, ThrowStdInvalidArgumentChar) { ExpectThrowChar(ThrowStdInvalidArgument); +} + +TEST(ThrowDelegate, ThrowStdDomainErrorChar) { ExpectThrowChar(ThrowStdDomainError); +} + +TEST(ThrowDelegate, ThrowStdLengthErrorChar) { ExpectThrowChar(ThrowStdLengthError); +} + +TEST(ThrowDelegate, ThrowStdOutOfRangeChar) { ExpectThrowChar(ThrowStdOutOfRange); +} + +TEST(ThrowDelegate, ThrowStdRuntimeErrorChar) { ExpectThrowChar(ThrowStdRuntimeError); +} + +TEST(ThrowDelegate, ThrowStdRangeErrorChar) { ExpectThrowChar(ThrowStdRangeError); +} + +TEST(ThrowDelegate, ThrowStdOverflowErrorChar) { ExpectThrowChar(ThrowStdOverflowError); +} + +TEST(ThrowDelegate, ThrowStdUnderflowErrorChar) { ExpectThrowChar(ThrowStdUnderflowError); +} +TEST(ThrowDelegate, ThrowStdLogicErrorString) { ExpectThrowString(ThrowStdLogicError); - ExpectThrowString(ThrowStdInvalidArgument); - ExpectThrowString(ThrowStdDomainError); - ExpectThrowString(ThrowStdLengthError); - ExpectThrowString(ThrowStdOutOfRange); - ExpectThrowString(ThrowStdRuntimeError); - ExpectThrowString(ThrowStdRangeError); - ExpectThrowString(ThrowStdOverflowError); - ExpectThrowString(ThrowStdUnderflowError); +} - ExpectThrowNoWhat(ThrowStdBadFunctionCall); +TEST(ThrowDelegate, ThrowStdInvalidArgumentString) { + ExpectThrowString(ThrowStdInvalidArgument); +} + +TEST(ThrowDelegate, ThrowStdDomainErrorString) { + ExpectThrowString(ThrowStdDomainError); +} + +TEST(ThrowDelegate, ThrowStdLengthErrorString) { + ExpectThrowString(ThrowStdLengthError); +} + +TEST(ThrowDelegate, ThrowStdOutOfRangeString) { + ExpectThrowString(ThrowStdOutOfRange); +} + +TEST(ThrowDelegate, ThrowStdRuntimeErrorString) { + ExpectThrowString(ThrowStdRuntimeError); +} + +TEST(ThrowDelegate, ThrowStdRangeErrorString) { + ExpectThrowString(ThrowStdRangeError); +} + +TEST(ThrowDelegate, ThrowStdOverflowErrorString) { + ExpectThrowString(ThrowStdOverflowError); +} + +TEST(ThrowDelegate, ThrowStdUnderflowErrorString) { + ExpectThrowString(ThrowStdUnderflowError); +} + +TEST(ThrowDelegate, ThrowStdBadFunctionCallNoWhat) { +#ifdef ABSL_HAVE_EXCEPTIONS + try { + ThrowStdBadFunctionCall(); + FAIL() << "Didn't throw"; + } catch (const std::bad_function_call&) { + } +#ifdef _LIBCPP_VERSION + catch (const std::exception&) { + // https://reviews.llvm.org/D92397 causes issues with the vtable for + // std::bad_function_call when using libc++ as a shared library. 
+ } +#endif +#else + EXPECT_DEATH_IF_SUPPORTED(ThrowStdBadFunctionCall(), ""); +#endif +} + +TEST(ThrowDelegate, ThrowStdBadAllocNoWhat) { ExpectThrowNoWhat(ThrowStdBadAlloc); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/cleanup/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/cleanup/CMakeLists.txt index 26a6d0dce3..f5af40b44c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/cleanup/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/cleanup/CMakeLists.txt @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Internal-only target, do not depend on directly. absl_cc_library( NAME cleanup_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/CMakeLists.txt index 9b8a7509ad..b3776aed96 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/CMakeLists.txt @@ -28,6 +28,7 @@ absl_cc_library( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::container_common + absl::common_policy_traits absl::compare absl::compressed_tuple absl::container_memory @@ -35,12 +36,14 @@ absl_cc_library( absl::core_headers absl::layout absl::memory + absl::raw_logging_internal absl::strings absl::throw_delegate absl::type_traits absl::utility ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME btree_test_common @@ -69,6 +72,7 @@ absl_cc_test( LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS + absl::algorithm_container absl::btree absl::btree_test_common absl::compare @@ -76,13 +80,14 @@ absl_cc_test( absl::counting_allocator absl::flags absl::hash_testing + absl::random_random absl::raw_logging_internal absl::strings absl::test_instance_tracker - absl::type_traits GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME compressed_tuple @@ -161,6 +166,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME inlined_vector_internal @@ -190,9 +196,11 @@ absl_cc_library( absl::inlined_vector_internal absl::throw_delegate absl::memory + absl::type_traits PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME counting_allocator @@ -239,6 +247,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME test_instance_tracker @@ -274,6 +283,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory + absl::core_headers absl::hash_function_defaults absl::raw_hash_map absl::algorithm_container @@ -347,8 +357,9 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory + absl::core_headers absl::hash_function_defaults - absl::node_hash_policy + absl::node_slot_policy absl::raw_hash_map absl::algorithm_container absl::memory @@ -381,8 +392,9 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::core_headers absl::hash_function_defaults - absl::node_hash_policy + absl::node_slot_policy absl::raw_hash_set absl::algorithm_container absl::memory @@ -407,6 +419,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME container_memory @@ -436,6 +449,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME hash_function_defaults @@ -468,6 +482,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_generator_testing @@ -485,6 +500,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_policy_testing @@ -510,6 +526,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME hash_policy_traits @@ -518,6 +535,7 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::common_policy_traits absl::meta PUBLIC ) @@ -534,6 +552,32 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + common_policy_traits + HDRS + "internal/common_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::meta + PUBLIC +) + +absl_cc_test( + NAME + common_policy_traits_test + SRCS + "internal/common_policy_traits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::common_policy_traits + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. absl_cc_library( NAME hashtablez_sampler @@ -546,8 +590,8 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::base + absl::config absl::exponential_biased - absl::have_sse absl::sample_recorder absl::synchronization ) @@ -560,11 +604,12 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS + absl::config absl::hashtablez_sampler - absl::have_sse GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME hashtable_debug @@ -576,6 +621,7 @@ absl_cc_library( absl::hashtable_debug_hooks ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME hashtable_debug_hooks @@ -588,20 +634,12 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME - have_sse + node_slot_policy HDRS - "internal/have_sse.h" - COPTS - ${ABSL_DEFAULT_COPTS} -) - -absl_cc_library( - NAME - node_hash_policy - HDRS - "internal/node_hash_policy.h" + "internal/node_slot_policy.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS @@ -611,17 +649,18 @@ absl_cc_library( absl_cc_test( NAME - node_hash_policy_test + node_slot_policy_test SRCS - "internal/node_hash_policy_test.cc" + "internal/node_slot_policy_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_policy_traits - absl::node_hash_policy + absl::node_slot_policy GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME raw_hash_map @@ -636,6 +675,7 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME container_common @@ -647,6 +687,7 @@ absl_cc_library( absl::type_traits ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME raw_hash_set @@ -666,10 +707,10 @@ absl_cc_library( absl::endian absl::hash_policy_traits absl::hashtable_debug_hooks - absl::have_sse absl::memory absl::meta absl::optional + absl::prefetch absl::utility absl::hashtablez_sampler PUBLIC @@ -684,13 +725,17 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::container_memory + absl::flat_hash_map + absl::flat_hash_set absl::hash_function_defaults absl::hash_policy_testing absl::hashtable_debug absl::raw_hash_set absl::base absl::config + absl::log absl::core_headers + absl::prefetch absl::raw_logging_internal absl::strings GTest::gmock_main @@ -710,6 +755,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME layout @@ -743,6 +789,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME tracked @@ -755,6 +802,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_constructor_test @@ -769,6 +817,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_lookup_test @@ -783,6 +832,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_members_test @@ -796,6 +846,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_modifiers_test @@ -810,6 +861,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_constructor_test @@ -824,6 +876,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_lookup_test @@ -838,6 +891,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_members_test @@ -851,6 +905,7 @@ absl_cc_library( TESTONLY ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_modifiers_test @@ -894,3 +949,18 @@ absl_cc_test( absl::unordered_map_modifiers_test GTest::gmock_main ) + +absl_cc_test( + NAME + sample_element_size_test + SRCS + "sample_element_size_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::flat_hash_map + absl::flat_hash_set + absl::node_hash_map + absl::node_hash_set + GTest::gmock_main +) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_benchmark.cc index 65b6790b71..0d26fd424c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_benchmark.cc @@ -27,6 +27,7 @@ #include #include "benchmark/benchmark.h" +#include "absl/algorithm/container.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" @@ -34,9 +35,10 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/internal/hashtable_debug.h" -#include "absl/flags/flag.h" #include "absl/hash/hash.h" +#include "absl/log/log.h" #include "absl/memory/memory.h" +#include "absl/random/random.h" #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" @@ -153,9 +155,9 @@ void BM_FullLookup(benchmark::State& state) { BM_LookupImpl(state, true); } -// Benchmark deletion of values from a container. +// Benchmark erasing values from a container. template -void BM_Delete(benchmark::State& state) { +void BM_Erase(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); @@ -180,9 +182,9 @@ void BM_Delete(benchmark::State& state) { } } -// Benchmark deletion of multiple values from a container. +// Benchmark erasing multiple values from a container. 
template -void BM_DeleteRange(benchmark::State& state) { +void BM_EraseRange(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); @@ -222,6 +224,40 @@ void BM_DeleteRange(benchmark::State& state) { } } +// Predicate that erases every other element. We can't use a lambda because +// C++11 doesn't support generic lambdas. +// TODO(b/207389011): consider adding benchmarks that remove different fractions +// of keys (e.g. 10%, 90%). +struct EraseIfPred { + uint64_t i = 0; + template + bool operator()(const T&) { + return ++i % 2; + } +}; + +// Benchmark erasing multiple values from a container with a predicate. +template +void BM_EraseIf(benchmark::State& state) { + using V = typename remove_pair_const::type; + std::vector values = GenerateValues(kBenchmarkValues); + + // Removes half of the keys per batch. + const int batch_size = (kBenchmarkValues + 1) / 2; + EraseIfPred pred; + while (state.KeepRunningBatch(batch_size)) { + state.PauseTiming(); + { + T container(values.begin(), values.end()); + state.ResumeTiming(); + erase_if(container, pred); + benchmark::DoNotOptimize(container); + state.PauseTiming(); + } + state.ResumeTiming(); + } +} + // Benchmark steady-state insert (into first half of range) and remove (from // second half of range), treating the container approximately like a queue with // log-time access for all elements. This benchmark does not test the case where @@ -477,14 +513,14 @@ BTREE_TYPES(Time); void BM_##type##_##func(benchmark::State& state) { BM_##func(state); } \ BENCHMARK(BM_##type##_##func) -#define MY_BENCHMARK3(type) \ +#define MY_BENCHMARK3_STL(type) \ MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ - MY_BENCHMARK4(type, Delete); \ - MY_BENCHMARK4(type, DeleteRange); \ + MY_BENCHMARK4(type, Erase); \ + MY_BENCHMARK4(type, EraseRange); \ MY_BENCHMARK4(type, QueueAddRem); \ MY_BENCHMARK4(type, MixedAddRem); \ MY_BENCHMARK4(type, Fifo); \ @@ -492,9 +528,13 @@ BTREE_TYPES(Time); MY_BENCHMARK4(type, InsertRangeRandom); \ MY_BENCHMARK4(type, InsertRangeSorted) +#define MY_BENCHMARK3(type) \ + MY_BENCHMARK4(type, EraseIf); \ + MY_BENCHMARK3_STL(type) + #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \ - MY_BENCHMARK3(stl_##type); \ - MY_BENCHMARK3(stl_unordered_##type); \ + MY_BENCHMARK3_STL(stl_##type); \ + MY_BENCHMARK3_STL(stl_unordered_##type); \ MY_BENCHMARK3(btree_256_##type) #define MY_BENCHMARK2(type) \ @@ -684,17 +724,40 @@ double ContainerInfo(const btree_map>& b) { btree_set>; \ using btree_256_map_size##SIZE##copies##SIZE##ptr = \ btree_map>; \ - MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr) BIG_TYPE_PTR_BENCHMARKS(32); 
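A quick sketch of what the new EraseIf benchmark above exercises: absl::erase_if on a btree container with a stateful predicate that drops every other element. This assumes the count-returning erase_if introduced elsewhere in this change; the EveryOther functor is a hypothetical stand-in for EraseIfPred.

#include <cstdint>
#include <iostream>

#include "absl/container/btree_set.h"

// Stateful predicate that erases every other element it is asked about,
// mirroring the EraseIfPred used by the benchmark above.
struct EveryOther {
  uint64_t i = 0;
  bool operator()(int) { return ++i % 2 != 0; }
};

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  // With the updated API, erase_if reports how many elements were removed
  // and invokes the predicate exactly once per element.
  auto erased = absl::erase_if(s, EveryOther{});
  std::cout << "erased " << erased << ", remaining " << s.size() << "\n";
}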
+void BM_BtreeSet_IteratorSubtraction(benchmark::State& state) { + absl::InsecureBitGen bitgen; + std::vector vec; + // Randomize the set's insertion order so the nodes aren't all full. + vec.reserve(state.range(0)); + for (int i = 0; i < state.range(0); ++i) vec.push_back(i); + absl::c_shuffle(vec, bitgen); + + absl::btree_set set; + for (int i : vec) set.insert(i); + + size_t distance = absl::Uniform(bitgen, 0u, set.size()); + while (state.KeepRunningBatch(distance)) { + size_t end = absl::Uniform(bitgen, distance, set.size()); + size_t begin = end - distance; + benchmark::DoNotOptimize(set.find(static_cast(end)) - + set.find(static_cast(begin))); + distance = absl::Uniform(bitgen, 0u, set.size()); + } +} + +BENCHMARK(BM_BtreeSet_IteratorSubtraction)->Range(1 << 10, 1 << 20); + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_map.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_map.h index 6bbf414e8b..cd3ee2b422 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_map.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_map.h @@ -35,14 +35,20 @@ // // However, these types should not be considered drop-in replacements for // `std::map` and `std::multimap` as there are some API differences, which are -// noted in this header file. +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only // an issue if insertion and deletion operations are interleaved with the use of -// more than one iterator, pointer, or reference simultaneously. For this -// reason, `insert()` and `erase()` return a valid iterator at the current -// position. +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid +// iterator at the current position. Another important difference is that +// key-types must be copy-constructible. +// +// Another API difference is that btree iterators can be subtracted, and this +// is faster than using std::distance. #ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ @@ -53,6 +59,14 @@ namespace absl { ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct map_params; + +} // namespace container_internal + // absl::btree_map<> // // An `absl::btree_map` is an ordered associative container of @@ -74,7 +88,7 @@ class btree_map : public container_internal::btree_map_container< container_internal::btree>> { + /*IsMulti=*/false>>> { using Base = typename btree_map::btree_map_container; public: @@ -311,7 +325,8 @@ class btree_map // btree_map::extract() // // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. + // as a C++17-compatible node handle. Any references, pointers, or iterators + // are invalidated. Overloads are listed below. // // node_type extract(const_iterator position): // @@ -336,6 +351,21 @@ class btree_map // It does NOT refer to the data layout of the underlying btree. 
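To illustrate the iterator-subtraction support that the benchmark above measures (and that the updated btree_map.h comment advertises), here is a minimal sketch; it assumes an Abseil build that already includes this change.

#include <iostream>
#include <iterator>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> set;
  for (int i = 0; i < 1000; ++i) set.insert(i);

  auto lo = set.find(100);
  auto hi = set.find(900);
  // Direct subtraction is supported by btree iterators and, per the comment
  // above, is faster than std::distance, which advances one element at a time.
  std::cout << (hi - lo) << " == " << std::distance(lo, hi) << "\n";  // 800 == 800
}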
using Base::extract; + // btree_map::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + // btree_map::merge() // // Extracts elements from a given `source` btree_map into this @@ -467,15 +497,11 @@ void swap(btree_map &x, btree_map &y) { // absl::erase_if(absl::btree_map<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_map &map, Pred pred) { - for (auto it = map.begin(); it != map.end();) { - if (pred(*it)) { - it = map.erase(it); - } else { - ++it; - } - } +typename btree_map::size_type erase_if( + btree_map &map, Pred pred) { + return container_internal::btree_access::erase_if(map, std::move(pred)); } // absl::btree_multimap @@ -500,7 +526,7 @@ class btree_multimap : public container_internal::btree_multimap_container< container_internal::btree>> { + /*IsMulti=*/true>>> { using Base = typename btree_multimap::btree_multimap_container; public: @@ -691,11 +717,25 @@ class btree_multimap // It does NOT refer to the data layout of the underlying btree. using Base::extract; + // btree_multimap::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + // btree_multimap::merge() // - // Extracts elements from a given `source` btree_multimap into this - // `btree_multimap`. If the destination `btree_multimap` already contains an - // element with an equivalent key, that element is not extracted. + // Extracts all elements from a given `source` btree_multimap into this + // `btree_multimap`. using Base::merge; // btree_multimap::swap(btree_multimap& other) @@ -799,17 +839,46 @@ void swap(btree_multimap &x, btree_multimap &y) { // absl::erase_if(absl::btree_multimap<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_multimap &map, Pred pred) { - for (auto it = map.begin(); it != map.end();) { - if (pred(*it)) { - it = map.erase(it); - } else { - ++it; - } - } +typename btree_multimap::size_type erase_if( + btree_multimap &map, Pred pred) { + return container_internal::btree_access::erase_if(map, std::move(pred)); } +namespace container_internal { + +// A parameters structure for holding the type parameters for a btree_map. +// Compare and Alloc should be nothrow copy-constructible. 
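A small usage sketch for the extract_and_get_next() API documented above, assuming the returned struct carries the `node` and `next` members described in the comment.

#include <iostream>
#include <string>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {{1, "a"}, {2, "b"}, {3, "c"}};
  auto it = m.find(2);
  // The result holds the extracted node handle plus a still-valid iterator to
  // the element that followed it, so no re-lookup is needed after extraction.
  auto res = m.extract_and_get_next(it);
  std::cout << res.node.key() << " -> " << res.node.mapped()
            << "; next key: " << res.next->first << "\n";  // 2 -> b; next key: 3
}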
+template +struct map_params : common_params> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + template + static auto key(const V &value) -> decltype(value.first) { + return value.first; + } + static const Key &key(const slot_type *s) { return slot_policy::key(s); } + static const Key &key(slot_type *s) { return slot_policy::key(s); } + // For use in node handle. + static auto mutable_key(slot_type *s) + -> decltype(slot_policy::mutable_key(s)) { + return slot_policy::mutable_key(s); + } + static mapped_type &value(value_type *value) { return value->second; } +}; + +} // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_set.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_set.h index c07ccd911b..51dc42b797 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_set.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_set.h @@ -35,14 +35,19 @@ // // However, these types should not be considered drop-in replacements for // `std::set` and `std::multiset` as there are some API differences, which are -// noted in this header file. +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only // an issue if insertion and deletion operations are interleaved with the use of // more than one iterator, pointer, or reference simultaneously. For this -// reason, `insert()` and `erase()` return a valid iterator at the current -// position. +// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid +// iterator at the current position. +// +// Another API difference is that btree iterators can be subtracted, and this +// is faster than using std::distance. #ifndef ABSL_CONTAINER_BTREE_SET_H_ #define ABSL_CONTAINER_BTREE_SET_H_ @@ -53,6 +58,17 @@ namespace absl { ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct set_slot_policy; + +template +struct set_params; + +} // namespace container_internal + // absl::btree_set<> // // An `absl::btree_set` is an ordered associative container of unique key @@ -74,7 +90,7 @@ class btree_set : public container_internal::btree_set_container< container_internal::btree>> { + /*IsMulti=*/false>>> { using Base = typename btree_set::btree_set_container; public: @@ -256,7 +272,8 @@ class btree_set // btree_set::extract() // // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. + // as a C++17-compatible node handle. Any references, pointers, or iterators + // are invalidated. Overloads are listed below. 
// // node_type extract(const_iterator position): // @@ -276,6 +293,21 @@ class btree_set // It does NOT refer to the data layout of the underlying btree. using Base::extract; + // btree_set::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + // btree_set::merge() // // Extracts elements from a given `source` btree_set into this @@ -385,15 +417,11 @@ void swap(btree_set &x, btree_set &y) { // absl::erase_if(absl::btree_set<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_set &set, Pred pred) { - for (auto it = set.begin(); it != set.end();) { - if (pred(*it)) { - it = set.erase(it); - } else { - ++it; - } - } +typename btree_set::size_type erase_if(btree_set &set, + Pred pred) { + return container_internal::btree_access::erase_if(set, std::move(pred)); } // absl::btree_multiset<> @@ -418,7 +446,7 @@ class btree_multiset : public container_internal::btree_multiset_container< container_internal::btree>> { + /*IsMulti=*/true>>> { using Base = typename btree_multiset::btree_multiset_container; public: @@ -602,11 +630,25 @@ class btree_multiset // It does NOT refer to the data layout of the underlying btree. using Base::extract; + // btree_multiset::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + // btree_multiset::merge() // - // Extracts elements from a given `source` btree_multiset into this - // `btree_multiset`. If the destination `btree_multiset` already contains an - // element with an equivalent key, that element is not extracted. + // Extracts all elements from a given `source` btree_multiset into this + // `btree_multiset`. using Base::merge; // btree_multiset::swap(btree_multiset& other) @@ -712,17 +754,67 @@ void swap(btree_multiset &x, btree_multiset &y) { // absl::erase_if(absl::btree_multiset<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_multiset &set, Pred pred) { - for (auto it = set.begin(); it != set.end();) { - if (pred(*it)) { - it = set.erase(it); - } else { - ++it; - } - } +typename btree_multiset::size_type erase_if( + btree_multiset & set, Pred pred) { + return container_internal::btree_access::erase_if(set, std::move(pred)); } +namespace container_internal { + +// This type implements the necessary functions from the +// absl::container_internal::slot_type interface for btree_(multi)set. 
+template +struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template + static void construct(Alloc *alloc, slot_type *slot, Args &&...args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, *other); + } + + template + static void destroy(Alloc *alloc, slot_type *slot) { + absl::allocator_traits::destroy(*alloc, slot); + } +}; + +// A parameters structure for holding the type parameters for a btree_set. +// Compare and Alloc should be nothrow copy-constructible. +template +struct set_params : common_params> { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + + template + static const V &key(const V &value) { + return value; + } + static const Key &key(const slot_type *slot) { return *slot; } + static const Key &key(slot_type *slot) { return *slot; } +}; + +} // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_test.cc index d27cf27105..28dda8a6fd 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/btree_test.cc @@ -14,17 +14,24 @@ #include "absl/container/btree_test.h" +#include +#include #include +#include +#include #include #include #include +#include #include #include #include #include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/algorithm/container.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/btree_map.h" @@ -34,7 +41,7 @@ #include "absl/flags/flag.h" #include "absl/hash/hash_testing.h" #include "absl/memory/memory.h" -#include "absl/meta/type_traits.h" +#include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" @@ -67,6 +74,16 @@ void CheckPairEquals(const std::pair &x, const std::pair &y) { CheckPairEquals(x.first, y.first); CheckPairEquals(x.second, y.second); } + +bool IsAssertEnabled() { + // Use an assert with side-effects to figure out if they are actually enabled. + bool assert_enabled = false; + assert([&]() { // NOLINT + assert_enabled = true; + return true; + }()); + return assert_enabled; +} } // namespace // The base class for a sorted associative container checker. 
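The IsAssertEnabled() helper added to the test above relies on assert() evaluating a lambda whose side effect only runs when assertions are compiled in. A standalone sketch of the same trick:

#include <cassert>
#include <iostream>

// Returns true only when assert() is active (NDEBUG not defined): the lambda's
// side effect executes solely inside the assert expression.
bool AssertionsEnabled() {
  bool enabled = false;
  assert([&] {  // NOLINT: the side effect is the whole point here
    enabled = true;
    return true;
  }());
  return enabled;
}

int main() { std::cout << std::boolalpha << AssertionsEnabled() << "\n"; }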
TreeType is the @@ -1212,6 +1229,11 @@ class BtreeNodePeer { constexpr static bool UsesLinearNodeSearch() { return btree_node::use_linear_search::value; } + + template + constexpr static bool UsesGenerations() { + return Btree::params_type::kEnableGenerations; + } }; namespace { @@ -1285,7 +1307,7 @@ TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { std::unique_ptr &v = m["A"]; EXPECT_TRUE(v == nullptr); - v.reset(new std::string("X")); + v = absl::make_unique("X"); auto iter = m.find("A"); EXPECT_EQ("X", *iter->second); @@ -1344,38 +1366,34 @@ TEST(Btree, InitializerListInsert) { EXPECT_EQ(++it, range.second); } -template -void AssertKeyCompareToAdapted() { - using Adapted = typename key_compare_to_adapter::type; - static_assert(!std::is_same::value, - "key_compare_to_adapter should have adapted this comparator."); +template +void AssertKeyCompareStringAdapted() { + using Adapted = typename key_compare_adapter::type; static_assert( - std::is_same>::value, - "Adapted comparator should be a key-compare-to comparator."); + std::is_same::value || + std::is_same::value, + "key_compare_adapter should have string-adapted this comparator."); } -template -void AssertKeyCompareToNotAdapted() { - using Unadapted = typename key_compare_to_adapter::type; +template +void AssertKeyCompareNotStringAdapted() { + using Adapted = typename key_compare_adapter::type; static_assert( - std::is_same::value, - "key_compare_to_adapter shouldn't have adapted this comparator."); - static_assert( - std::is_same>::value, - "Un-adapted comparator should return bool."); + !std::is_same::value && + !std::is_same::value, + "key_compare_adapter shouldn't have string-adapted this comparator."); } -TEST(Btree, KeyCompareToAdapter) { - AssertKeyCompareToAdapted, std::string>(); - AssertKeyCompareToAdapted, std::string>(); - AssertKeyCompareToAdapted, absl::string_view>(); - AssertKeyCompareToAdapted, - absl::string_view>(); - AssertKeyCompareToAdapted, absl::Cord>(); - AssertKeyCompareToAdapted, absl::Cord>(); - AssertKeyCompareToNotAdapted, int>(); - AssertKeyCompareToNotAdapted, int>(); +TEST(Btree, KeyCompareAdapter) { + AssertKeyCompareStringAdapted, std::string>(); + AssertKeyCompareStringAdapted, std::string>(); + AssertKeyCompareStringAdapted, + absl::string_view>(); + AssertKeyCompareStringAdapted, + absl::string_view>(); + AssertKeyCompareStringAdapted, absl::Cord>(); + AssertKeyCompareStringAdapted, absl::Cord>(); + AssertKeyCompareNotStringAdapted, int>(); + AssertKeyCompareNotStringAdapted, int>(); } TEST(Btree, RValueInsert) { @@ -1425,11 +1443,19 @@ TEST(Btree, RValueInsert) { EXPECT_EQ(tracker.swaps(), 0); } -// A btree set with a specific number of values per node. +template +struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase { + using Cmp::Cmp; + CheckedCompareOptedOutCmp() {} + CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT +}; + +// A btree set with a specific number of values per node. Opt out of +// checked_compare so that we can expect exact numbers of comparisons. 
template > class SizedBtreeSet : public btree_set_container, + set_params, std::allocator, BtreeNodePeer::GetTargetNodeSize(TargetValuesPerNode), /*Multi=*/false>>> { using Base = typename SizedBtreeSet::btree_set_container; @@ -1473,8 +1499,10 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) { EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), - BtreeNodePeer::GetNumSlotsPerNode()); + EXPECT_EQ( + BtreeNodePeer::GetNumSlotsPerNode>(), + // When we have generations, there is one fewer slot. + BtreeNodePeer::UsesGenerations>() ? 60 : 61); } // Test key insertion/deletion in random order. @@ -1528,8 +1556,10 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), - BtreeNodePeer::GetNumSlotsPerNode()); + EXPECT_EQ( + BtreeNodePeer::GetNumSlotsPerNode>(), + // When we have generations, there is one fewer slot. + BtreeNodePeer::UsesGenerations>() ? 60 : 61); } // Test key insertion/deletion in random order. @@ -1631,10 +1661,9 @@ TEST(Btree, BtreeMultisetEmplace) { auto iter = s.emplace(value_to_insert); ASSERT_NE(iter, s.end()); EXPECT_EQ(*iter, value_to_insert); - auto iter2 = s.emplace(value_to_insert); - EXPECT_NE(iter2, iter); - ASSERT_NE(iter2, s.end()); - EXPECT_EQ(*iter2, value_to_insert); + iter = s.emplace(value_to_insert); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); auto result = s.equal_range(value_to_insert); EXPECT_EQ(std::distance(result.first, result.second), 2); } @@ -1645,44 +1674,45 @@ TEST(Btree, BtreeMultisetEmplaceHint) { auto iter = s.emplace(value_to_insert); ASSERT_NE(iter, s.end()); EXPECT_EQ(*iter, value_to_insert); - auto emplace_iter = s.emplace_hint(iter, value_to_insert); - EXPECT_NE(emplace_iter, iter); - ASSERT_NE(emplace_iter, s.end()); - EXPECT_EQ(*emplace_iter, value_to_insert); + iter = s.emplace_hint(iter, value_to_insert); + // The new element should be before the previously inserted one. 
+ EXPECT_EQ(iter, s.lower_bound(value_to_insert)); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); } TEST(Btree, BtreeMultimapEmplace) { const int key_to_insert = 123456; const char value0[] = "a"; - absl::btree_multimap s; - auto iter = s.emplace(key_to_insert, value0); - ASSERT_NE(iter, s.end()); + absl::btree_multimap m; + auto iter = m.emplace(key_to_insert, value0); + ASSERT_NE(iter, m.end()); EXPECT_EQ(iter->first, key_to_insert); EXPECT_EQ(iter->second, value0); const char value1[] = "b"; - auto iter2 = s.emplace(key_to_insert, value1); - EXPECT_NE(iter2, iter); - ASSERT_NE(iter2, s.end()); - EXPECT_EQ(iter2->first, key_to_insert); - EXPECT_EQ(iter2->second, value1); - auto result = s.equal_range(key_to_insert); + iter = m.emplace(key_to_insert, value1); + ASSERT_NE(iter, m.end()); + EXPECT_EQ(iter->first, key_to_insert); + EXPECT_EQ(iter->second, value1); + auto result = m.equal_range(key_to_insert); EXPECT_EQ(std::distance(result.first, result.second), 2); } TEST(Btree, BtreeMultimapEmplaceHint) { const int key_to_insert = 123456; const char value0[] = "a"; - absl::btree_multimap s; - auto iter = s.emplace(key_to_insert, value0); - ASSERT_NE(iter, s.end()); + absl::btree_multimap m; + auto iter = m.emplace(key_to_insert, value0); + ASSERT_NE(iter, m.end()); EXPECT_EQ(iter->first, key_to_insert); EXPECT_EQ(iter->second, value0); const char value1[] = "b"; - auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1); - EXPECT_NE(emplace_iter, iter); - ASSERT_NE(emplace_iter, s.end()); - EXPECT_EQ(emplace_iter->first, key_to_insert); - EXPECT_EQ(emplace_iter->second, value1); + iter = m.emplace_hint(iter, key_to_insert, value1); + // The new element should be before the previously inserted one. + EXPECT_EQ(iter, m.lower_bound(key_to_insert)); + ASSERT_NE(iter, m.end()); + EXPECT_EQ(iter->first, key_to_insert); + EXPECT_EQ(iter->second, value1); } TEST(Btree, ConstIteratorAccessors) { @@ -1748,6 +1778,22 @@ TEST(Btree, ValueComp) { EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0))); } +// Test that we have the protected members from the std::map::value_compare API. +// See https://en.cppreference.com/w/cpp/container/map/value_compare. 
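A minimal sketch of the hinted-emplace ordering that the updated multiset/multimap tests above check: an equal element emplaced with a hint lands immediately before the hint, so it becomes the lower bound of its equal range.

#include <iostream>

#include "absl/container/btree_set.h"  // also declares absl::btree_multiset

int main() {
  absl::btree_multiset<int> s;
  auto it = s.emplace(42);
  // Per the updated tests, the hinted duplicate is inserted before the hint
  // rather than after the existing element.
  it = s.emplace_hint(it, 42);
  std::cout << std::boolalpha << (it == s.lower_bound(42)) << "\n";  // true
  std::cout << s.count(42) << "\n";                                  // 2
}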
+TEST(Btree, MapValueCompProtected) { + struct key_compare { + bool operator()(int l, int r) const { return l < r; } + int id; + }; + using value_compare = absl::btree_map::value_compare; + struct value_comp_child : public value_compare { + explicit value_comp_child(key_compare kc) : value_compare(kc) {} + int GetId() const { return comp.id; } + }; + value_comp_child c(key_compare{10}); + EXPECT_EQ(c.GetId(), 10); +} + TEST(Btree, DefaultConstruction) { absl::btree_set s; absl::btree_map m; @@ -2077,6 +2123,79 @@ TEST(Btree, ExtractMultiMapEquivalentKeys) { } } +TEST(Btree, ExtractAndGetNextSet) { + absl::btree_set src = {1, 2, 3, 4, 5}; + auto it = src.find(3); + auto extracted_and_next = src.extract_and_get_next(it); + EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); + EXPECT_EQ(extracted_and_next.node.value(), 3); + EXPECT_EQ(*extracted_and_next.next, 4); +} + +TEST(Btree, ExtractAndGetNextMultiSet) { + absl::btree_multiset src = {1, 2, 3, 4, 5}; + auto it = src.find(3); + auto extracted_and_next = src.extract_and_get_next(it); + EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); + EXPECT_EQ(extracted_and_next.node.value(), 3); + EXPECT_EQ(*extracted_and_next.next, 4); +} + +TEST(Btree, ExtractAndGetNextMap) { + absl::btree_map src = {{1, 2}, {3, 4}, {5, 6}}; + auto it = src.find(3); + auto extracted_and_next = src.extract_and_get_next(it); + EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6))); + EXPECT_EQ(extracted_and_next.node.key(), 3); + EXPECT_EQ(extracted_and_next.node.mapped(), 4); + EXPECT_THAT(*extracted_and_next.next, Pair(5, 6)); +} + +TEST(Btree, ExtractAndGetNextMultiMap) { + absl::btree_multimap src = {{1, 2}, {3, 4}, {5, 6}}; + auto it = src.find(3); + auto extracted_and_next = src.extract_and_get_next(it); + EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6))); + EXPECT_EQ(extracted_and_next.node.key(), 3); + EXPECT_EQ(extracted_and_next.node.mapped(), 4); + EXPECT_THAT(*extracted_and_next.next, Pair(5, 6)); +} + +TEST(Btree, ExtractAndGetNextEndIter) { + absl::btree_set src = {1, 2, 3, 4, 5}; + auto it = src.find(5); + auto extracted_and_next = src.extract_and_get_next(it); + EXPECT_THAT(src, ElementsAre(1, 2, 3, 4)); + EXPECT_EQ(extracted_and_next.node.value(), 5); + EXPECT_EQ(extracted_and_next.next, src.end()); +} + +TEST(Btree, ExtractDoesntCauseExtraMoves) { +#ifdef _MSC_VER + GTEST_SKIP() << "This test fails on MSVC."; +#endif + + using Set = absl::btree_set; + std::array, 3> extracters = { + [](Set &s) { auto node = s.extract(s.begin()); }, + [](Set &s) { auto ret = s.extract_and_get_next(s.begin()); }, + [](Set &s) { auto node = s.extract(MovableOnlyInstance(0)); }}; + + InstanceTracker tracker; + for (int i = 0; i < 3; ++i) { + Set s; + s.insert(MovableOnlyInstance(0)); + tracker.ResetCopiesMovesSwaps(); + + extracters[i](s); + // We expect to see exactly 1 move: from the original slot into the + // extracted node. + EXPECT_EQ(tracker.copies(), 0) << i; + EXPECT_EQ(tracker.moves(), 1) << i; + EXPECT_EQ(tracker.swaps(), 0) << i; + } +} + // For multisets, insert with hint also affects correctness because we need to // insert immediately before the hint if possible. struct InsertMultiHintData { @@ -2297,7 +2416,9 @@ TEST(Btree, TryEmplaceWithHintWorks) { }; using Cmp = decltype(cmp); - absl::btree_map m(cmp); + // Use a map that is opted out of key_compare being adapted so we can expect + // strict comparison call limits. 
+ absl::btree_map> m(cmp); for (int i = 0; i < 128; ++i) { m.emplace(i, i); } @@ -2452,23 +2573,28 @@ TEST(Btree, EraseIf) { // Test that erase_if works with all the container types and supports lambdas. { absl::btree_set s = {1, 3, 5, 6, 100}; - erase_if(s, [](int k) { return k > 3; }); + EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3); EXPECT_THAT(s, ElementsAre(1, 3)); } { absl::btree_multiset s = {1, 3, 3, 5, 6, 6, 100}; - erase_if(s, [](int k) { return k <= 3; }); + EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3); EXPECT_THAT(s, ElementsAre(5, 6, 6, 100)); } { absl::btree_map m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}}; - erase_if(m, [](std::pair kv) { return kv.first > 3; }); + EXPECT_EQ( + erase_if(m, [](std::pair kv) { return kv.first > 3; }), + 2); EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3))); } { absl::btree_multimap m = {{1, 1}, {3, 3}, {3, 6}, {6, 6}, {6, 7}, {100, 6}}; - erase_if(m, [](std::pair kv) { return kv.second == 6; }); + EXPECT_EQ( + erase_if(m, + [](std::pair kv) { return kv.second == 6; }), + 3); EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7))); } // Test that erasing all elements from a large set works and test support for @@ -2476,15 +2602,29 @@ TEST(Btree, EraseIf) { { absl::btree_set s; for (int i = 0; i < 1000; ++i) s.insert(2 * i); - erase_if(s, IsEven); + EXPECT_EQ(erase_if(s, IsEven), 1000); EXPECT_THAT(s, IsEmpty()); } // Test that erase_if supports other format of function pointers. { absl::btree_set s = {1, 3, 5, 6, 100}; - erase_if(s, &IsEven); + EXPECT_EQ(erase_if(s, &IsEven), 2); EXPECT_THAT(s, ElementsAre(1, 3, 5)); } + // Test that erase_if invokes the predicate once per element. + { + absl::btree_set s; + for (int i = 0; i < 1000; ++i) s.insert(i); + int pred_calls = 0; + EXPECT_EQ(erase_if(s, + [&pred_calls](int k) { + ++pred_calls; + return k % 2; + }), + 500); + EXPECT_THAT(s, SizeIs(500)); + EXPECT_EQ(pred_calls, 1000); + } } TEST(Btree, InsertOrAssign) { @@ -2948,6 +3088,374 @@ TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) { absl::btree_set set = {{}, MultiKeyComp{}}; } +TEST(Btree, InvalidComparatorsCaught) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + { + struct ZeroAlwaysLessCmp { + bool operator()(int lhs, int rhs) const { + if (lhs == 0) return true; + return lhs < rhs; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); + } + { + struct ThreeWayAlwaysLessCmp { + absl::weak_ordering operator()(int, int) const { + return absl::weak_ordering::less; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); + } + { + struct SumGreaterZeroCmp { + bool operator()(int lhs, int rhs) const { + // First, do equivalence correctly - so we can test later condition. + if (lhs == rhs) return false; + return lhs + rhs > 0; + } + }; + absl::btree_set set; + // Note: '!' only needs to be escaped when it's the first character. + EXPECT_DEATH(set.insert({0, 1, 2}), + R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex"); + } + { + struct ThreeWaySumGreaterZeroCmp { + absl::weak_ordering operator()(int lhs, int rhs) const { + // First, do equivalence correctly - so we can test later condition. 
+ if (lhs == rhs) return absl::weak_ordering::equivalent; + + if (lhs + rhs > 0) return absl::weak_ordering::less; + if (lhs + rhs == 0) return absl::weak_ordering::equivalent; + return absl::weak_ordering::greater; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } +} + +#ifndef _MSC_VER +// This test crashes on MSVC. +TEST(Btree, InvalidIteratorUse) { + if (!BtreeNodePeer::UsesGenerations>()) + GTEST_SKIP() << "Generation validation for iterators is disabled."; + + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.begin(); + set.erase(it++); + EXPECT_DEATH(set.erase(it++), "invalidated iterator"); + } + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.insert(20).first; + set.insert(30); + EXPECT_DEATH(*it, "invalidated iterator"); + } + { + absl::btree_set set; + for (int i = 0; i < 10000; ++i) set.insert(i); + auto it = set.find(5000); + ASSERT_NE(it, set.end()); + set.erase(1); + EXPECT_DEATH(*it, "invalidated iterator"); + } +} +#endif + +class OnlyConstructibleByAllocator { + explicit OnlyConstructibleByAllocator(int i) : i_(i) {} + + public: + OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other) + : i_(other.i_) {} + OnlyConstructibleByAllocator &operator=( + const OnlyConstructibleByAllocator &other) { + i_ = other.i_; + return *this; + } + int Get() const { return i_; } + bool operator==(int i) const { return i_ == i; } + + private: + template + friend class OnlyConstructibleAllocator; + + int i_; +}; + +template +class OnlyConstructibleAllocator : public std::allocator { + public: + OnlyConstructibleAllocator() = default; + template + explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator &) {} + + void construct(OnlyConstructibleByAllocator *p, int i) { + new (p) OnlyConstructibleByAllocator(i); + } + template + void construct(Pair *p, const int i) { + OnlyConstructibleByAllocator only(i); + new (p) Pair(std::move(only), i); + } + + template + struct rebind { + using other = OnlyConstructibleAllocator; + }; +}; + +struct OnlyConstructibleByAllocatorComp { + using is_transparent = void; + bool operator()(OnlyConstructibleByAllocator a, + OnlyConstructibleByAllocator b) const { + return a.Get() < b.Get(); + } + bool operator()(int a, OnlyConstructibleByAllocator b) const { + return a < b.Get(); + } + bool operator()(OnlyConstructibleByAllocator a, int b) const { + return a.Get() < b; + } +}; + +TEST(Btree, OnlyConstructibleByAllocatorType) { + const std::array arr = {3, 4}; + { + absl::btree_set> + set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + set.insert(arr.begin(), arr.end()); + EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); + } + { + absl::btree_multiset> + set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + // TODO(ezb): fix insert_multi to allow this to compile. + // set.insert(arr.begin(), arr.end()); + EXPECT_THAT(set, ElementsAre(1, 2)); + } + { + absl::btree_map> + map; + map.emplace(1); + map.emplace_hint(map.end(), 2); + map.insert(arr.begin(), arr.end()); + EXPECT_THAT(map, + ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); + } + { + absl::btree_multimap> + map; + map.emplace(1); + map.emplace_hint(map.end(), 2); + // TODO(ezb): fix insert_multi to allow this to compile. 
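For reference, a sketch of the kind of broken comparator the assertions above are meant to catch. With assertions enabled this is expected to abort in the checked-compare wrapper ("is_self_equivalent"); with NDEBUG the behavior is simply undefined. The ZeroAlwaysLess name is illustrative only.

#include "absl/container/btree_set.h"

// A strict-weak-ordering violation: 0 compares "less than" everything,
// including itself, so cmp(x, x) can return true.
struct ZeroAlwaysLess {
  bool operator()(int lhs, int rhs) const {
    if (lhs == 0) return true;
    return lhs < rhs;
  }
};

int main() {
  absl::btree_set<int, ZeroAlwaysLess> set;
  // Expected to trip the self-equivalence assertion in debug builds.
  set.insert({0, 1, 2});
}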
+ // map.insert(arr.begin(), arr.end()); + EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2))); + } +} + +class NotAssignable { + public: + explicit NotAssignable(int i) : i_(i) {} + NotAssignable(const NotAssignable &other) : i_(other.i_) {} + NotAssignable &operator=(NotAssignable &&other) = delete; + int Get() const { return i_; } + bool operator==(int i) const { return i_ == i; } + friend bool operator<(NotAssignable a, NotAssignable b) { + return a.i_ < b.i_; + } + + private: + int i_; +}; + +TEST(Btree, NotAssignableType) { + { + absl::btree_set set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + set.insert(NotAssignable(3)); + set.insert(set.end(), NotAssignable(4)); + EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); + set.erase(set.begin()); + EXPECT_THAT(set, ElementsAre(2, 3, 4)); + } + { + absl::btree_multiset set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + set.insert(NotAssignable(2)); + set.insert(set.end(), NotAssignable(3)); + EXPECT_THAT(set, ElementsAre(1, 2, 2, 3)); + set.erase(set.begin()); + EXPECT_THAT(set, ElementsAre(2, 2, 3)); + } + { + absl::btree_map map; + map.emplace(NotAssignable(1), 1); + map.emplace_hint(map.end(), NotAssignable(2), 2); + map.insert({NotAssignable(3), 3}); + map.insert(map.end(), {NotAssignable(4), 4}); + EXPECT_THAT(map, + ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); + map.erase(map.begin()); + EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4))); + } + { + absl::btree_multimap map; + map.emplace(NotAssignable(1), 1); + map.emplace_hint(map.end(), NotAssignable(2), 2); + map.insert({NotAssignable(2), 3}); + map.insert(map.end(), {NotAssignable(3), 3}); + EXPECT_THAT(map, + ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3))); + map.erase(map.begin()); + EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3))); + } +} + +struct ArenaLike { + void* recycled = nullptr; + size_t recycled_size = 0; +}; + +// A very simple implementation of arena allocation. +template +class ArenaLikeAllocator : public std::allocator { + public: + // Standard library containers require the ability to allocate objects of + // different types which they can do so via rebind.other. + template + struct rebind { + using other = ArenaLikeAllocator; + }; + + explicit ArenaLikeAllocator(ArenaLike* arena) noexcept : arena_(arena) {} + + ~ArenaLikeAllocator() { + if (arena_->recycled != nullptr) { + delete [] static_cast(arena_->recycled); + arena_->recycled = nullptr; + } + } + + template + explicit ArenaLikeAllocator(const ArenaLikeAllocator& other) noexcept + : arena_(other.arena_) {} + + T* allocate(size_t num_objects, const void* = nullptr) { + size_t size = num_objects * sizeof(T); + if (arena_->recycled != nullptr && arena_->recycled_size == size) { + T* result = static_cast(arena_->recycled); + arena_->recycled = nullptr; + return result; + } + return new T[num_objects]; + } + + void deallocate(T* p, size_t num_objects) { + size_t size = num_objects * sizeof(T); + + // Simulate writing to the freed memory as an actual arena allocator might + // do. This triggers an error report if the memory is poisoned. + memset(p, 0xde, size); + + if (arena_->recycled == nullptr) { + arena_->recycled = p; + arena_->recycled_size = size; + } else { + delete [] p; + } + } + + ArenaLike* arena_; +}; + +// This test verifies that an arena allocator that reuses memory will not be +// asked to free poisoned BTree memory. 
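A condensed sketch of what the NotAssignable test above demonstrates: btree containers only need copy- (or move-) constructible keys, so a type with deleted assignment still works through emplace, insert, and erase. The Tag type below is hypothetical.

#include <iostream>

#include "absl/container/btree_set.h"

// Copy-constructible but not assignable, like NotAssignable in the test.
class Tag {
 public:
  explicit Tag(int i) : i_(i) {}
  Tag(const Tag&) = default;
  Tag& operator=(const Tag&) = delete;
  friend bool operator<(const Tag& a, const Tag& b) { return a.i_ < b.i_; }
  int value() const { return i_; }

 private:
  int i_;
};

int main() {
  absl::btree_set<Tag> s;
  s.emplace(1);
  s.insert(Tag(2));    // slots are constructed in place, no assignment needed
  s.erase(s.begin());  // erase is likewise supported without assignment
  std::cout << s.begin()->value() << "\n";  // 2
}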
+TEST(Btree, ReusePoisonMemory) { + using Alloc = ArenaLikeAllocator; + using Set = absl::btree_set, Alloc>; + ArenaLike arena; + Alloc alloc(&arena); + Set set(alloc); + + set.insert(0); + set.erase(0); + set.insert(0); +} + +TEST(Btree, IteratorSubtraction) { + absl::BitGen bitgen; + std::vector vec; + // Randomize the set's insertion order so the nodes aren't all full. + for (int i = 0; i < 1000000; ++i) vec.push_back(i); + absl::c_shuffle(vec, bitgen); + + absl::btree_set set; + for (int i : vec) set.insert(i); + + for (int i = 0; i < 1000; ++i) { + size_t begin = absl::Uniform(bitgen, 0u, set.size()); + size_t end = absl::Uniform(bitgen, begin, set.size()); + ASSERT_EQ(end - begin, set.find(end) - set.find(begin)) + << begin << " " << end; + } +} + +TEST(Btree, DereferencingEndIterator) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + absl::btree_set set; + for (int i = 0; i < 1000; ++i) set.insert(i); + EXPECT_DEATH(*set.end(), R"regex(Dereferencing end\(\) iterator)regex"); +} + +TEST(Btree, InvalidIteratorComparison) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + absl::btree_set set1, set2; + for (int i = 0; i < 1000; ++i) { + set1.insert(i); + set2.insert(i); + } + + constexpr const char *kValueInitDeathMessage = + "Comparing default-constructed iterator with .*non-default-constructed " + "iterator"; + typename absl::btree_set::iterator iter1, iter2; + EXPECT_EQ(iter1, iter2); + EXPECT_DEATH(void(set1.begin() == iter1), kValueInitDeathMessage); + EXPECT_DEATH(void(iter1 == set1.begin()), kValueInitDeathMessage); + + constexpr const char *kDifferentContainerDeathMessage = + "Comparing iterators from different containers"; + iter1 = set1.begin(); + iter2 = set2.begin(); + EXPECT_DEATH(void(iter1 == iter2), kDifferentContainerDeathMessage); + EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage); +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array.h index 839ba0bc16..5543243042 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array.h @@ -471,6 +471,9 @@ class FixedArray { return n <= inline_elements; } +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ABSL_ATTRIBUTE_NOINLINE +#endif // ABSL_HAVE_ADDRESS_SANITIZER StorageElement* InitializeData() { if (UsingInlinedStorage(size())) { InlinedStorage::AnnotateConstruct(size()); @@ -489,12 +492,14 @@ class FixedArray { Storage storage_; }; +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template constexpr size_t FixedArray::kInlineBytesDefault; template constexpr typename FixedArray::size_type FixedArray::inline_elements; +#endif template void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc index 3c7a5a7234..db6663e60d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc @@ -16,8 +16,8 @@ #include -#include "benchmark/benchmark.h" #include "absl/container/fixed_array.h" +#include 
"benchmark/benchmark.h" namespace { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map.h index 74def0df0e..e6bdbd9e4f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map.h @@ -36,6 +36,7 @@ #include #include "absl/algorithm/container.h" +#include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export @@ -75,6 +76,10 @@ struct FlatHashMapPolicy; // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // +// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. +// // NOTE: A `flat_hash_map` stores its value types directly inside its // implementation array to avoid memory indirection. Because a `flat_hash_map` // is designed to move data when rehashed, map values will not retain pointer @@ -356,8 +361,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // `flat_hash_map`. // // iterator try_emplace(const_iterator hint, - // const init_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args): + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `flat_hash_map` using the position of `hint` as a non-binding suggestion @@ -541,10 +546,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // erase_if(flat_hash_map<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. template -void erase_if(flat_hash_map& c, Predicate pred) { - container_internal::EraseIf(pred, &c); +typename flat_hash_map::size_type erase_if( + flat_hash_map& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); } namespace container_internal { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc index 8dda1d3539..263951f1d8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc @@ -236,33 +236,36 @@ TEST(FlatHashMap, EraseIf) { // Erase all elements. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, [](std::pair) { return true; }); + EXPECT_EQ(erase_if(s, [](std::pair) { return true; }), 5); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, [](std::pair) { return false; }); + EXPECT_EQ(erase_if(s, [](std::pair) { return false; }), 0); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4), Pair(5, 5))); } // Erase specific elements. 
{ flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, - [](std::pair kvp) { return kvp.first % 2 == 1; }); + EXPECT_EQ(erase_if(s, + [](std::pair kvp) { + return kvp.first % 2 == 1; + }), + 3); EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); } // Predicate is function reference. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, FirstIsEven); + EXPECT_EQ(erase_if(s, FirstIsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } // Predicate is function pointer. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, &FirstIsEven); + EXPECT_EQ(erase_if(s, &FirstIsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set.h index 6b89da6571..f5376f991a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set.h @@ -67,11 +67,15 @@ struct FlatHashSetPolicy; // // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All // fundamental and Abseil types that support the `absl::Hash` framework have a -// compatible equality operator for comparing insertions into `flat_hash_map`. +// compatible equality operator for comparing insertions into `flat_hash_set`. // If your type is not yet supported by the `absl::Hash` framework, see // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // +// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. +// // NOTE: A `flat_hash_set` stores its keys directly inside its implementation // array to avoid memory indirection. Because a `flat_hash_set` is designed to // move data when rehashed, set keys will not retain pointer stability. If you @@ -106,7 +110,7 @@ class flat_hash_set public: // Constructors and Assignment Operators // - // A flat_hash_set supports the same overload set as `std::unordered_map` + // A flat_hash_set supports the same overload set as `std::unordered_set` // for construction and assignment: // // * Default constructor @@ -173,7 +177,7 @@ class flat_hash_set // available within the `flat_hash_set`. // // NOTE: this member function is particular to `absl::flat_hash_set` and is - // not provided in the `std::unordered_map` API. + // not provided in the `std::unordered_set` API. using Base::capacity; // flat_hash_set::empty() @@ -332,7 +336,7 @@ class flat_hash_set // flat_hash_set::swap(flat_hash_set& other) // // Exchanges the contents of this `flat_hash_set` with those of the `other` - // flat hash map, avoiding invocation of any move, copy, or swap operations on + // flat hash set, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `flat_hash_set` remain valid, excepting @@ -340,7 +344,7 @@ class flat_hash_set // // `swap()` requires that the flat hash set's hashing and key equivalence // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the map's allocator has + // non-member `swap()`. 
If the set's allocator has // `std::allocator_traits::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call // to non-member `swap()`; otherwise, the allocators are not swapped. @@ -395,14 +399,14 @@ class flat_hash_set // flat_hash_set::bucket_count() // // Returns the number of "buckets" within the `flat_hash_set`. Note that - // because a flat hash map contains all elements within its internal storage, + // because a flat hash set contains all elements within its internal storage, // this value simply equals the current capacity of the `flat_hash_set`. using Base::bucket_count; // flat_hash_set::load_factor() // // Returns the current load factor of the `flat_hash_set` (the average number - // of slots occupied with a value within the hash map). + // of slots occupied with a value within the hash set). using Base::load_factor; // flat_hash_set::max_load_factor() @@ -443,9 +447,11 @@ class flat_hash_set // erase_if(flat_hash_set<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. template -void erase_if(flat_hash_set& c, Predicate pred) { - container_internal::EraseIf(pred, &c); +typename flat_hash_set::size_type erase_if( + flat_hash_set& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); } namespace container_internal { @@ -468,13 +474,6 @@ struct FlatHashSetPolicy { absl::allocator_traits::destroy(*alloc, slot); } - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - construct(alloc, new_slot, std::move(*old_slot)); - destroy(alloc, old_slot); - } - static T& element(slot_type* slot) { return *slot; } template diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc index 8f6f9944ca..b6a72a20a3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc @@ -143,31 +143,31 @@ TEST(FlatHashSet, EraseIf) { // Erase all elements. { flat_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, [](int) { return true; }); + EXPECT_EQ(erase_if(s, [](int) { return true; }), 5); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { flat_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, [](int) { return false; }); + EXPECT_EQ(erase_if(s, [](int) { return false; }), 0); EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); } // Erase specific elements. { flat_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, [](int k) { return k % 2 == 1; }); + EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3); EXPECT_THAT(s, UnorderedElementsAre(2, 4)); } // Predicate is function reference. { flat_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, IsEven); + EXPECT_EQ(erase_if(s, IsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } // Predicate is function pointer. 
{ flat_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, &IsEven); + EXPECT_EQ(erase_if(s, &IsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector.h index 37e5fef8fd..7058f375e7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector.h @@ -36,7 +36,6 @@ #define ABSL_CONTAINER_INLINED_VECTOR_H_ #include -#include #include #include #include @@ -53,6 +52,7 @@ #include "absl/base/port.h" #include "absl/container/internal/inlined_vector.h" #include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -78,6 +78,8 @@ class InlinedVector { using MoveIterator = inlined_vector_internal::MoveIterator; template using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; + template + using IsMoveAssignOk = inlined_vector_internal::IsMoveAssignOk; template using IteratorValueAdapter = @@ -95,6 +97,12 @@ class InlinedVector { using DisableIfAtLeastForwardIterator = absl::enable_if_t< !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + using MemcpyPolicy = typename Storage::MemcpyPolicy; + using ElementwiseAssignPolicy = typename Storage::ElementwiseAssignPolicy; + using ElementwiseConstructPolicy = + typename Storage::ElementwiseConstructPolicy; + using MoveAssignmentPolicy = typename Storage::MoveAssignmentPolicy; + public: using allocator_type = A; using value_type = inlined_vector_internal::ValueType; @@ -152,7 +160,7 @@ class InlinedVector { const allocator_type& allocator = allocator_type()) : storage_(allocator) { storage_.Initialize(IteratorValueAdapter(first), - std::distance(first, last)); + static_cast(std::distance(first, last))); } // Creates an inlined vector with elements constructed from the provided input @@ -207,8 +215,8 @@ class InlinedVector { other.storage_.SetInlinedSize(0); } else if (other.storage_.GetIsAllocated()) { - storage_.SetAllocatedData(other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()); + storage_.SetAllocation({other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()}); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); @@ -233,8 +241,8 @@ class InlinedVector { // specified allocator is also `noexcept`. InlinedVector( InlinedVector&& other, - const allocator_type& allocator) - noexcept(absl::allocator_is_nothrow::value) + const allocator_type& + allocator) noexcept(absl::allocator_is_nothrow::value) : storage_(allocator) { if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); @@ -242,8 +250,8 @@ class InlinedVector { other.storage_.SetInlinedSize(0); } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && other.storage_.GetIsAllocated()) { - storage_.SetAllocatedData(other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()); + storage_.SetAllocation({other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()}); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); @@ -276,8 +284,10 @@ class InlinedVector { size_type max_size() const noexcept { // One bit of the size storage is used to indicate whether the inlined // vector contains allocated memory. 
As a result, the maximum size that the - // inlined vector can express is half of the max for `size_type`. - return (std::numeric_limits::max)() / 2; + // inlined vector can express is the minimum of the limit of how many + // objects we can allocate and std::numeric_limits::max() / 2. + return (std::min)(AllocatorTraits::max_size(storage_.GetAllocator()), + (std::numeric_limits::max)() / 2); } // `InlinedVector::capacity()` @@ -485,18 +495,7 @@ class InlinedVector { // unspecified state. InlinedVector& operator=(InlinedVector&& other) { if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { - if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { - inlined_vector_internal::DestroyElements(storage_.GetAllocator(), - data(), size()); - storage_.DeallocateIfAllocated(); - storage_.MemcpyFrom(other.storage_); - - other.storage_.SetInlinedSize(0); - } else { - storage_.Assign(IteratorValueAdapter>( - MoveIterator(other.storage_.GetInlinedData())), - other.size()); - } + MoveAssignment(MoveAssignmentPolicy{}, std::move(other)); } return *this; @@ -523,7 +522,7 @@ class InlinedVector { EnableIfAtLeastForwardIterator = 0> void assign(ForwardIterator first, ForwardIterator last) { storage_.Assign(IteratorValueAdapter(first), - std::distance(first, last)); + static_cast(std::distance(first, last))); } // Overload of `InlinedVector::assign(...)` to replace the contents of the @@ -586,8 +585,20 @@ class InlinedVector { if (ABSL_PREDICT_TRUE(n != 0)) { value_type dealias = v; + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), n); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif } else { return const_cast(pos); } @@ -613,9 +624,9 @@ class InlinedVector { ABSL_HARDENING_ASSERT(pos <= end()); if (ABSL_PREDICT_TRUE(first != last)) { - return storage_.Insert(pos, - IteratorValueAdapter(first), - std::distance(first, last)); + return storage_.Insert( + pos, IteratorValueAdapter(first), + static_cast(std::distance(first, last))); } else { return const_cast(pos); } @@ -632,7 +643,7 @@ class InlinedVector { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); - size_type index = std::distance(cbegin(), pos); + size_type index = static_cast(std::distance(cbegin(), pos)); for (size_type i = index; first != last; ++i, static_cast(++first)) { insert(data() + i, *first); } @@ -650,10 +661,22 @@ class InlinedVector { ABSL_HARDENING_ASSERT(pos <= end()); value_type dealias(std::forward(args)...); + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. 
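// Standalone sketch of the warning-suppression pattern the comment above
// describes: the diagnostic is disabled only around the single statement that
// GCC mis-flags, and only for GCC (clang does not emit the false positive).
// Function and parameter names here are hypothetical.
void WriteAtSketch(int* data, const int* pos, const int* begin, int value) {
  // `pos` is only used to compute an index, but GCC may still warn that it
  // reads possibly-uninitialized memory.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
  data[pos - begin] = value;
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}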
+#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif return storage_.Insert(pos, IteratorValueAdapter>( MoveIterator(std::addressof(dealias))), 1); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif } // `InlinedVector::emplace_back(...)` @@ -721,8 +744,8 @@ class InlinedVector { // Destroys all elements in the inlined vector, setting the size to `0` and // deallocating any held memory. void clear() noexcept { - inlined_vector_internal::DestroyElements(storage_.GetAllocator(), data(), - size()); + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size()); storage_.DeallocateIfAllocated(); storage_.SetInlinedSize(0); @@ -735,15 +758,12 @@ class InlinedVector { // `InlinedVector::shrink_to_fit()` // - // Reduces memory usage by freeing unused memory. After being called, calls to - // `capacity()` will be equal to `max(N, size())`. + // Attempts to reduce memory usage by moving elements to (or keeping elements + // in) the smallest available buffer sufficient for containing `size()` + // elements. // - // If `size() <= N` and the inlined vector contains allocated memory, the - // elements will all be moved to the inlined space and the allocated memory - // will be deallocated. - // - // If `size() > N` and `size() < capacity()`, the elements will be moved to a - // smaller allocation. + // If `size()` is sufficiently small, the elements will be moved into (or kept + // in) the inlined space. void shrink_to_fit() { if (storage_.GetIsAllocated()) { storage_.ShrinkToFit(); @@ -763,6 +783,42 @@ class InlinedVector { template friend H AbslHashValue(H h, const absl::InlinedVector& a); + void MoveAssignment(MemcpyPolicy, InlinedVector&& other) { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size()); + storage_.DeallocateIfAllocated(); + storage_.MemcpyFrom(other.storage_); + + other.storage_.SetInlinedSize(0); + } + + void MoveAssignment(ElementwiseAssignPolicy, InlinedVector&& other) { + if (other.storage_.GetIsAllocated()) { + MoveAssignment(MemcpyPolicy{}, std::move(other)); + } else { + storage_.Assign(IteratorValueAdapter>( + MoveIterator(other.storage_.GetInlinedData())), + other.size()); + } + } + + void MoveAssignment(ElementwiseConstructPolicy, InlinedVector&& other) { + if (other.storage_.GetIsAllocated()) { + MoveAssignment(MemcpyPolicy{}, std::move(other)); + } else { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size()); + storage_.DeallocateIfAllocated(); + + IteratorValueAdapter> other_values( + MoveIterator(other.storage_.GetInlinedData())); + inlined_vector_internal::ConstructElements( + storage_.GetAllocator(), storage_.GetInlinedData(), other_values, + other.storage_.GetSize()); + storage_.SetInlinedSize(other.storage_.GetSize()); + } + } + Storage storage_; }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc index e256fad60f..56a6bfd23a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc @@ -16,11 +16,11 @@ #include #include -#include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" 
#include "absl/base/macros.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" +#include "benchmark/benchmark.h" namespace { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_test.cc index 98aff33498..898b40db16 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/inlined_vector_test.cc @@ -16,12 +16,14 @@ #include #include +#include #include #include #include #include #include #include +#include #include #include "gmock/gmock.h" @@ -49,6 +51,7 @@ using testing::ElementsAre; using testing::ElementsAreArray; using testing::Eq; using testing::Gt; +using testing::Pointwise; using testing::PrintToString; using IntVec = absl::InlinedVector; @@ -126,20 +129,20 @@ using DynamicVec = absl::InlinedVector; // Append 0..len-1 to *v template -static void Fill(Container* v, int len, int offset = 0) { - for (int i = 0; i < len; i++) { - v->push_back(i + offset); +static void Fill(Container* v, size_t len, int offset = 0) { + for (size_t i = 0; i < len; i++) { + v->push_back(static_cast(i) + offset); } } -static IntVec Fill(int len, int offset = 0) { +static IntVec Fill(size_t len, int offset = 0) { IntVec v; Fill(&v, len, offset); return v; } TEST(IntVec, SimpleOps) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v; const IntVec& cv = v; // const alias @@ -147,42 +150,42 @@ TEST(IntVec, SimpleOps) { EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); - for (int i = 0; i < len; i++) { - EXPECT_EQ(i, v[i]); - EXPECT_EQ(i, v.at(i)); + for (size_t i = 0; i < len; i++) { + EXPECT_EQ(static_cast(i), v[i]); + EXPECT_EQ(static_cast(i), v.at(i)); } EXPECT_EQ(v.begin(), v.data()); EXPECT_EQ(cv.begin(), cv.data()); - int counter = 0; + size_t counter = 0; for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) { - EXPECT_EQ(counter, *iter); + EXPECT_EQ(static_cast(counter), *iter); counter++; } EXPECT_EQ(counter, len); counter = 0; for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) { - EXPECT_EQ(counter, *iter); + EXPECT_EQ(static_cast(counter), *iter); counter++; } EXPECT_EQ(counter, len); counter = 0; for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) { - EXPECT_EQ(counter, *iter); + EXPECT_EQ(static_cast(counter), *iter); counter++; } EXPECT_EQ(counter, len); if (len > 0) { EXPECT_EQ(0, v.front()); - EXPECT_EQ(len - 1, v.back()); + EXPECT_EQ(static_cast(len - 1), v.back()); v.pop_back(); EXPECT_EQ(len - 1, v.size()); - for (int i = 0; i < v.size(); ++i) { - EXPECT_EQ(i, v[i]); - EXPECT_EQ(i, v.at(i)); + for (size_t i = 0; i < v.size(); ++i) { + EXPECT_EQ(static_cast(i), v[i]); + EXPECT_EQ(static_cast(i), v.at(i)); } } } @@ -191,7 +194,7 @@ TEST(IntVec, SimpleOps) { TEST(IntVec, PopBackNoOverflow) { IntVec v = {1}; v.pop_back(); - EXPECT_EQ(v.size(), 0); + EXPECT_EQ(v.size(), 0u); } TEST(IntVec, AtThrows) { @@ -202,47 +205,47 @@ TEST(IntVec, AtThrows) { } TEST(IntVec, ReverseIterator) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v; Fill(&v, len); - int counter = len; + size_t counter = len; for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) { counter--; - EXPECT_EQ(counter, *iter); + EXPECT_EQ(static_cast(counter), *iter); } - EXPECT_EQ(counter, 0); + 
EXPECT_EQ(counter, 0u); counter = len; for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) { counter--; - EXPECT_EQ(counter, *iter); + EXPECT_EQ(static_cast(counter), *iter); } - EXPECT_EQ(counter, 0); + EXPECT_EQ(counter, 0u); counter = len; for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend(); ++iter) { counter--; - EXPECT_EQ(counter, *iter); + EXPECT_EQ(static_cast(counter), *iter); } - EXPECT_EQ(counter, 0); + EXPECT_EQ(counter, 0u); } } TEST(IntVec, Erase) { - for (int len = 1; len < 20; len++) { - for (int i = 0; i < len; ++i) { + for (size_t len = 1; len < 20; len++) { + for (size_t i = 0; i < len; ++i) { IntVec v; Fill(&v, len); v.erase(v.begin() + i); EXPECT_EQ(len - 1, v.size()); - for (int j = 0; j < i; ++j) { - EXPECT_EQ(j, v[j]); + for (size_t j = 0; j < i; ++j) { + EXPECT_EQ(static_cast(j), v[j]); } - for (int j = i; j < len - 1; ++j) { - EXPECT_EQ(j + 1, v[j]); + for (size_t j = i; j < len - 1; ++j) { + EXPECT_EQ(static_cast(j + 1), v[j]); } } } @@ -254,7 +257,8 @@ TEST(IntVec, Hardened) { EXPECT_EQ(v[9], 9); #if !defined(NDEBUG) || ABSL_OPTION_HARDENED EXPECT_DEATH_IF_SUPPORTED(v[10], ""); - EXPECT_DEATH_IF_SUPPORTED(v[-1], ""); + EXPECT_DEATH_IF_SUPPORTED(v[static_cast(-1)], ""); + EXPECT_DEATH_IF_SUPPORTED(v.resize(v.max_size() + 1), ""); #endif } @@ -262,43 +266,43 @@ TEST(IntVec, Hardened) { // should have reference counts == 0, and all others elements should have // reference counts == 1. TEST(RefCountedVec, EraseBeginEnd) { - for (int len = 1; len < 20; ++len) { - for (int erase_begin = 0; erase_begin < len; ++erase_begin) { - for (int erase_end = erase_begin; erase_end <= len; ++erase_end) { + for (size_t len = 1; len < 20; ++len) { + for (size_t erase_begin = 0; erase_begin < len; ++erase_begin) { + for (size_t erase_end = erase_begin; erase_end <= len; ++erase_end) { std::vector counts(len, 0); RefCountedVec v; - for (int i = 0; i < len; ++i) { - v.push_back(RefCounted(i, &counts[i])); + for (size_t i = 0; i < len; ++i) { + v.push_back(RefCounted(static_cast(i), &counts[i])); } - int erase_len = erase_end - erase_begin; + size_t erase_len = erase_end - erase_begin; v.erase(v.begin() + erase_begin, v.begin() + erase_end); EXPECT_EQ(len - erase_len, v.size()); // Check the elements before the first element erased. - for (int i = 0; i < erase_begin; ++i) { - EXPECT_EQ(i, v[i].value_); + for (size_t i = 0; i < erase_begin; ++i) { + EXPECT_EQ(static_cast(i), v[i].value_); } // Check the elements after the first element erased. - for (int i = erase_begin; i < v.size(); ++i) { - EXPECT_EQ(i + erase_len, v[i].value_); + for (size_t i = erase_begin; i < v.size(); ++i) { + EXPECT_EQ(static_cast(i + erase_len), v[i].value_); } // Check that the elements at the beginning are preserved. - for (int i = 0; i < erase_begin; ++i) { + for (size_t i = 0; i < erase_begin; ++i) { EXPECT_EQ(1, counts[i]); } // Check that the erased elements are destroyed - for (int i = erase_begin; i < erase_end; ++i) { + for (size_t i = erase_begin; i < erase_end; ++i) { EXPECT_EQ(0, counts[i]); } // Check that the elements at the end are preserved. 
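// Sketch of what the IntVec.Hardened expectations above exercise: with
// assertions enabled (!NDEBUG or ABSL_OPTION_HARDENED), an out-of-range
// operator[] and a resize() past max_size() terminate instead of invoking
// undefined behavior. Purely illustrative.
#include "absl/container/inlined_vector.h"

void HardenedChecksSketch() {
  absl::InlinedVector<int, 4> v = {0, 1, 2};
  // v[3];                        // out of range: asserts in hardened builds
  // v.resize(v.max_size() + 1);  // beyond max_size(): asserts in hardened builds
  (void)v;
}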
- for (int i = erase_end; i < len; ++i) { + for (size_t i = erase_end; i < len; ++i) { EXPECT_EQ(1, counts[i]); } } @@ -377,21 +381,21 @@ TEST(InlinedVectorTest, ShrinkToFitGrowingVector) { absl::InlinedVector, 1> v; v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 1); + EXPECT_EQ(v.capacity(), 1u); v.emplace_back("answer", 42); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 1); + EXPECT_EQ(v.capacity(), 1u); v.emplace_back("taxicab", 1729); - EXPECT_GE(v.capacity(), 2); + EXPECT_GE(v.capacity(), 2u); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 2); + EXPECT_EQ(v.capacity(), 2u); v.reserve(100); - EXPECT_GE(v.capacity(), 100); + EXPECT_GE(v.capacity(), 100u); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 2); + EXPECT_EQ(v.capacity(), 2u); } TEST(InlinedVectorTest, ShrinkToFitEdgeCases) { @@ -399,10 +403,10 @@ TEST(InlinedVectorTest, ShrinkToFitEdgeCases) { absl::InlinedVector, 1> v; v.emplace_back("answer", 42); v.emplace_back("taxicab", 1729); - EXPECT_GE(v.capacity(), 2); + EXPECT_GE(v.capacity(), 2u); v.pop_back(); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 1); + EXPECT_EQ(v.capacity(), 1u); EXPECT_EQ(v[0].first, "answer"); EXPECT_EQ(v[0].second, 42); } @@ -411,34 +415,34 @@ TEST(InlinedVectorTest, ShrinkToFitEdgeCases) { absl::InlinedVector v(100); v.resize(0); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 2); // inlined capacity + EXPECT_EQ(v.capacity(), 2u); // inlined capacity } { absl::InlinedVector v(100); v.resize(1); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 2); // inlined capacity + EXPECT_EQ(v.capacity(), 2u); // inlined capacity } { absl::InlinedVector v(100); v.resize(2); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 2); + EXPECT_EQ(v.capacity(), 2u); } { absl::InlinedVector v(100); v.resize(3); v.shrink_to_fit(); - EXPECT_EQ(v.capacity(), 3); + EXPECT_EQ(v.capacity(), 3u); } } TEST(IntVec, Insert) { - for (int len = 0; len < 20; len++) { - for (int pos = 0; pos <= len; pos++) { + for (size_t len = 0; len < 20; len++) { + for (ptrdiff_t pos = 0; pos <= static_cast(len); pos++) { { // Single element std::vector std_v; @@ -526,16 +530,16 @@ TEST(IntVec, Insert) { TEST(RefCountedVec, InsertConstructorDestructor) { // Make sure the proper construction/destruction happen during insert // operations. 
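// The index-type edits in these tests follow one pattern: loop counters that
// track sizes become size_t, and a static_cast<int> is applied only where an
// int element value is expected, avoiding -Wsign-compare/-Wsign-conversion
// warnings. A minimal sketch of the same pattern:
#include <cstddef>
#include <vector>

void IndexTypeSketch(const std::vector<int>& v) {
  for (size_t i = 0; i < v.size(); ++i) {      // unsigned index vs. size()
    const int expected = static_cast<int>(i);  // narrow only at the comparison
    (void)expected;
    (void)v[i];
  }
}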
- for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { SCOPED_TRACE(len); - for (int pos = 0; pos <= len; pos++) { + for (size_t pos = 0; pos <= len; pos++) { SCOPED_TRACE(pos); std::vector counts(len, 0); int inserted_count = 0; RefCountedVec v; - for (int i = 0; i < len; ++i) { + for (size_t i = 0; i < len; ++i) { SCOPED_TRACE(i); - v.push_back(RefCounted(i, &counts[i])); + v.push_back(RefCounted(static_cast(i), &counts[i])); } EXPECT_THAT(counts, Each(Eq(1))); @@ -552,20 +556,20 @@ TEST(RefCountedVec, InsertConstructorDestructor) { } TEST(IntVec, Resize) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v; Fill(&v, len); // Try resizing up and down by k elements static const int kResizeElem = 1000000; - for (int k = 0; k < 10; k++) { + for (size_t k = 0; k < 10; k++) { // Enlarging resize v.resize(len + k, kResizeElem); EXPECT_EQ(len + k, v.size()); EXPECT_LE(len + k, v.capacity()); - for (int i = 0; i < len + k; i++) { + for (size_t i = 0; i < len + k; i++) { if (i < len) { - EXPECT_EQ(i, v[i]); + EXPECT_EQ(static_cast(i), v[i]); } else { EXPECT_EQ(kResizeElem, v[i]); } @@ -575,26 +579,26 @@ TEST(IntVec, Resize) { v.resize(len, kResizeElem); EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); - for (int i = 0; i < len; i++) { - EXPECT_EQ(i, v[i]); + for (size_t i = 0; i < len; i++) { + EXPECT_EQ(static_cast(i), v[i]); } } } } TEST(IntVec, InitWithLength) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v(len, 7); EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); - for (int i = 0; i < len; i++) { + for (size_t i = 0; i < len; i++) { EXPECT_EQ(7, v[i]); } } } TEST(IntVec, CopyConstructorAndAssignment) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v; Fill(&v, len); EXPECT_EQ(len, v.size()); @@ -603,7 +607,7 @@ TEST(IntVec, CopyConstructorAndAssignment) { IntVec v2(v); EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2); - for (int start_len = 0; start_len < 20; start_len++) { + for (size_t start_len = 0; start_len < 20; start_len++) { IntVec v3; Fill(&v3, start_len, 99); // Add dummy elements that should go away v3 = v; @@ -613,7 +617,7 @@ TEST(IntVec, CopyConstructorAndAssignment) { } TEST(IntVec, AliasingCopyAssignment) { - for (int len = 0; len < 20; ++len) { + for (size_t len = 0; len < 20; ++len) { IntVec original; Fill(&original, len); IntVec dup = original; @@ -623,9 +627,9 @@ TEST(IntVec, AliasingCopyAssignment) { } TEST(IntVec, MoveConstructorAndAssignment) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v_in; - const int inlined_capacity = v_in.capacity(); + const size_t inlined_capacity = v_in.capacity(); Fill(&v_in, len); EXPECT_EQ(len, v_in.size()); EXPECT_LE(len, v_in.capacity()); @@ -642,7 +646,7 @@ TEST(IntVec, MoveConstructorAndAssignment) { EXPECT_FALSE(v_out.data() == old_data); } } - for (int start_len = 0; start_len < 20; start_len++) { + for (size_t start_len = 0; start_len < 20; start_len++) { IntVec v_out; Fill(&v_out, start_len, 99); // Add dummy elements that should go away IntVec v_temp(v_in); @@ -681,10 +685,10 @@ class NotTriviallyDestructible { }; TEST(AliasingTest, Emplace) { - for (int i = 2; i < 20; ++i) { + for (size_t i = 2; i < 20; ++i) { absl::InlinedVector vec; - for (int j = 0; j < i; ++j) { - vec.push_back(NotTriviallyDestructible(j)); + for (size_t j = 0; j < i; ++j) { + vec.push_back(NotTriviallyDestructible(static_cast(j))); } 
vec.emplace(vec.begin(), vec[0]); EXPECT_EQ(vec[0], vec[1]); @@ -696,12 +700,12 @@ TEST(AliasingTest, Emplace) { } TEST(AliasingTest, InsertWithCount) { - for (int i = 1; i < 20; ++i) { + for (size_t i = 1; i < 20; ++i) { absl::InlinedVector vec; - for (int j = 0; j < i; ++j) { - vec.push_back(NotTriviallyDestructible(j)); + for (size_t j = 0; j < i; ++j) { + vec.push_back(NotTriviallyDestructible(static_cast(j))); } - for (int n = 0; n < 5; ++n) { + for (size_t n = 0; n < 5; ++n) { // We use back where we can because it's guaranteed to become invalidated vec.insert(vec.begin(), n, vec.back()); auto b = vec.begin(); @@ -759,22 +763,22 @@ TEST(OverheadTest, Storage) { } TEST(IntVec, Clear) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { SCOPED_TRACE(len); IntVec v; Fill(&v, len); v.clear(); - EXPECT_EQ(0, v.size()); + EXPECT_EQ(0u, v.size()); EXPECT_EQ(v.begin(), v.end()); } } TEST(IntVec, Reserve) { - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { IntVec v; Fill(&v, len); - for (int newlen = 0; newlen < 100; newlen++) { + for (size_t newlen = 0; newlen < 100; newlen++) { const int* start_rep = v.data(); v.reserve(newlen); const int* final_rep = v.data(); @@ -841,9 +845,9 @@ TEST(StringVec, SelfMove) { } TEST(IntVec, Swap) { - for (int l1 = 0; l1 < 20; l1++) { + for (size_t l1 = 0; l1 < 20; l1++) { SCOPED_TRACE(l1); - for (int l2 = 0; l2 < 20; l2++) { + for (size_t l2 = 0; l2 < 20; l2++) { SCOPED_TRACE(l2); IntVec a = Fill(l1, 0); IntVec b = Fill(l2, 100); @@ -853,13 +857,13 @@ TEST(IntVec, Swap) { } EXPECT_EQ(l1, b.size()); EXPECT_EQ(l2, a.size()); - for (int i = 0; i < l1; i++) { + for (size_t i = 0; i < l1; i++) { SCOPED_TRACE(i); - EXPECT_EQ(i, b[i]); + EXPECT_EQ(static_cast(i), b[i]); } - for (int i = 0; i < l2; i++) { + for (size_t i = 0; i < l2; i++) { SCOPED_TRACE(i); - EXPECT_EQ(100 + i, a[i]); + EXPECT_EQ(100 + static_cast(i), a[i]); } } } @@ -868,46 +872,48 @@ TEST(IntVec, Swap) { TYPED_TEST_P(InstanceTest, Swap) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; - for (int l1 = 0; l1 < 20; l1++) { + for (size_t l1 = 0; l1 < 20; l1++) { SCOPED_TRACE(l1); - for (int l2 = 0; l2 < 20; l2++) { + for (size_t l2 = 0; l2 < 20; l2++) { SCOPED_TRACE(l2); InstanceTracker tracker; InstanceVec a, b; const size_t inlined_capacity = a.capacity(); auto min_len = std::min(l1, l2); auto max_len = std::max(l1, l2); - for (int i = 0; i < l1; i++) a.push_back(Instance(i)); - for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i)); - EXPECT_EQ(tracker.instances(), l1 + l2); + for (size_t i = 0; i < l1; i++) + a.push_back(Instance(static_cast(i))); + for (size_t i = 0; i < l2; i++) + b.push_back(Instance(100 + static_cast(i))); + EXPECT_EQ(tracker.instances(), static_cast(l1 + l2)); tracker.ResetCopiesMovesSwaps(); { using std::swap; swap(a, b); } - EXPECT_EQ(tracker.instances(), l1 + l2); + EXPECT_EQ(tracker.instances(), static_cast(l1 + l2)); if (a.size() > inlined_capacity && b.size() > inlined_capacity) { EXPECT_EQ(tracker.swaps(), 0); // Allocations are swapped. EXPECT_EQ(tracker.moves(), 0); } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) { - EXPECT_EQ(tracker.swaps(), min_len); + EXPECT_EQ(tracker.swaps(), static_cast(min_len)); EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()), - max_len - min_len); + static_cast(max_len - min_len)); } else { // One is allocated and the other isn't. 
The allocation is transferred // without copying elements, and the inlined instances are copied/moved. EXPECT_EQ(tracker.swaps(), 0); EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()), - min_len); + static_cast(min_len)); } EXPECT_EQ(l1, b.size()); EXPECT_EQ(l2, a.size()); - for (int i = 0; i < l1; i++) { - EXPECT_EQ(i, b[i].value()); + for (size_t i = 0; i < l1; i++) { + EXPECT_EQ(static_cast(i), b[i].value()); } - for (int i = 0; i < l2; i++) { - EXPECT_EQ(100 + i, a[i].value()); + for (size_t i = 0; i < l2; i++) { + EXPECT_EQ(100 + static_cast(i), a[i].value()); } } } @@ -936,9 +942,9 @@ TEST(IntVec, EqualAndNotEqual) { a.clear(); b.clear(); - for (int i = 0; i < 100; i++) { - a.push_back(i); - b.push_back(i); + for (size_t i = 0; i < 100; i++) { + a.push_back(static_cast(i)); + b.push_back(static_cast(i)); EXPECT_TRUE(a == b); EXPECT_FALSE(a != b); @@ -977,26 +983,26 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; InstanceTracker tracker; - for (int len = 0; len < 20; len++) { + for (size_t len = 0; len < 20; len++) { SCOPED_TRACE(len); tracker.ResetCopiesMovesSwaps(); InstanceVec v; const size_t inlined_capacity = v.capacity(); - for (int i = 0; i < len; i++) { - v.push_back(Instance(i)); + for (size_t i = 0; i < len; i++) { + v.push_back(Instance(static_cast(i))); } - EXPECT_EQ(tracker.instances(), len); + EXPECT_EQ(tracker.instances(), static_cast(len)); EXPECT_GE(tracker.copies() + tracker.moves(), - len); // More due to reallocation. + static_cast(len)); // More due to reallocation. tracker.ResetCopiesMovesSwaps(); // Enlarging resize() must construct some objects tracker.ResetCopiesMovesSwaps(); v.resize(len + 10, Instance(100)); - EXPECT_EQ(tracker.instances(), len + 10); + EXPECT_EQ(tracker.instances(), static_cast(len) + 10); if (len <= inlined_capacity && len + 10 > inlined_capacity) { - EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len); + EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + static_cast(len)); } else { // Only specify a minimum number of copies + moves. We don't want to // depend on the reallocation policy here. 
@@ -1007,29 +1013,30 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) { // Shrinking resize() must destroy some objects tracker.ResetCopiesMovesSwaps(); v.resize(len, Instance(100)); - EXPECT_EQ(tracker.instances(), len); + EXPECT_EQ(tracker.instances(), static_cast(len)); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); // reserve() must not increase the number of initialized objects SCOPED_TRACE("reserve"); v.reserve(len + 1000); - EXPECT_EQ(tracker.instances(), len); - EXPECT_EQ(tracker.copies() + tracker.moves(), len); + EXPECT_EQ(tracker.instances(), static_cast(len)); + EXPECT_EQ(tracker.copies() + tracker.moves(), static_cast(len)); // pop_back() and erase() must destroy one object if (len > 0) { tracker.ResetCopiesMovesSwaps(); v.pop_back(); - EXPECT_EQ(tracker.instances(), len - 1); + EXPECT_EQ(tracker.instances(), static_cast(len) - 1); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); if (!v.empty()) { tracker.ResetCopiesMovesSwaps(); v.erase(v.begin()); - EXPECT_EQ(tracker.instances(), len - 2); - EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2); + EXPECT_EQ(tracker.instances(), static_cast(len) - 2); + EXPECT_EQ(tracker.copies() + tracker.moves(), + static_cast(len) - 2); } } @@ -1086,12 +1093,12 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) { tracker.ResetCopiesMovesSwaps(); { InstanceVec v_copy(std::move(v)); - if (len > inlined_capacity) { + if (static_cast(len) > inlined_capacity) { // Allocation is moved as a whole. EXPECT_EQ(tracker.instances(), len); EXPECT_EQ(tracker.live_instances(), len); // Tests an implementation detail, don't rely on this in your code. - EXPECT_EQ(v.size(), 0); // NOLINT misc-use-after-move + EXPECT_EQ(v.size(), 0u); // NOLINT misc-use-after-move EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); } else { @@ -1157,7 +1164,7 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) { tracker.ResetCopiesMovesSwaps(); InstanceVec longer, shorter; - const int inlined_capacity = longer.capacity(); + const size_t inlined_capacity = longer.capacity(); for (int i = 0; i < len; i++) { longer.push_back(Instance(i)); shorter.push_back(Instance(i)); @@ -1176,7 +1183,7 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) { src_len = len; longer = std::move(shorter); } - if (src_len > inlined_capacity) { + if (static_cast(src_len) > inlined_capacity) { // Allocation moved as a whole. EXPECT_EQ(tracker.instances(), src_len); EXPECT_EQ(tracker.live_instances(), src_len); @@ -1201,6 +1208,8 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) { } TEST(CountElemAssign, SimpleTypeWithInlineBacking) { + const size_t inlined_capacity = absl::InlinedVector().capacity(); + for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] @@ -1209,10 +1218,10 @@ TEST(CountElemAssign, SimpleTypeWithInlineBacking) { absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(2, 123); - EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123))); - if (original_size <= 2) { + EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(123, 123))); + if (original_size <= inlined_capacity) { // If the original had inline backing, it should stay inline. 
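// As in the surrounding tests, the effective inline capacity is now read from
// a default-constructed vector instead of being hard-coded to the template
// argument N, since the implementation may round a small N up (see the
// StorageTest at the end of this file). Sketch:
#include <cstddef>
#include "absl/container/inlined_vector.h"

void InlinedCapacitySketch() {
  const size_t inlined_capacity = absl::InlinedVector<int, 2>().capacity();
  absl::InlinedVector<int, 2> v;
  v.assign(2, 123);
  // While the contents fit inline, capacity() stays at inlined_capacity
  // rather than exactly 2.
  (void)inlined_capacity;
  (void)v;
}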
- EXPECT_EQ(2, v.capacity()); + EXPECT_EQ(v.capacity(), inlined_capacity); } } } @@ -1226,7 +1235,7 @@ TEST(CountElemAssign, SimpleTypeWithAllocation) { absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(3, 123); - EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123))); + EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(123, 123, 123))); EXPECT_LE(v.size(), v.capacity()); } } @@ -1241,10 +1250,10 @@ TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) { absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(2, Instance(123)); - EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123)))); + EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(ValueIs(123), ValueIs(123)))); if (original_size <= 2) { // If the original had inline backing, it should stay inline. - EXPECT_EQ(2, v.capacity()); + EXPECT_EQ(2u, v.capacity()); } } } @@ -1259,8 +1268,8 @@ void InstanceCountElemAssignWithAllocationTest() { absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(3, Instance(123)); - EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123), - ValueIs(123)))); + EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(ValueIs(123), ValueIs(123), + ValueIs(123)))); EXPECT_LE(v.size(), v.capacity()); } } @@ -1275,16 +1284,17 @@ TEST(RangedConstructor, SimpleType) { std::vector source_v = {4, 5, 6}; // First try to fit in inline backing absl::InlinedVector v(source_v.begin(), source_v.end()); - EXPECT_EQ(3, v.size()); - EXPECT_EQ(4, v.capacity()); // Indication that we're still on inlined storage + EXPECT_EQ(3u, v.size()); + EXPECT_EQ(4u, + v.capacity()); // Indication that we're still on inlined storage EXPECT_EQ(4, v[0]); EXPECT_EQ(5, v[1]); EXPECT_EQ(6, v[2]); // Now, force a re-allocate absl::InlinedVector realloc_v(source_v.begin(), source_v.end()); - EXPECT_EQ(3, realloc_v.size()); - EXPECT_LT(2, realloc_v.capacity()); + EXPECT_EQ(3u, realloc_v.size()); + EXPECT_LT(2u, realloc_v.capacity()); EXPECT_EQ(4, realloc_v[0]); EXPECT_EQ(5, realloc_v[1]); EXPECT_EQ(6, realloc_v[2]); @@ -1299,8 +1309,8 @@ void InstanceRangedConstructorTestForContainer() { tracker.ResetCopiesMovesSwaps(); absl::InlinedVector v(source_v.begin(), source_v.end()); - EXPECT_EQ(2, v.size()); - EXPECT_LT(1, v.capacity()); + EXPECT_EQ(2u, v.size()); + EXPECT_LT(1u, v.capacity()); EXPECT_EQ(0, v[0].value()); EXPECT_EQ(1, v[1].value()); EXPECT_EQ(tracker.copies(), 2); @@ -1352,6 +1362,8 @@ TEST(RangedConstructor, ElementsAreConstructed) { } TEST(RangedAssign, SimpleType) { + const size_t inlined_capacity = absl::InlinedVector().capacity(); + // Test for all combinations of original sizes (empty and non-empty inline, // and out of line) and target sizes. for (size_t original_size = 0; original_size <= 5; ++original_size) { @@ -1365,7 +1377,7 @@ TEST(RangedAssign, SimpleType) { // New contents are [3, 4, ...] std::vector new_contents; for (size_t i = 0; i < target_size; ++i) { - new_contents.push_back(i + 3); + new_contents.push_back(static_cast(i + 3)); } absl::InlinedVector v(original_contents.begin(), @@ -1374,9 +1386,10 @@ TEST(RangedAssign, SimpleType) { EXPECT_EQ(new_contents.size(), v.size()); EXPECT_LE(new_contents.size(), v.capacity()); - if (target_size <= 3 && original_size <= 3) { + if (target_size <= inlined_capacity && + original_size <= inlined_capacity) { // Storage should stay inline when target size is small. 
- EXPECT_EQ(3, v.capacity()); + EXPECT_EQ(v.capacity(), inlined_capacity); } EXPECT_THAT(v, ElementsAreArray(new_contents)); } @@ -1409,7 +1422,7 @@ void InstanceRangedAssignTestForContainer() { // TODO(bsamwel): Test with an input iterator. std::vector new_contents_in; for (size_t i = 0; i < target_size; ++i) { - new_contents_in.push_back(Instance(i + 3)); + new_contents_in.push_back(Instance(static_cast(i) + 3)); } SourceContainer new_contents(new_contents_in.begin(), new_contents_in.end()); @@ -1422,7 +1435,7 @@ void InstanceRangedAssignTestForContainer() { EXPECT_LE(new_contents.size(), v.capacity()); if (target_size <= 3 && original_size <= 3) { // Storage should stay inline when target size is small. - EXPECT_EQ(3, v.capacity()); + EXPECT_EQ(3u, v.capacity()); } EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(), InstanceValuesEqual)); @@ -1446,12 +1459,12 @@ TYPED_TEST_P(InstanceTest, RangedAssign) { TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) { EXPECT_THAT((absl::InlinedVector{4, 5, 6}), - AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6))); + AllOf(SizeIs(3u), CapacityIs(4u), ElementsAre(4, 5, 6))); } TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) { EXPECT_THAT((absl::InlinedVector{4, 5, 6}), - AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6))); + AllOf(SizeIs(3u), CapacityIs(Gt(2u)), ElementsAre(4, 5, 6))); } TEST(InitializerListConstructor, DisparateTypesInList) { @@ -1462,16 +1475,19 @@ TEST(InitializerListConstructor, DisparateTypesInList) { } TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) { - EXPECT_THAT((absl::InlinedVector{ - CopyableMovableInstance(0)}), - AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0)))); + const size_t inlined_capacity = + absl::InlinedVector().capacity(); + EXPECT_THAT( + (absl::InlinedVector{ + CopyableMovableInstance(0)}), + AllOf(SizeIs(1u), CapacityIs(inlined_capacity), ElementsAre(ValueIs(0)))); } TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) { - EXPECT_THAT( - (absl::InlinedVector{ - CopyableMovableInstance(0), CopyableMovableInstance(1)}), - AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1)))); + EXPECT_THAT((absl::InlinedVector{ + CopyableMovableInstance(0), CopyableMovableInstance(1)}), + AllOf(SizeIs(2u), CapacityIs(Gt(1u)), + ElementsAre(ValueIs(0), ValueIs(1)))); } TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) { @@ -1481,14 +1497,14 @@ TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) { absl::InlinedVector v1(original_size, 12345); const size_t original_capacity_v1 = v1.capacity(); v1.assign({3}); - EXPECT_THAT( - v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3))); + EXPECT_THAT(v1, AllOf(SizeIs(1u), CapacityIs(original_capacity_v1), + ElementsAre(3))); absl::InlinedVector v2(original_size, 12345); const size_t original_capacity_v2 = v2.capacity(); v2 = {3}; - EXPECT_THAT( - v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3))); + EXPECT_THAT(v2, AllOf(SizeIs(1u), CapacityIs(original_capacity_v2), + ElementsAre(3))); } } @@ -1497,13 +1513,13 @@ TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) { SCOPED_TRACE(original_size); absl::InlinedVector v1(original_size, 12345); v1.assign({3, 4, 5}); - EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5))); - EXPECT_LE(3, v1.capacity()); + EXPECT_THAT(v1, AllOf(SizeIs(3u), ElementsAre(3, 4, 5))); + EXPECT_LE(3u, v1.capacity()); absl::InlinedVector v2(original_size, 12345); v2 = {3, 4, 5}; - 
EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5))); - EXPECT_LE(3, v2.capacity()); + EXPECT_THAT(v2, AllOf(SizeIs(3u), ElementsAre(3, 4, 5))); + EXPECT_LE(3u, v2.capacity()); } } @@ -1532,7 +1548,7 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) { absl::InlinedVector v(original_size, Instance(12345)); const size_t original_capacity = v.capacity(); v.assign({Instance(3)}); - EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity), + EXPECT_THAT(v, AllOf(SizeIs(1u), CapacityIs(original_capacity), ElementsAre(ValueIs(3)))); } for (size_t original_size = 0; original_size <= 4; ++original_size) { @@ -1540,22 +1556,23 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) { absl::InlinedVector v(original_size, Instance(12345)); v.assign({Instance(3), Instance(4), Instance(5)}); EXPECT_THAT( - v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5)))); - EXPECT_LE(3, v.capacity()); + v, AllOf(SizeIs(3u), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5)))); + EXPECT_LE(3u, v.capacity()); } } -REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors, - CountConstructorsDestructorsOnCopyConstruction, - CountConstructorsDestructorsOnMoveConstruction, - CountConstructorsDestructorsOnAssignment, - CountConstructorsDestructorsOnMoveAssignment, - CountElemAssignInlineBacking, RangedConstructor, - RangedAssign, InitializerListAssign); +REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors, + CountConstructorsDestructorsOnCopyConstruction, + CountConstructorsDestructorsOnMoveConstruction, + CountConstructorsDestructorsOnAssignment, + CountConstructorsDestructorsOnMoveAssignment, + CountElemAssignInlineBacking, RangedConstructor, + RangedAssign, InitializerListAssign); using InstanceTypes = ::testing::Types; -INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest, + InstanceTypes); TEST(DynamicVec, DynamicVecCompiles) { DynamicVec v; @@ -1586,54 +1603,54 @@ TEST(AllocatorSupportTest, CountAllocations) { MyAlloc alloc(&allocated); { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc); - EXPECT_THAT(allocated, 0); + EXPECT_THAT(allocated, Eq(0)); } - EXPECT_THAT(allocated, 0); + EXPECT_THAT(allocated, Eq(0)); { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); - EXPECT_THAT(allocated, v.size() * sizeof(int)); + EXPECT_THAT(allocated, Eq(static_cast(v.size() * sizeof(int)))); } - EXPECT_THAT(allocated, 0); + EXPECT_THAT(allocated, Eq(0)); { AllocVec v(4, 1, alloc); - EXPECT_THAT(allocated, 0); + EXPECT_THAT(allocated, Eq(0)); int64_t allocated2 = 0; MyAlloc alloc2(&allocated2); AllocVec v2(v, alloc2); - EXPECT_THAT(allocated2, 0); + EXPECT_THAT(allocated2, Eq(0)); int64_t allocated3 = 0; MyAlloc alloc3(&allocated3); AllocVec v3(std::move(v), alloc3); - EXPECT_THAT(allocated3, 0); + EXPECT_THAT(allocated3, Eq(0)); } EXPECT_THAT(allocated, 0); { AllocVec v(8, 2, alloc); - EXPECT_THAT(allocated, v.size() * sizeof(int)); + EXPECT_THAT(allocated, Eq(static_cast(v.size() * sizeof(int)))); int64_t allocated2 = 0; MyAlloc alloc2(&allocated2); AllocVec v2(v, alloc2); - EXPECT_THAT(allocated2, v2.size() * sizeof(int)); + EXPECT_THAT(allocated2, Eq(static_cast(v2.size() * sizeof(int)))); int64_t allocated3 = 0; MyAlloc alloc3(&allocated3); AllocVec v3(std::move(v), alloc3); - EXPECT_THAT(allocated3, v3.size() * sizeof(int)); + EXPECT_THAT(allocated3, Eq(static_cast(v3.size() * sizeof(int)))); } EXPECT_EQ(allocated, 0); { // Test shrink_to_fit 
deallocations. AllocVec v(8, 2, alloc); - EXPECT_EQ(allocated, 8 * sizeof(int)); + EXPECT_EQ(allocated, static_cast(8 * sizeof(int))); v.resize(5); - EXPECT_EQ(allocated, 8 * sizeof(int)); + EXPECT_EQ(allocated, static_cast(8 * sizeof(int))); v.shrink_to_fit(); - EXPECT_EQ(allocated, 5 * sizeof(int)); + EXPECT_EQ(allocated, static_cast(5 * sizeof(int))); v.resize(4); - EXPECT_EQ(allocated, 5 * sizeof(int)); + EXPECT_EQ(allocated, static_cast(5 * sizeof(int))); v.shrink_to_fit(); EXPECT_EQ(allocated, 0); } @@ -1652,13 +1669,17 @@ TEST(AllocatorSupportTest, SwapBothAllocated) { AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); EXPECT_LT(v1.capacity(), v2.capacity()); - EXPECT_THAT(allocated1, v1.capacity() * sizeof(int)); - EXPECT_THAT(allocated2, v2.capacity() * sizeof(int)); + EXPECT_THAT(allocated1, + Eq(static_cast(v1.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, + Eq(static_cast(v2.capacity() * sizeof(int)))); v1.swap(v2); EXPECT_THAT(v1, ElementsAreArray(ia2)); EXPECT_THAT(v2, ElementsAreArray(ia1)); - EXPECT_THAT(allocated1, v2.capacity() * sizeof(int)); - EXPECT_THAT(allocated2, v1.capacity() * sizeof(int)); + EXPECT_THAT(allocated1, + Eq(static_cast(v2.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, + Eq(static_cast(v1.capacity() * sizeof(int)))); } EXPECT_THAT(allocated1, 0); EXPECT_THAT(allocated2, 0); @@ -1676,13 +1697,15 @@ TEST(AllocatorSupportTest, SwapOneAllocated) { MyAlloc a2(&allocated2); AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); - EXPECT_THAT(allocated1, v1.capacity() * sizeof(int)); - EXPECT_THAT(allocated2, 0); + EXPECT_THAT(allocated1, + Eq(static_cast(v1.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, Eq(0)); v1.swap(v2); EXPECT_THAT(v1, ElementsAreArray(ia2)); EXPECT_THAT(v2, ElementsAreArray(ia1)); - EXPECT_THAT(allocated1, v2.capacity() * sizeof(int)); - EXPECT_THAT(allocated2, 0); + EXPECT_THAT(allocated1, + Eq(static_cast(v2.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, Eq(0)); EXPECT_TRUE(v2.get_allocator() == a1); EXPECT_TRUE(v1.get_allocator() == a2); } @@ -1744,7 +1767,7 @@ TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) { } TEST(AllocatorSupportTest, SizeAllocConstructor) { - constexpr int inlined_size = 4; + constexpr size_t inlined_size = 4; using Alloc = CountingAllocator; using AllocVec = absl::InlinedVector; @@ -1754,7 +1777,7 @@ TEST(AllocatorSupportTest, SizeAllocConstructor) { auto v = AllocVec(len, Alloc(&allocated)); // Inline storage used; allocator should not be invoked - EXPECT_THAT(allocated, 0); + EXPECT_THAT(allocated, Eq(0)); EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); } @@ -1764,7 +1787,7 @@ TEST(AllocatorSupportTest, SizeAllocConstructor) { auto v = AllocVec(len, Alloc(&allocated)); // Out of line storage used; allocation of 8 elements expected - EXPECT_THAT(allocated, len * sizeof(int)); + EXPECT_THAT(allocated, Eq(static_cast(len * sizeof(int)))); EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); } } @@ -1799,9 +1822,9 @@ TEST(InlinedVectorTest, AbslHashValueWorks) { // Generate a variety of vectors some of these are small enough for the inline // space but are stored out of line. 
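// The AbslHashValue overload exercised by this test makes InlinedVector
// hashable with absl::Hash, so it can also serve as a key in Abseil hash
// containers. Minimal sketch:
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/hash/hash.h"

void InlinedVectorHashSketch() {
  absl::InlinedVector<int, 4> v = {1, 2, 3};
  const size_t h = absl::Hash<absl::InlinedVector<int, 4>>{}(v);
  absl::flat_hash_set<absl::InlinedVector<int, 4>> keys;
  keys.insert(v);
  (void)h;
}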
- for (int i = 0; i < 10; ++i) { + for (size_t i = 0; i < 10; ++i) { V v; - for (int j = 0; j < i; ++j) { + for (int j = 0; j < static_cast(i); ++j) { v.push_back(j); } cases.push_back(v); @@ -1812,4 +1835,226 @@ TEST(InlinedVectorTest, AbslHashValueWorks) { EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); } +class MoveConstructibleOnlyInstance + : public absl::test_internal::BaseCountedInstance { + public: + explicit MoveConstructibleOnlyInstance(int x) : BaseCountedInstance(x) {} + MoveConstructibleOnlyInstance(MoveConstructibleOnlyInstance&& other) = + default; + MoveConstructibleOnlyInstance& operator=( + MoveConstructibleOnlyInstance&& other) = delete; +}; + +MATCHER(HasValue, "") { + return ::testing::get<0>(arg).value() == ::testing::get<1>(arg); +} + +TEST(NonAssignableMoveAssignmentTest, AllocatedToInline) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector inlined; + inlined.emplace_back(1); + absl::InlinedVector allocated; + allocated.emplace_back(1); + allocated.emplace_back(2); + allocated.emplace_back(3); + tracker.ResetCopiesMovesSwaps(); + + inlined = std::move(allocated); + // passed ownership of the allocated storage + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 3); + + EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3})); +} + +TEST(NonAssignableMoveAssignmentTest, InlineToAllocated) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector inlined; + inlined.emplace_back(1); + absl::InlinedVector allocated; + allocated.emplace_back(1); + allocated.emplace_back(2); + allocated.emplace_back(3); + tracker.ResetCopiesMovesSwaps(); + + allocated = std::move(inlined); + // Moved elements + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(tracker.live_instances(), 1); + + EXPECT_THAT(allocated, Pointwise(HasValue(), {1})); +} + +TEST(NonAssignableMoveAssignmentTest, InlineToInline) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector inlined_a; + inlined_a.emplace_back(1); + absl::InlinedVector inlined_b; + inlined_b.emplace_back(1); + tracker.ResetCopiesMovesSwaps(); + + inlined_a = std::move(inlined_b); + // Moved elements + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(tracker.live_instances(), 1); + + EXPECT_THAT(inlined_a, Pointwise(HasValue(), {1})); +} + +TEST(NonAssignableMoveAssignmentTest, AllocatedToAllocated) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector allocated_a; + allocated_a.emplace_back(1); + allocated_a.emplace_back(2); + allocated_a.emplace_back(3); + absl::InlinedVector allocated_b; + allocated_b.emplace_back(4); + allocated_b.emplace_back(5); + allocated_b.emplace_back(6); + allocated_b.emplace_back(7); + tracker.ResetCopiesMovesSwaps(); + + allocated_a = std::move(allocated_b); + // passed ownership of the allocated storage + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 4); + + EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7})); +} + +TEST(NonAssignableMoveAssignmentTest, AssignThis) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector v; + v.emplace_back(1); + v.emplace_back(2); + v.emplace_back(3); + + tracker.ResetCopiesMovesSwaps(); + + // Obfuscated in order to pass -Wself-move. 
+ v = std::move(*std::addressof(v)); + // nothing happens + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 3); + + EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3})); +} + +class NonSwappableInstance : public absl::test_internal::BaseCountedInstance { + public: + explicit NonSwappableInstance(int x) : BaseCountedInstance(x) {} + NonSwappableInstance(const NonSwappableInstance& other) = default; + NonSwappableInstance& operator=(const NonSwappableInstance& other) = default; + NonSwappableInstance(NonSwappableInstance&& other) = default; + NonSwappableInstance& operator=(NonSwappableInstance&& other) = default; +}; + +void swap(NonSwappableInstance&, NonSwappableInstance&) = delete; + +TEST(NonSwappableSwapTest, InlineAndAllocatedTransferStorageAndMove) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector inlined; + inlined.emplace_back(1); + absl::InlinedVector allocated; + allocated.emplace_back(1); + allocated.emplace_back(2); + allocated.emplace_back(3); + tracker.ResetCopiesMovesSwaps(); + + inlined.swap(allocated); + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(tracker.live_instances(), 4); + + EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3})); +} + +TEST(NonSwappableSwapTest, InlineAndInlineMoveIndividualElements) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector inlined_a; + inlined_a.emplace_back(1); + absl::InlinedVector inlined_b; + inlined_b.emplace_back(2); + tracker.ResetCopiesMovesSwaps(); + + inlined_a.swap(inlined_b); + EXPECT_EQ(tracker.moves(), 3); + EXPECT_EQ(tracker.live_instances(), 2); + + EXPECT_THAT(inlined_a, Pointwise(HasValue(), {2})); + EXPECT_THAT(inlined_b, Pointwise(HasValue(), {1})); +} + +TEST(NonSwappableSwapTest, AllocatedAndAllocatedOnlyTransferStorage) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector allocated_a; + allocated_a.emplace_back(1); + allocated_a.emplace_back(2); + allocated_a.emplace_back(3); + absl::InlinedVector allocated_b; + allocated_b.emplace_back(4); + allocated_b.emplace_back(5); + allocated_b.emplace_back(6); + allocated_b.emplace_back(7); + tracker.ResetCopiesMovesSwaps(); + + allocated_a.swap(allocated_b); + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 7); + + EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7})); + EXPECT_THAT(allocated_b, Pointwise(HasValue(), {1, 2, 3})); +} + +TEST(NonSwappableSwapTest, SwapThis) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector v; + v.emplace_back(1); + v.emplace_back(2); + v.emplace_back(3); + + tracker.ResetCopiesMovesSwaps(); + + v.swap(v); + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 3); + + EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3})); +} + +template +using CharVec = absl::InlinedVector; + +// Warning: This struct "simulates" the type `InlinedVector::Storage::Allocated` +// to make reasonable expectations for inlined storage capacity optimization. If +// implementation changes `Allocated`, then `MySpan` and tests that use it need +// to be updated accordingly. +template +struct MySpan { + T* data; + size_t size; +}; + +TEST(StorageTest, InlinedCapacityAutoIncrease) { + // The requested capacity is auto increased to `sizeof(MySpan)`. 
+ EXPECT_GT(CharVec<1>().capacity(), 1); + EXPECT_EQ(CharVec<1>().capacity(), sizeof(MySpan)); + EXPECT_EQ(CharVec<1>().capacity(), CharVec<2>().capacity()); + EXPECT_EQ(sizeof(CharVec<1>), sizeof(CharVec<2>)); + + // The requested capacity is auto increased to + // `sizeof(MySpan) / sizeof(int)`. + EXPECT_GT((absl::InlinedVector().capacity()), 1); + EXPECT_EQ((absl::InlinedVector().capacity()), + sizeof(MySpan) / sizeof(int)); +} + } // anonymous namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree.h index f636c5fc73..ab75afb403 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree.h @@ -58,8 +58,10 @@ #include #include +#include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/internal/common.h" +#include "absl/container/internal/common_policy_traits.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/layout.h" @@ -74,12 +76,24 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS +#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER) +// When compiled in sanitizer mode, we add generation integers to the nodes and +// iterators. When iterators are used, we validate that the container has not +// been mutated since the iterator was constructed. +#define ABSL_BTREE_ENABLE_GENERATIONS +#endif + +template +using compare_result_t = absl::result_of_t; + // A helper class that indicates if the Compare parameter is a key-compare-to // comparator. template using btree_is_key_compare_to = - std::is_convertible, - absl::weak_ordering>; + std::is_convertible, absl::weak_ordering>; struct StringBtreeDefaultLess { using is_transparent = void; @@ -87,7 +101,7 @@ struct StringBtreeDefaultLess { StringBtreeDefaultLess() = default; // Compatibility constructor. - StringBtreeDefaultLess(std::less) {} // NOLINT + StringBtreeDefaultLess(std::less) {} // NOLINT StringBtreeDefaultLess(std::less) {} // NOLINT // Allow converting to std::less for use in key_comp()/value_comp(). @@ -119,7 +133,7 @@ struct StringBtreeDefaultGreater { StringBtreeDefaultGreater() = default; - StringBtreeDefaultGreater(std::greater) {} // NOLINT + StringBtreeDefaultGreater(std::greater) {} // NOLINT StringBtreeDefaultGreater(std::greater) {} // NOLINT // Allow converting to std::greater for use in key_comp()/value_comp(). @@ -146,49 +160,140 @@ struct StringBtreeDefaultGreater { } }; -// A helper class to convert a boolean comparison into a three-way "compare-to" -// comparison that returns an `absl::weak_ordering`. This helper -// class is specialized for less, greater, -// less, greater, less, and -// greater. -// -// key_compare_to_adapter is provided so that btree users -// automatically get the more efficient compare-to code when using common -// Abseil string types with common comparison functors. -// These string-like specializations also turn on heterogeneous lookup by -// default. +// See below comments for checked_compare. 
+template ::value> +struct checked_compare_base : Compare { + using Compare::Compare; + explicit checked_compare_base(Compare c) : Compare(std::move(c)) {} + const Compare &comp() const { return *this; } +}; template -struct key_compare_to_adapter { - using type = Compare; +struct checked_compare_base { + explicit checked_compare_base(Compare c) : compare(std::move(c)) {} + const Compare &comp() const { return compare; } + Compare compare; +}; + +// A mechanism for opting out of checked_compare for use only in btree_test.cc. +struct BtreeTestOnlyCheckedCompareOptOutBase {}; + +// A helper class to adapt the specified comparator for two use cases: +// (1) When using common Abseil string types with common comparison functors, +// convert a boolean comparison into a three-way comparison that returns an +// `absl::weak_ordering`. This helper class is specialized for +// less, greater, less, +// greater, less, and greater. +// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see +// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever +// a comparison is made, we will make assertions to verify that the comparator +// is valid. +template +struct key_compare_adapter { + // Inherit from checked_compare_base to support function pointers and also + // keep empty-base-optimization (EBO) support for classes. + // Note: we can't use CompressedTuple here because that would interfere + // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a + // CompressedTuple and nested `CompressedTuple`s don't support EBO. + // TODO(b/214288561): use CompressedTuple instead once it supports EBO for + // nested `CompressedTuple`s. + struct checked_compare : checked_compare_base { + private: + using Base = typename checked_compare::checked_compare_base; + using Base::comp; + + // If possible, returns whether `t` is equivalent to itself. We can only do + // this for `Key`s because we can't be sure that it's safe to call + // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a + // compilation failure inside the implementation of the comparison operator. + bool is_self_equivalent(const Key &k) const { + // Note: this works for both boolean and three-way comparators. + return comp()(k, k) == 0; + } + // If we can't compare `t` with itself, returns true unconditionally. + template + bool is_self_equivalent(const T &) const { + return true; + } + + public: + using Base::Base; + checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT + + // Allow converting to Compare for use in key_comp()/value_comp(). + explicit operator Compare() const { return comp(); } + + template >::value, + int> = 0> + bool operator()(const T &lhs, const U &rhs) const { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const bool lhs_comp_rhs = comp()(lhs, rhs); + assert(!lhs_comp_rhs || !comp()(rhs, lhs)); + return lhs_comp_rhs; + } + + template < + typename T, typename U, + absl::enable_if_t, + absl::weak_ordering>::value, + int> = 0> + absl::weak_ordering operator()(const T &lhs, const U &rhs) const { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). 
+ assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); +#ifndef NDEBUG + const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); + if (lhs_comp_rhs > 0) { + assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); + } else if (lhs_comp_rhs == 0) { + assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); + } else { + assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } +#endif + return lhs_comp_rhs; + } + }; + using type = absl::conditional_t< + std::is_base_of::value, + Compare, checked_compare>; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, std::string> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, std::string> { using type = StringBtreeDefaultGreater; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::string_view> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::string_view> { using type = StringBtreeDefaultGreater; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::Cord> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::Cord> { using type = StringBtreeDefaultGreater; }; @@ -224,21 +329,69 @@ struct prefers_linear_node_search< T, absl::void_t> : T::absl_btree_prefer_linear_node_search {}; +template +constexpr bool compare_has_valid_result_type() { + using compare_result_type = compare_result_t; + return std::is_same::value || + std::is_convertible::value; +} + +template +class map_value_compare { + template + friend class btree; + + // Note: this `protected` is part of the API of std::map::value_compare. See + // https://en.cppreference.com/w/cpp/container/map/value_compare. + protected: + explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {} + + original_key_compare comp; // NOLINT + + public: + auto operator()(const value_type &lhs, const value_type &rhs) const + -> decltype(comp(lhs.first, rhs.first)) { + return comp(lhs.first, rhs.first); + } +}; + template -struct common_params { + bool IsMulti, bool IsMap, typename SlotPolicy> +struct common_params : common_policy_traits { using original_key_compare = Compare; // If Compare is a common comparator for a string-like type, then we adapt it // to use heterogeneous lookup and to be a key-compare-to comparator. - using key_compare = typename key_compare_to_adapter::type; + // We also adapt the comparator to diagnose invalid comparators in debug mode. + // We disable this when `Compare` is invalid in a way that will cause + // adaptation to fail (having invalid return type) so that we can give a + // better compilation failure in static_assert_validation. If we don't do + // this, then there will be cascading compilation failures that are confusing + // for users. + using key_compare = + absl::conditional_t(), + Compare, + typename key_compare_adapter::type>; + + static constexpr bool kIsKeyCompareStringAdapted = + std::is_same::value || + std::is_same::value; + static constexpr bool kIsKeyCompareTransparent = + IsTransparent::value || kIsKeyCompareStringAdapted; + static constexpr bool kEnableGenerations = +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + true; +#else + false; +#endif + // A type which indicates if we have a key-compare-to functor or a plain old // key-compare functor. 
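// Sketch of the kind of comparator the checked_compare adaptation above is
// designed to flag: operator<= is not a strict weak ordering (comp(k, k) is
// true), so in builds with assertions enabled the adapted comparator asserts
// as soon as such a comparison is made.
#include "absl/container/btree_set.h"

struct BadLessEqual {
  bool operator()(int a, int b) const { return a <= b; }  // not irreflexive
};

void CheckedCompareSketch() {
  absl::btree_set<int, BadLessEqual> s;
  s.insert(1);
  s.insert(2);  // performs comparisons; the added assertions fire in debug
                // builds because BadLessEqual(k, k) is true
}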
using is_key_compare_to = btree_is_key_compare_to; using allocator_type = Alloc; using key_type = Key; - using size_type = std::make_signed::type; + using size_type = size_t; using difference_type = ptrdiff_t; using slot_policy = SlotPolicy; @@ -250,6 +403,12 @@ struct common_params { using reference = value_type &; using const_reference = const value_type &; + using value_compare = + absl::conditional_t, + original_key_compare>; + using is_map_container = std::integral_constant; + // For the given lookup key type, returns whether we can have multiple // equivalent keys in the btree. If this is a multi-container, then we can. // Otherwise, we can have multiple equivalent keys only if all of the @@ -260,163 +419,26 @@ struct common_params { // that we know has the same equivalence classes for all lookup types. template constexpr static bool can_have_multiple_equivalent_keys() { - return Multi || - (IsTransparent::value && - !std::is_same::value && - !std::is_same::value && - !std::is_same::value); + return IsMulti || (IsTransparent::value && + !std::is_same::value && + !kIsKeyCompareStringAdapted); } enum { kTargetNodeSize = TargetNodeSize, - // Upper bound for the available space for values. This is largest for leaf + // Upper bound for the available space for slots. This is largest for leaf // nodes, which have overhead of at least a pointer + 4 bytes (for storing // 3 field_types and an enum). - kNodeValueSpace = - TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), + kNodeSlotSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), }; - // This is an integral type large enough to hold as many - // ValueSize-values as will fit a node of TargetNodeSize bytes. + // This is an integral type large enough to hold as many slots as will fit a + // node of TargetNodeSize bytes. using node_count_type = - absl::conditional_t<(kNodeValueSpace / sizeof(value_type) > + absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > (std::numeric_limits::max)()), uint16_t, uint8_t>; // NOLINT - - // The following methods are necessary for passing this struct as PolicyTraits - // for node_handle and/or are used within btree. - static value_type &element(slot_type *slot) { - return slot_policy::element(slot); - } - static const value_type &element(const slot_type *slot) { - return slot_policy::element(slot); - } - template - static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { - slot_policy::construct(alloc, slot, std::forward(args)...); - } - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - slot_policy::construct(alloc, slot, other); - } - static void destroy(Alloc *alloc, slot_type *slot) { - slot_policy::destroy(alloc, slot); - } - static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { - construct(alloc, new_slot, old_slot); - destroy(alloc, old_slot); - } - static void swap(Alloc *alloc, slot_type *a, slot_type *b) { - slot_policy::swap(alloc, a, b); - } - static void move(Alloc *alloc, slot_type *src, slot_type *dest) { - slot_policy::move(alloc, src, dest); - } -}; - -// A parameters structure for holding the type parameters for a btree_map. -// Compare and Alloc should be nothrow copy-constructible. -template -struct map_params : common_params> { - using super_type = typename map_params::common_params; - using mapped_type = Data; - // This type allows us to move keys when it is safe to do so. It is safe - // for maps in which value_type and mutable_value_type are layout compatible. 
- using slot_policy = typename super_type::slot_policy; - using slot_type = typename super_type::slot_type; - using value_type = typename super_type::value_type; - using init_type = typename super_type::init_type; - - using original_key_compare = typename super_type::original_key_compare; - // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare - class value_compare { - template - friend class btree; - - protected: - explicit value_compare(original_key_compare c) : comp(std::move(c)) {} - - original_key_compare comp; // NOLINT - - public: - auto operator()(const value_type &lhs, const value_type &rhs) const - -> decltype(comp(lhs.first, rhs.first)) { - return comp(lhs.first, rhs.first); - } - }; - using is_map_container = std::true_type; - - template - static auto key(const V &value) -> decltype(value.first) { - return value.first; - } - static const Key &key(const slot_type *s) { return slot_policy::key(s); } - static const Key &key(slot_type *s) { return slot_policy::key(s); } - // For use in node handle. - static auto mutable_key(slot_type *s) - -> decltype(slot_policy::mutable_key(s)) { - return slot_policy::mutable_key(s); - } - static mapped_type &value(value_type *value) { return value->second; } -}; - -// This type implements the necessary functions from the -// absl::container_internal::slot_type interface. -template -struct set_slot_policy { - using slot_type = Key; - using value_type = Key; - using mutable_value_type = Key; - - static value_type &element(slot_type *slot) { return *slot; } - static const value_type &element(const slot_type *slot) { return *slot; } - - template - static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { - absl::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - absl::allocator_traits::construct(*alloc, slot, std::move(*other)); - } - - template - static void destroy(Alloc *alloc, slot_type *slot) { - absl::allocator_traits::destroy(*alloc, slot); - } - - template - static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { - using std::swap; - swap(*a, *b); - } - - template - static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { - *dest = std::move(*src); - } -}; - -// A parameters structure for holding the type parameters for a btree_set. -// Compare and Alloc should be nothrow copy-constructible. 
-template -struct set_params : common_params> { - using value_type = Key; - using slot_type = typename set_params::common_params::slot_type; - using value_compare = - typename set_params::common_params::original_key_compare; - using is_map_container = std::false_type; - - template - static const V &key(const V &value) { return value; } - static const Key &key(const slot_type *slot) { return *slot; } - static const Key &key(slot_type *slot) { return *slot; } }; // An adapter class that converts a lower-bound compare into an upper-bound @@ -453,8 +475,8 @@ struct SearchResult { template struct SearchResult { SearchResult() {} - explicit SearchResult(V value) : value(value) {} - SearchResult(V value, MatchKind /*match*/) : value(value) {} + explicit SearchResult(V v) : value(v) {} + SearchResult(V v, MatchKind /*match*/) : value(v) {} V value; @@ -471,6 +493,7 @@ class btree_node { using field_type = typename Params::node_count_type; using allocator_type = typename Params::allocator_type; using slot_type = typename Params::slot_type; + using original_key_compare = typename Params::original_key_compare; public: using params_type = Params; @@ -492,21 +515,28 @@ class btree_node { // - Otherwise, choose binary. // TODO(ezb): Might make sense to add condition(s) based on node-size. using use_linear_search = std::integral_constant< - bool, - has_linear_node_search_preference::value - ? prefers_linear_node_search::value - : has_linear_node_search_preference::value + bool, has_linear_node_search_preference::value + ? prefers_linear_node_search::value + : has_linear_node_search_preference::value ? prefers_linear_node_search::value : std::is_arithmetic::value && - (std::is_same, key_compare>::value || + (std::is_same, + original_key_compare>::value || std::is_same, - key_compare>::value)>; + original_key_compare>::value)>; // This class is organized by absl::container_internal::Layout as if it had // the following structure: // // A pointer to the node's parent. // btree_node *parent; // + // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a + // // generation integer in order to check that when iterators are + // // used, they haven't been invalidated already. Only the generation on + // // the root is used, but we have one on each node because whether a node + // // is root or not can change. + // uint32_t generation; + // // // The position of the node in the node's parent. // field_type position; // // The index of the first populated value in `values`. @@ -553,63 +583,71 @@ class btree_node { btree_node() = default; private: - using layout_type = absl::container_internal::Layout; + using layout_type = + absl::container_internal::Layout; constexpr static size_type SizeWithNSlots(size_type n) { - return layout_type(/*parent*/ 1, - /*position, start, finish, max_count*/ 4, - /*slots*/ n, - /*children*/ 0) + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ n, + /*children*/ 0) .AllocSize(); } - // A lower bound for the overhead of fields other than values in a leaf node. + // A lower bound for the overhead of fields other than slots in a leaf node. constexpr static size_type MinimumOverhead() { - return SizeWithNSlots(1) - sizeof(value_type); + return SizeWithNSlots(1) - sizeof(slot_type); } // Compute how many values we can fit onto a leaf node taking into account // padding. 
- constexpr static size_type NodeTargetSlots(const int begin, const int end) { + constexpr static size_type NodeTargetSlots(const size_type begin, + const size_type end) { return begin == end ? begin - : SizeWithNSlots((begin + end) / 2 + 1) > - params_type::kTargetNodeSize - ? NodeTargetSlots(begin, (begin + end) / 2) - : NodeTargetSlots((begin + end) / 2 + 1, end); + : SizeWithNSlots((begin + end) / 2 + 1) > + params_type::kTargetNodeSize + ? NodeTargetSlots(begin, (begin + end) / 2) + : NodeTargetSlots((begin + end) / 2 + 1, end); } - enum { - kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), + constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize; + constexpr static size_type kNodeTargetSlots = + NodeTargetSlots(0, kTargetNodeSize); - // We need a minimum of 3 slots per internal node in order to perform - // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). For performance - // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy - // of 1/3 (for a node, not a b-tree). - kMinNodeSlots = 4, + // We need a minimum of 3 slots per internal node in order to perform + // splitting (1 value for the two nodes involved in the split and 1 value + // propagated to the parent as the delimiter for the split). For performance + // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy of + // 1/3 (for a node, not a b-tree). + constexpr static size_type kMinNodeSlots = 4; - kNodeSlots = - kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, + constexpr static size_type kNodeSlots = + kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots; - // The node is internal (i.e. is not a leaf node) if and only if `max_count` - // has this value. - kInternalNodeMaxCount = 0, - }; + // The node is internal (i.e. is not a leaf node) if and only if `max_count` + // has this value. + constexpr static field_type kInternalNodeMaxCount = 0; // Leaves can have less than kNodeSlots values. - constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) { - return layout_type(/*parent*/ 1, - /*position, start, finish, max_count*/ 4, - /*slots*/ slot_count, - /*children*/ 0); + constexpr static layout_type LeafLayout( + const size_type slot_count = kNodeSlots) { + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ slot_count, + /*children*/ 0); } constexpr static layout_type InternalLayout() { - return layout_type(/*parent*/ 1, - /*position, start, finish, max_count*/ 4, - /*slots*/ kNodeSlots, - /*children*/ kNodeSlots + 1); + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ kNodeSlots, + /*children*/ kNodeSlots + 1); } - constexpr static size_type LeafSize(const int slot_count = kNodeSlots) { + constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) { return LeafLayout(slot_count).AllocSize(); } constexpr static size_type InternalSize() { @@ -621,44 +659,47 @@ class btree_node { template inline typename layout_type::template ElementType *GetField() { // We assert that we don't read from values that aren't there. 
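A standalone sketch (not part of the patch) of the slot-count computation above: NodeTargetSlots binary-searches for the largest slot count whose node layout still fits in kTargetNodeSize. The overhead and slot-size constants below are assumptions for illustration only; the real code derives sizes from container_internal::Layout.

```cpp
#include <cstddef>
#include <cstdio>

// Assumed stand-ins for the real layout: fixed per-node overhead and slot size.
constexpr std::size_t kOverhead = 16;         // assumption: parent ptr + counters
constexpr std::size_t kSlotSize = 8;          // assumption: sizeof(slot_type)
constexpr std::size_t kTargetNodeSize = 256;  // default btree target node size

constexpr std::size_t SizeWithNSlots(std::size_t n) {
  return kOverhead + n * kSlotSize;
}

// Same shape as btree_node::NodeTargetSlots: binary search for the largest
// slot count that still fits in the target node size.
constexpr std::size_t NodeTargetSlots(std::size_t begin, std::size_t end) {
  return begin == end ? begin
         : SizeWithNSlots((begin + end) / 2 + 1) > kTargetNodeSize
               ? NodeTargetSlots(begin, (begin + end) / 2)
               : NodeTargetSlots((begin + end) / 2 + 1, end);
}

int main() {
  constexpr std::size_t kSlots = NodeTargetSlots(0, kTargetNodeSize);
  static_assert(SizeWithNSlots(kSlots) <= kTargetNodeSize, "must fit");
  std::printf("slots per leaf: %zu\n", kSlots);  // 30 with these assumptions
  return 0;
}
```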
- assert(N < 3 || !leaf()); + assert(N < 4 || is_internal()); return InternalLayout().template Pointer(reinterpret_cast(this)); } template inline const typename layout_type::template ElementType *GetField() const { - assert(N < 3 || !leaf()); + assert(N < 4 || is_internal()); return InternalLayout().template Pointer( reinterpret_cast(this)); } void set_parent(btree_node *p) { *GetField<0>() = p; } - field_type &mutable_finish() { return GetField<1>()[2]; } - slot_type *slot(int i) { return &GetField<2>()[i]; } + field_type &mutable_finish() { return GetField<2>()[2]; } + slot_type *slot(size_type i) { return &GetField<3>()[i]; } slot_type *start_slot() { return slot(start()); } slot_type *finish_slot() { return slot(finish()); } - const slot_type *slot(int i) const { return &GetField<2>()[i]; } - void set_position(field_type v) { GetField<1>()[0] = v; } - void set_start(field_type v) { GetField<1>()[1] = v; } - void set_finish(field_type v) { GetField<1>()[2] = v; } + const slot_type *slot(size_type i) const { return &GetField<3>()[i]; } + void set_position(field_type v) { GetField<2>()[0] = v; } + void set_start(field_type v) { GetField<2>()[1] = v; } + void set_finish(field_type v) { GetField<2>()[2] = v; } // This method is only called by the node init methods. - void set_max_count(field_type v) { GetField<1>()[3] = v; } + void set_max_count(field_type v) { GetField<2>()[3] = v; } public: // Whether this is a leaf node or not. This value doesn't change after the // node is created. - bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } + bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; } + // Whether this is an internal node or not. This value doesn't change after + // the node is created. + bool is_internal() const { return !is_leaf(); } // Getter for the position of this node in its parent. - field_type position() const { return GetField<1>()[0]; } + field_type position() const { return GetField<2>()[0]; } // Getter for the offset of the first value in the `values` array. field_type start() const { - // TODO(ezb): when floating storage is implemented, return GetField<1>()[1]; - assert(GetField<1>()[1] == 0); + // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; + assert(GetField<2>()[1] == 0); return 0; } // Getter for the offset after the last value in the `values` array. - field_type finish() const { return GetField<1>()[2]; } + field_type finish() const { return GetField<2>()[2]; } // Getters for the number of values stored in this node. field_type count() const { @@ -668,7 +709,7 @@ class btree_node { field_type max_count() const { // Internal nodes have max_count==kInternalNodeMaxCount. // Leaf nodes have max_count in [1, kNodeSlots]. - const field_type max_count = GetField<1>()[3]; + const field_type max_count = GetField<2>()[3]; return max_count == field_type{kInternalNodeMaxCount} ? field_type{kNodeSlots} : max_count; @@ -679,58 +720,83 @@ class btree_node { // Getter for whether the node is the root of the tree. The parent of the // root of the tree is the leftmost node in the tree which is guaranteed to // be a leaf. - bool is_root() const { return parent()->leaf(); } + bool is_root() const { return parent()->is_leaf(); } void make_root() { assert(parent()->is_root()); + set_generation(parent()->generation()); set_parent(parent()->parent()); } + // Gets the root node's generation integer, which is the one used by the tree. 
+ uint32_t *get_root_generation() const { + assert(params_type::kEnableGenerations); + const btree_node *curr = this; + for (; !curr->is_root(); curr = curr->parent()) continue; + return const_cast(&curr->GetField<1>()[0]); + } + + // Returns the generation for iterator validation. + uint32_t generation() const { + return params_type::kEnableGenerations ? *get_root_generation() : 0; + } + // Updates generation. Should only be called on a root node or during node + // initialization. + void set_generation(uint32_t generation) { + if (params_type::kEnableGenerations) GetField<1>()[0] = generation; + } + // Updates the generation. We do this whenever the node is mutated. + void next_generation() { + if (params_type::kEnableGenerations) ++*get_root_generation(); + } + // Getters for the key/value at position i in the node. - const key_type &key(int i) const { return params_type::key(slot(i)); } - reference value(int i) { return params_type::element(slot(i)); } - const_reference value(int i) const { return params_type::element(slot(i)); } + const key_type &key(size_type i) const { return params_type::key(slot(i)); } + reference value(size_type i) { return params_type::element(slot(i)); } + const_reference value(size_type i) const { + return params_type::element(slot(i)); + } // Getters/setter for the child at position i in the node. - btree_node *child(int i) const { return GetField<3>()[i]; } + btree_node *child(field_type i) const { return GetField<4>()[i]; } btree_node *start_child() const { return child(start()); } - btree_node *&mutable_child(int i) { return GetField<3>()[i]; } - void clear_child(int i) { + btree_node *&mutable_child(field_type i) { return GetField<4>()[i]; } + void clear_child(field_type i) { absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); } - void set_child(int i, btree_node *c) { + void set_child(field_type i, btree_node *c) { absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); mutable_child(i) = c; c->set_position(i); } - void init_child(int i, btree_node *c) { + void init_child(field_type i, btree_node *c) { set_child(i, c); c->set_parent(this); } // Returns the position of the first value whose key is not less than k. template - SearchResult lower_bound( + SearchResult lower_bound( const K &k, const key_compare &comp) const { return use_linear_search::value ? linear_search(k, comp) : binary_search(k, comp); } // Returns the position of the first value whose key is greater than k. template - int upper_bound(const K &k, const key_compare &comp) const { + size_type upper_bound(const K &k, const key_compare &comp) const { auto upper_compare = upper_bound_adapter(comp); return use_linear_search::value ? linear_search(k, upper_compare).value : binary_search(k, upper_compare).value; } template - SearchResult::value> + SearchResult::value> linear_search(const K &k, const Compare &comp) const { return linear_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); } template - SearchResult::value> + SearchResult::value> binary_search(const K &k, const Compare &comp) const { return binary_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); @@ -739,8 +805,8 @@ class btree_node { // Returns the position of the first value whose key is not less than k using // linear search performed using plain compare. 
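An aside (not part of the patch), assuming a build in which ABSL_BTREE_ENABLE_GENERATIONS is defined: roughly how the generation counters above surface use of an invalidated iterator. Without that macro the checks compile away and the same code is ordinary iterator misuse.

```cpp
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5};
  auto it = s.begin();
  // Erasing bumps the tree's generation (next_generation), so the generation
  // captured when `it` was constructed no longer matches the root's.
  s.erase(3);
  // With generations compiled in, operator* calls assert_valid_generation and
  // is expected to log a fatal "invalidated iterator" message; otherwise this
  // is simply use of an invalidated iterator.
  return *it;
}
```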
template - SearchResult linear_search_impl( - const K &k, int s, const int e, const Compare &comp, + SearchResult linear_search_impl( + const K &k, size_type s, const size_type e, const Compare &comp, std::false_type /* IsCompareTo */) const { while (s < e) { if (!comp(key(s), k)) { @@ -748,14 +814,14 @@ class btree_node { } ++s; } - return SearchResult{s}; + return SearchResult{s}; } // Returns the position of the first value whose key is not less than k using // linear search performed using compare-to. template - SearchResult linear_search_impl( - const K &k, int s, const int e, const Compare &comp, + SearchResult linear_search_impl( + const K &k, size_type s, const size_type e, const Compare &comp, std::true_type /* IsCompareTo */) const { while (s < e) { const absl::weak_ordering c = comp(key(s), k); @@ -772,30 +838,30 @@ class btree_node { // Returns the position of the first value whose key is not less than k using // binary search performed using plain compare. template - SearchResult binary_search_impl( - const K &k, int s, int e, const Compare &comp, + SearchResult binary_search_impl( + const K &k, size_type s, size_type e, const Compare &comp, std::false_type /* IsCompareTo */) const { while (s != e) { - const int mid = (s + e) >> 1; + const size_type mid = (s + e) >> 1; if (comp(key(mid), k)) { s = mid + 1; } else { e = mid; } } - return SearchResult{s}; + return SearchResult{s}; } // Returns the position of the first value whose key is not less than k using // binary search performed using compare-to. template - SearchResult binary_search_impl( - const K &k, int s, int e, const CompareTo &comp, + SearchResult binary_search_impl( + const K &k, size_type s, size_type e, const CompareTo &comp, std::true_type /* IsCompareTo */) const { if (params_type::template can_have_multiple_equivalent_keys()) { MatchKind exact_match = MatchKind::kNe; while (s != e) { - const int mid = (s + e) >> 1; + const size_type mid = (s + e) >> 1; const absl::weak_ordering c = comp(key(mid), k); if (c < 0) { s = mid + 1; @@ -812,7 +878,7 @@ class btree_node { return {s, exact_match}; } else { // Can't have multiple equivalent keys. while (s != e) { - const int mid = (s + e) >> 1; + const size_type mid = (s + e) >> 1; const absl::weak_ordering c = comp(key(mid), k); if (c < 0) { s = mid + 1; @@ -829,7 +895,7 @@ class btree_node { // Emplaces a value at position i, shifting all existing values and // children at positions >= i to the right by 1. template - void emplace_value(size_type i, allocator_type *alloc, Args &&... args); + void emplace_value(field_type i, allocator_type *alloc, Args &&...args); // Removes the values at positions [i, i + to_erase), shifting all existing // values and children after that range to the left by to_erase. Clears all @@ -837,9 +903,9 @@ class btree_node { void remove_values(field_type i, field_type to_erase, allocator_type *alloc); // Rebalances a node with its right sibling. - void rebalance_right_to_left(int to_move, btree_node *right, + void rebalance_right_to_left(field_type to_move, btree_node *right, allocator_type *alloc); - void rebalance_left_to_right(int to_move, btree_node *right, + void rebalance_left_to_right(field_type to_move, btree_node *right, allocator_type *alloc); // Splits a node, moving a portion of the node's values to its right sibling. @@ -850,7 +916,8 @@ class btree_node { void merge(btree_node *src, allocator_type *alloc); // Node allocation/deletion routines. 
- void init_leaf(btree_node *parent, int max_count) { + void init_leaf(field_type max_count, btree_node *parent) { + set_generation(0); set_parent(parent); set_position(0); set_start(0); @@ -860,7 +927,7 @@ class btree_node { start_slot(), max_count * sizeof(slot_type)); } void init_internal(btree_node *parent) { - init_leaf(parent, kNodeSlots); + init_leaf(kNodeSlots, parent); // Set `max_count` to a sentinel value to indicate that this node is // internal. set_max_count(kInternalNodeMaxCount); @@ -870,6 +937,7 @@ class btree_node { static void deallocate(const size_type size, btree_node *node, allocator_type *alloc) { + absl::container_internal::SanitizerUnpoisonMemoryRegion(node, size); absl::container_internal::Deallocate(alloc, node, size); } @@ -878,16 +946,19 @@ class btree_node { private: template - void value_init(const field_type i, allocator_type *alloc, Args &&... args) { + void value_init(const field_type i, allocator_type *alloc, Args &&...args) { + next_generation(); absl::container_internal::SanitizerUnpoisonObject(slot(i)); params_type::construct(alloc, slot(i), std::forward(args)...); } void value_destroy(const field_type i, allocator_type *alloc) { + next_generation(); params_type::destroy(alloc, slot(i)); absl::container_internal::SanitizerPoisonObject(slot(i)); } void value_destroy_n(const field_type i, const field_type n, allocator_type *alloc) { + next_generation(); for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) { params_type::destroy(alloc, s); absl::container_internal::SanitizerPoisonObject(s); @@ -903,6 +974,7 @@ class btree_node { // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. void transfer(const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { + next_generation(); transfer(slot(dest_i), src_node->slot(src_i), alloc); } @@ -911,6 +983,7 @@ class btree_node { void transfer_n(const size_type n, const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { + next_generation(); for (slot_type *src = src_node->slot(src_i), *end = src + n, *dest = slot(dest_i); src != end; ++src, ++dest) { @@ -923,23 +996,41 @@ class btree_node { void transfer_n_backward(const size_type n, const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { - for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n, - *dest = slot(dest_i + n - 1); + next_generation(); + for (slot_type *src = src_node->slot(src_i + n), *end = src - n, + *dest = slot(dest_i + n); src != end; --src, --dest) { - transfer(dest, src, alloc); + // If we modified the loop index calculations above to avoid the -1s here, + // it would result in UB in the computation of `end` (and possibly `src` + // as well, if n == 0), since slot() is effectively an array index and it + // is UB to compute the address of any out-of-bounds array element except + // for one-past-the-end. + transfer(dest - 1, src - 1, alloc); } } template friend class btree; template - friend struct btree_iterator; + friend class btree_iterator; friend class BtreeNodePeer; + friend struct btree_access; }; +template +bool AreNodesFromSameContainer(const Node *node_a, const Node *node_b) { + // If either node is null, then give up on checking whether they're from the + // same container. (If exactly one is null, then we'll trigger the + // default-constructed assert in Equals.) 
+ if (node_a == nullptr || node_b == nullptr) return true; + while (!node_a->is_root()) node_a = node_a->parent(); + while (!node_b->is_root()) node_b = node_b->parent(); + return node_a == node_b; +} + template -struct btree_iterator { - private: +class btree_iterator { + using field_type = typename Node::field_type; using key_type = typename Node::key_type; using size_type = typename Node::size_type; using params_type = typename Node::params_type; @@ -967,9 +1058,15 @@ struct btree_iterator { using reference = Reference; using iterator_category = std::bidirectional_iterator_tag; - btree_iterator() : node(nullptr), position(-1) {} - explicit btree_iterator(Node *n) : node(n), position(n->start()) {} - btree_iterator(Node *n, int p) : node(n), position(p) {} + btree_iterator() : btree_iterator(nullptr, -1) {} + explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {} + btree_iterator(Node *n, int p) : node_(n), position_(p) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // Use `~uint32_t{}` as a sentinel value for iterator generations so it + // doesn't match the initial value for the actual generation. + generation_ = n != nullptr ? n->generation() : ~uint32_t{}; +#endif + } // NOTE: this SFINAE allows for implicit conversions from iterator to // const_iterator, but it specifically avoids hiding the copy constructor so @@ -980,58 +1077,45 @@ struct btree_iterator { std::is_same::value, int> = 0> btree_iterator(const btree_iterator other) // NOLINT - : node(other.node), position(other.position) {} - - private: - // This SFINAE allows explicit conversions from const_iterator to - // iterator, but also avoids hiding the copy constructor. - // NOTE: the const_cast is safe because this constructor is only called by - // non-const methods and the container owns the nodes. - template , const_iterator>::value && - std::is_same::value, - int> = 0> - explicit btree_iterator(const btree_iterator other) - : node(const_cast(other.node)), position(other.position) {} - - // Increment/decrement the iterator. - void increment() { - if (node->leaf() && ++position < node->finish()) { - return; - } - increment_slow(); + : node_(other.node_), position_(other.position_) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + generation_ = other.generation_; +#endif } - void increment_slow(); - void decrement() { - if (node->leaf() && --position >= node->start()) { - return; - } - decrement_slow(); - } - void decrement_slow(); - - public: bool operator==(const iterator &other) const { - return node == other.node && position == other.position; + return Equals(other.node_, other.position_); } bool operator==(const const_iterator &other) const { - return node == other.node && position == other.position; + return Equals(other.node_, other.position_); } bool operator!=(const iterator &other) const { - return node != other.node || position != other.position; + return !Equals(other.node_, other.position_); } bool operator!=(const const_iterator &other) const { - return node != other.node || position != other.position; + return !Equals(other.node_, other.position_); + } + + // Returns n such that n calls to ++other yields *this. + // Precondition: n exists. + difference_type operator-(const_iterator other) const { + if (node_ == other.node_) { + if (node_->is_leaf()) return position_ - other.position_; + if (position_ == other.position_) return 0; + } + return distance_slow(other); } // Accessors for the key/value the iterator is pointing at. 
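Another aside (not part of the patch): what the Equals/AreNodesFromSameContainer assertion above guards against. In builds with assertions enabled, comparing iterators that belong to different b-tree containers is diagnosed by walking each iterator's node up to its root.

```cpp
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> a = {{1, 1}};
  absl::btree_map<int, int> b = {{2, 2}};
  // The iterators come from different trees: with assertions enabled, Equals
  // asserts AreNodesFromSameContainer (each root is found by walking parents);
  // in a release build the comparison just evaluates to false here.
  return a.begin() == b.begin() ? 1 : 0;
}
```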
reference operator*() const { - ABSL_HARDENING_ASSERT(node != nullptr); - ABSL_HARDENING_ASSERT(node->start() <= position); - ABSL_HARDENING_ASSERT(node->finish() > position); - return node->value(position); + ABSL_HARDENING_ASSERT(node_ != nullptr); + assert_valid_generation(); + ABSL_HARDENING_ASSERT(position_ >= node_->start()); + if (position_ >= node_->finish()) { + ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator"); + ABSL_HARDENING_ASSERT(position_ < node_->finish()); + } + return node_->value(static_cast(position_)); } pointer operator->() const { return &operator*(); } @@ -1069,23 +1153,114 @@ struct btree_iterator { friend class btree_multiset_container; template friend class base_checker; + friend struct btree_access; - const key_type &key() const { return node->key(position); } - slot_type *slot() { return node->slot(position); } + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids hiding the copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template , const_iterator>::value && + std::is_same::value, + int> = 0> + explicit btree_iterator(const btree_iterator other) + : node_(const_cast(other.node_)), + position_(other.position_) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + generation_ = other.generation_; +#endif + } + + bool Equals(const node_type *other_node, int other_position) const { + ABSL_HARDENING_ASSERT(((node_ == nullptr && other_node == nullptr) || + (node_ != nullptr && other_node != nullptr)) && + "Comparing default-constructed iterator with " + "non-default-constructed iterator."); + // Note: we use assert instead of ABSL_HARDENING_ASSERT here because this + // changes the complexity of Equals from O(1) to O(log(N) + log(M)) where + // N/M are sizes of the containers containing node_/other_node. + assert(AreNodesFromSameContainer(node_, other_node) && + "Comparing iterators from different containers."); + return node_ == other_node && position_ == other_position; + } + + bool IsEndIterator() const { + if (position_ != node_->finish()) return false; + node_type *node = node_; + while (!node->is_root()) { + if (node->position() != node->parent()->finish()) return false; + node = node->parent(); + } + return true; + } + + // Returns n such that n calls to ++other yields *this. + // Precondition: n exists && (this->node_ != other.node_ || + // !this->node_->is_leaf() || this->position_ != other.position_). + difference_type distance_slow(const_iterator other) const; + + // Increment/decrement the iterator. + void increment() { + assert_valid_generation(); + if (node_->is_leaf() && ++position_ < node_->finish()) { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() { + assert_valid_generation(); + if (node_->is_leaf() && --position_ >= node_->start()) { + return; + } + decrement_slow(); + } + void decrement_slow(); + + // Updates the generation. For use internally right before we return an + // iterator to the user. 
+ void update_generation() { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (node_ != nullptr) generation_ = node_->generation(); +#endif + } + + const key_type &key() const { + return node_->key(static_cast(position_)); + } + decltype(std::declval()->slot(0)) slot() { + return node_->slot(static_cast(position_)); + } + + void assert_valid_generation() const { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (node_ != nullptr && node_->generation() != generation_) { + ABSL_INTERNAL_LOG( + FATAL, + "Attempting to use an invalidated iterator. The corresponding b-tree " + "container has been mutated since this iterator was constructed."); + } +#endif + } // The node in the tree the iterator is pointing at. - Node *node; + Node *node_; // The position within the node of the tree the iterator is pointing at. // NOTE: this is an int rather than a field_type because iterators can point // to invalid positions (such as -1) in certain circumstances. - int position; + int position_; +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // Used to check that the iterator hasn't been invalidated. + uint32_t generation_; +#endif }; template class btree { using node_type = btree_node; using is_key_compare_to = typename Params::is_key_compare_to; - using init_type = typename Params::init_type; using field_type = typename node_type::field_type; // We use a static empty node for the root/leftmost/rightmost of empty btrees @@ -1093,6 +1268,9 @@ class btree { struct alignas(node_type::Alignment()) EmptyNodeType : node_type { using field_type = typename node_type::field_type; node_type *parent; +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + uint32_t generation = 0; +#endif field_type position = 0; field_type start = 0; field_type finish = 0; @@ -1104,7 +1282,7 @@ class btree { // MSVC has constexpr code generations bugs here. EmptyNodeType() : parent(this) {} #else - constexpr EmptyNodeType(node_type *p) : parent(p) {} + explicit constexpr EmptyNodeType(node_type *p) : parent(p) {} #endif }; @@ -1166,14 +1344,6 @@ class btree { using slot_type = typename Params::slot_type; private: - // For use in copy_or_move_values_in_order. - const value_type &maybe_move_from_iterator(const_iterator it) { return *it; } - value_type &&maybe_move_from_iterator(iterator it) { - // This is a destructive operation on the other container so it's safe for - // us to const_cast and move from the keys here even if it's a set. - return std::move(const_cast(*it)); - } - // Copies or moves (depending on the template parameter) the values in // other into this btree in their order in other. This btree must be empty // before this method is called. 
This method is used in copy construction, @@ -1186,7 +1356,7 @@ class btree { public: btree(const key_compare &comp, const allocator_type &alloc) - : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} + : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {} btree(const btree &other) : btree(other, other.allocator()) {} btree(const btree &other, const allocator_type &alloc) @@ -1194,10 +1364,10 @@ class btree { copy_or_move_values_in_order(other); } btree(btree &&other) noexcept - : root_(std::move(other.root_)), - rightmost_(absl::exchange(other.rightmost_, EmptyNode())), - size_(absl::exchange(other.size_, 0)) { - other.mutable_root() = EmptyNode(); + : root_(absl::exchange(other.root_, EmptyNode())), + rightmost_(std::move(other.rightmost_)), + size_(absl::exchange(other.size_, 0u)) { + other.mutable_rightmost() = EmptyNode(); } btree(btree &&other, const allocator_type &alloc) : btree(other.key_comp(), alloc) { @@ -1222,9 +1392,9 @@ class btree { iterator begin() { return iterator(leftmost()); } const_iterator begin() const { return const_iterator(leftmost()); } - iterator end() { return iterator(rightmost_, rightmost_->finish()); } + iterator end() { return iterator(rightmost(), rightmost()->finish()); } const_iterator end() const { - return const_iterator(rightmost_, rightmost_->finish()); + return const_iterator(rightmost(), rightmost()->finish()); } reverse_iterator rbegin() { return reverse_iterator(end()); } const_reverse_iterator rbegin() const { @@ -1275,7 +1445,7 @@ class btree { // Requirement: if `key` already exists in the btree, does not consume `args`. // Requirement: `key` is never referenced after consuming `args`. template - std::pair insert_unique(const K &key, Args &&... args); + std::pair insert_unique(const K &key, Args &&...args); // Inserts with hint. Checks to see if the value should be placed immediately // before `position` in the tree. If so, then the insertion will take @@ -1284,9 +1454,8 @@ class btree { // Requirement: if `key` already exists in the btree, does not consume `args`. // Requirement: `key` is never referenced after consuming `args`. template - std::pair insert_hint_unique(iterator position, - const K &key, - Args &&... args); + std::pair insert_hint_unique(iterator position, const K &key, + Args &&...args); // Insert a range of values into the btree. // Note: the first overload avoids constructing a value_type if the key @@ -1350,7 +1519,7 @@ class btree { void swap(btree &other); const key_compare &key_comp() const noexcept { - return root_.template get<0>(); + return rightmost_.template get<0>(); } template bool compare_keys(const K1 &a, const K2 &b) const { @@ -1397,6 +1566,7 @@ class btree { } // The total number of bytes used by the btree. + // TODO(b/169338300): update to support node_btree_*. size_type bytes_used() const { node_stats stats = internal_stats(root()); if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { @@ -1412,8 +1582,7 @@ class btree { static double average_bytes_per_value() { // The expected number of values per node with random insertion order is the // average of the maximum and minimum numbers of values per node. 
- const double expected_values_per_node = - (kNodeSlots + kMinNodeValues) / 2.0; + const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0; return node_type::LeafSize() / expected_values_per_node; } @@ -1440,11 +1609,20 @@ class btree { allocator_type get_allocator() const { return allocator(); } private: + friend struct btree_access; + // Internal accessor routines. - node_type *root() { return root_.template get<2>(); } - const node_type *root() const { return root_.template get<2>(); } - node_type *&mutable_root() noexcept { return root_.template get<2>(); } - key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); } + node_type *root() { return root_; } + const node_type *root() const { return root_; } + node_type *&mutable_root() noexcept { return root_; } + node_type *rightmost() { return rightmost_.template get<2>(); } + const node_type *rightmost() const { return rightmost_.template get<2>(); } + node_type *&mutable_rightmost() noexcept { + return rightmost_.template get<2>(); + } + key_compare *mutable_key_comp() noexcept { + return &rightmost_.template get<0>(); + } // The leftmost node is stored as the parent of the root node. node_type *leftmost() { return root()->parent(); } @@ -1452,15 +1630,15 @@ class btree { // Allocator routines. allocator_type *mutable_allocator() noexcept { - return &root_.template get<1>(); + return &rightmost_.template get<1>(); } const allocator_type &allocator() const noexcept { - return root_.template get<1>(); + return rightmost_.template get<1>(); } // Allocates a correctly aligned node of at least size bytes using the // allocator. - node_type *allocate(const size_type size) { + node_type *allocate(size_type size) { return reinterpret_cast( absl::container_internal::Allocate( mutable_allocator(), size)); @@ -1474,12 +1652,12 @@ class btree { } node_type *new_leaf_node(node_type *parent) { node_type *n = allocate(node_type::LeafSize()); - n->init_leaf(parent, kNodeSlots); + n->init_leaf(kNodeSlots, parent); return n; } - node_type *new_leaf_root_node(const int max_count) { + node_type *new_leaf_root_node(field_type max_count) { node_type *n = allocate(node_type::LeafSize(max_count)); - n->init_leaf(/*parent=*/n, max_count); + n->init_leaf(max_count, /*parent=*/n); return n; } @@ -1503,22 +1681,21 @@ class btree { void try_shrink(); iterator internal_end(iterator iter) { - return iter.node != nullptr ? iter : end(); + return iter.node_ != nullptr ? iter : end(); } const_iterator internal_end(const_iterator iter) const { - return iter.node != nullptr ? iter : end(); + return iter.node_ != nullptr ? iter : end(); } // Emplaces a value into the btree immediately before iter. Requires that // key(v) <= iter.key() and (--iter).key() <= key(v). template - iterator internal_emplace(iterator iter, Args &&... args); + iterator internal_emplace(iterator iter, Args &&...args); // Returns an iterator pointing to the first value >= the value "iter" is // pointing at. Note that "iter" might be pointing to an invalid location such - // as iter.position == iter.node->finish(). This routine simply moves iter up - // in the tree to a valid location. - // Requires: iter.node is non-null. + // as iter.position_ == iter.node_->finish(). This routine simply moves iter + // up in the tree to a valid location. Requires: iter.node_ is non-null. template static IterType internal_last(IterType iter); @@ -1546,15 +1723,15 @@ class btree { iterator internal_find(const K &key) const; // Verifies the tree structure of node. 
- int internal_verify(const node_type *node, const key_type *lo, - const key_type *hi) const; + size_type internal_verify(const node_type *node, const key_type *lo, + const key_type *hi) const; node_stats internal_stats(const node_type *node) const { // The root can be a static empty node. if (node == nullptr || (node == root() && empty())) { return node_stats(0, 0); } - if (node->leaf()) { + if (node->is_leaf()) { return node_stats(1, 0); } node_stats res(0, 1); @@ -1564,15 +1741,14 @@ class btree { return res; } - // We use compressed tuple in order to save space because key_compare and - // allocator_type are usually empty. - absl::container_internal::CompressedTuple - root_; + node_type *root_; // A pointer to the rightmost node. Note that the leftmost node is stored as - // the root's parent. - node_type *rightmost_; + // the root's parent. We use compressed tuple in order to save space because + // key_compare and allocator_type are usually empty. + absl::container_internal::CompressedTuple + rightmost_; // Number of values. size_type size_; @@ -1582,9 +1758,9 @@ class btree { // btree_node methods template template -inline void btree_node
<P>::emplace_value(const size_type i, +inline void btree_node<P>
::emplace_value(const field_type i, allocator_type *alloc, - Args &&... args) { + Args &&...args) { assert(i >= start()); assert(i <= finish()); // Shift old values to create space for new value and then construct it in @@ -1593,11 +1769,11 @@ inline void btree_node
<P>
::emplace_value(const size_type i, transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, alloc); } - value_init(i, alloc, std::forward(args)...); + value_init(static_cast(i), alloc, std::forward(args)...); set_finish(finish() + 1); - if (!leaf() && finish() > i + 1) { - for (int j = finish(); j > i + 1; --j) { + if (is_internal() && finish() > i + 1) { + for (field_type j = finish(); j > i + 1; --j) { set_child(j, child(j - 1)); } clear_child(i + 1); @@ -1614,13 +1790,13 @@ inline void btree_node
<P>
::remove_values(const field_type i, const field_type src_i = i + to_erase; transfer_n(orig_finish - src_i, i, src_i, this, alloc); - if (!leaf()) { + if (is_internal()) { // Delete all children between begin and end. - for (int j = 0; j < to_erase; ++j) { + for (field_type j = 0; j < to_erase; ++j) { clear_and_delete(child(i + j + 1), alloc); } // Rotate children after end into new positions. - for (int j = i + to_erase + 1; j <= orig_finish; ++j) { + for (field_type j = i + to_erase + 1; j <= orig_finish; ++j) { set_child(j - to_erase, child(j)); clear_child(j); } @@ -1629,7 +1805,7 @@ inline void btree_node
<P>::remove_values(const field_type i, } template -void btree_node<P>::rebalance_right_to_left(const int to_move, +void btree_node<P>
::rebalance_right_to_left(field_type to_move, btree_node *right, allocator_type *alloc) { assert(parent() == right->parent()); @@ -1651,12 +1827,12 @@ void btree_node
<P>
::rebalance_right_to_left(const int to_move, right->transfer_n(right->count() - to_move, right->start(), right->start() + to_move, right, alloc); - if (!leaf()) { + if (is_internal()) { // Move the child pointers from the right to the left node. - for (int i = 0; i < to_move; ++i) { + for (field_type i = 0; i < to_move; ++i) { init_child(finish() + i + 1, right->child(i)); } - for (int i = right->start(); i <= right->finish() - to_move; ++i) { + for (field_type i = right->start(); i <= right->finish() - to_move; ++i) { assert(i + to_move <= right->max_count()); right->init_child(i, right->child(i + to_move)); right->clear_child(i + to_move); @@ -1669,7 +1845,7 @@ void btree_node
<P>::rebalance_right_to_left(const int to_move, } template -void btree_node<P>::rebalance_left_to_right(const int to_move, +void btree_node<P>
::rebalance_left_to_right(field_type to_move, btree_node *right, allocator_type *alloc) { assert(parent() == right->parent()); @@ -1698,13 +1874,13 @@ void btree_node
<P>
::rebalance_left_to_right(const int to_move, // 4) Move the new delimiting value to the parent from the left node. parent()->transfer(position(), finish() - to_move, this, alloc); - if (!leaf()) { + if (is_internal()) { // Move the child pointers from the left to the right node. - for (int i = right->finish(); i >= right->start(); --i) { - right->init_child(i + to_move, right->child(i)); - right->clear_child(i); + for (field_type i = right->finish() + 1; i > right->start(); --i) { + right->init_child(i - 1 + to_move, right->child(i - 1)); + right->clear_child(i - 1); } - for (int i = 1; i <= to_move; ++i) { + for (field_type i = 1; i <= to_move; ++i) { right->init_child(i - 1, child(finish() - to_move + i)); clear_child(finish() - to_move + i); } @@ -1744,8 +1920,8 @@ void btree_node
<P>
::split(const int insert_position, btree_node *dest, value_destroy(finish(), alloc); parent()->init_child(position() + 1, dest); - if (!leaf()) { - for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); + if (is_internal()) { + for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish(); ++i, ++j) { assert(child(j) != nullptr); dest->init_child(i, child(j)); @@ -1765,9 +1941,10 @@ void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { // Move the values from the right to the left node. transfer_n(src->count(), finish() + 1, src->start(), src, alloc); - if (!leaf()) { + if (is_internal()) { // Move the child pointers from the right to the left node. - for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) { + for (field_type i = src->start(), j = finish() + 1; i <= src->finish(); + ++i, ++j) { init_child(j, src->child(i)); src->clear_child(i); } @@ -1783,7 +1960,7 @@ void btree_node
<P>::merge(btree_node *src, allocator_type *alloc) { template void btree_node<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { - if (node->leaf()) { + if (node->is_leaf()) { node->value_destroy_n(node->start(), node->count(), alloc); deallocate(LeafSize(node->max_count()), node, alloc); return; @@ -1797,24 +1974,35 @@ void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { btree_node *delete_root_parent = node->parent(); // Navigate to the leftmost leaf under node, and then delete upwards. - while (!node->leaf()) node = node->start_child(); - // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which - // isn't guaranteed to be a valid `field_type`. - int pos = node->position(); + while (node->is_internal()) node = node->start_child(); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // When generations are enabled, we delete the leftmost leaf last in case it's + // the parent of the root and we need to check whether it's a leaf before we + // can update the root's generation. + // TODO(ezb): if we change btree_node::is_root to check a bool inside the node + // instead of checking whether the parent is a leaf, we can remove this logic. + btree_node *leftmost_leaf = node; +#endif + // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`, + // which isn't guaranteed to be a valid `field_type`. + size_type pos = node->position(); btree_node *parent = node->parent(); for (;;) { // In each iteration of the next loop, we delete one leaf node and go right. assert(pos <= parent->finish()); do { - node = parent->child(pos); - if (!node->leaf()) { + node = parent->child(static_cast(pos)); + if (node->is_internal()) { // Navigate to the leftmost leaf under node. - while (!node->leaf()) node = node->start_child(); + while (node->is_internal()) node = node->start_child(); pos = node->position(); parent = node->parent(); } node->value_destroy_n(node->start(), node->count(), alloc); - deallocate(LeafSize(node->max_count()), node, alloc); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (leftmost_leaf != node) +#endif + deallocate(LeafSize(node->max_count()), node, alloc); ++pos; } while (pos <= parent->finish()); @@ -1826,7 +2014,12 @@ void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { parent = node->parent(); node->value_destroy_n(node->start(), node->count(), alloc); deallocate(InternalSize(), node, alloc); - if (parent == delete_root_parent) return; + if (parent == delete_root_parent) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); +#endif + return; + } ++pos; } while (pos > parent->finish()); } @@ -1834,51 +2027,109 @@ void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { //// // btree_iterator methods + +// Note: the implementation here is based on btree_node::clear_and_delete. +template +auto btree_iterator::distance_slow(const_iterator other) const + -> difference_type { + const_iterator begin = other; + const_iterator end = *this; + assert(begin.node_ != end.node_ || !begin.node_->is_leaf() || + begin.position_ != end.position_); + + const node_type *node = begin.node_; + // We need to compensate for double counting if begin.node_ is a leaf node. + difference_type count = node->is_leaf() ? -begin.position_ : 0; + + // First navigate to the leftmost leaf node past begin. + if (node->is_internal()) { + ++count; + node = node->child(begin.position_ + 1); + } + while (node->is_internal()) node = node->start_child(); + + // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`, + // which isn't guaranteed to be a valid `field_type`. + size_type pos = node->position(); + const node_type *parent = node->parent(); + for (;;) { + // In each iteration of the next loop, we count one leaf node and go right. + assert(pos <= parent->finish()); + do { + node = parent->child(static_cast(pos)); + if (node->is_internal()) { + // Navigate to the leftmost leaf under node. + while (node->is_internal()) node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + if (node == end.node_) return count + end.position_; + if (parent == end.node_ && pos == static_cast(end.position_)) + return count + node->count(); + // +1 is for the next internal node value. + count += node->count() + 1; + ++pos; + } while (pos <= parent->finish()); + + // Once we've counted all children of parent, go up/right. + assert(pos > parent->finish()); + do { + node = parent; + pos = node->position(); + parent = node->parent(); + // -1 because we counted the value at end and shouldn't. + if (parent == end.node_ && pos == static_cast(end.position_)) + return count - 1; + ++pos; + } while (pos > parent->finish()); + } +} + template void btree_iterator::increment_slow() { - if (node->leaf()) { - assert(position >= node->finish()); + if (node_->is_leaf()) { + assert(position_ >= node_->finish()); btree_iterator save(*this); - while (position == node->finish() && !node->is_root()) { - assert(node->parent()->child(node->position()) == node); - position = node->position(); - node = node->parent(); + while (position_ == node_->finish() && !node_->is_root()) { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position(); + node_ = node_->parent(); } // TODO(ezb): assert we aren't incrementing end() instead of handling. 
- if (position == node->finish()) { + if (position_ == node_->finish()) { *this = save; } } else { - assert(position < node->finish()); - node = node->child(position + 1); - while (!node->leaf()) { - node = node->start_child(); + assert(position_ < node_->finish()); + node_ = node_->child(static_cast(position_ + 1)); + while (node_->is_internal()) { + node_ = node_->start_child(); } - position = node->start(); + position_ = node_->start(); } } template void btree_iterator::decrement_slow() { - if (node->leaf()) { - assert(position <= -1); + if (node_->is_leaf()) { + assert(position_ <= -1); btree_iterator save(*this); - while (position < node->start() && !node->is_root()) { - assert(node->parent()->child(node->position()) == node); - position = node->position() - 1; - node = node->parent(); + while (position_ < node_->start() && !node_->is_root()) { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position() - 1; + node_ = node_->parent(); } // TODO(ezb): assert we aren't decrementing begin() instead of handling. - if (position < node->start()) { + if (position_ < node_->start()) { *this = save; } } else { - assert(position >= node->start()); - node = node->child(position); - while (!node->leaf()) { - node = node->child(node->finish()); + assert(position_ >= node_->start()); + node_ = node_->child(static_cast(position_)); + while (node_->is_internal()) { + node_ = node_->child(node_->finish()); } - position = node->finish() - 1; + position_ = node_->finish() - 1; } } @@ -1896,12 +2147,12 @@ void btree
<P>
::copy_or_move_values_in_order(Btree &other) { // values is the same order we'll store them in. auto iter = other.begin(); if (iter == other.end()) return; - insert_multi(maybe_move_from_iterator(iter)); + insert_multi(iter.slot()); ++iter; for (; iter != other.end(); ++iter) { // If the btree is not empty, we can just insert the new value at the end // of the tree. - internal_emplace(end(), maybe_move_from_iterator(iter)); + internal_emplace(end(), iter.slot()); } } @@ -1921,15 +2172,12 @@ constexpr bool btree
<P>
::static_assert_validation() { "target node size too large"); // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. - using compare_result_type = - absl::result_of_t; static_assert( - std::is_same::value || - std::is_convertible::value, + compare_has_valid_result_type(), "key comparison function must return absl::{weak,strong}_ordering or " "bool."); - // Test the assumption made in setting kNodeValueSpace. + // Test the assumption made in setting kNodeSlotSpace. static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, "node space assumption incorrect"); @@ -1980,10 +2228,10 @@ auto btree
<P>::equal_range(const K &key) -> std::pair { template template -auto btree<P>::insert_unique(const K &key, Args &&... args) +auto btree<P>
::insert_unique(const K &key, Args &&...args) -> std::pair { if (empty()) { - mutable_root() = rightmost_ = new_leaf_root_node(1); + mutable_root() = mutable_rightmost() = new_leaf_root_node(1); } SearchResult res = internal_locate(key); @@ -1996,7 +2244,7 @@ auto btree
<P>
::insert_unique(const K &key, Args &&... args) } } else { iterator last = internal_last(iter); - if (last.node && !compare_keys(key, last.key())) { + if (last.node_ && !compare_keys(key, last.key())) { // The key already exists in the tree, do nothing. return {last, false}; } @@ -2007,7 +2255,7 @@ auto btree
<P>
::insert_unique(const K &key, Args &&... args) template template inline auto btree
<P>
::insert_hint_unique(iterator position, const K &key, - Args &&... args) + Args &&...args) -> std::pair { if (!empty()) { if (position == end() || compare_keys(key, position.key())) { @@ -2041,8 +2289,11 @@ template template void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e, char) { for (; b != e; ++b) { - init_type value(*b); - insert_hint_unique(end(), params_type::key(value), std::move(value)); + // Use a node handle to manage a temp slot. + auto node_handle = + CommonAccess::Construct(get_allocator(), *b); + slot_type *slot = CommonAccess::GetSlot(node_handle); + insert_hint_unique(end(), params_type::key(slot), slot); } } @@ -2050,11 +2301,11 @@ template template auto btree
<P>
::insert_multi(const key_type &key, ValueType &&v) -> iterator { if (empty()) { - mutable_root() = rightmost_ = new_leaf_root_node(1); + mutable_root() = mutable_rightmost() = new_leaf_root_node(1); } iterator iter = internal_upper_bound(key); - if (iter.node == nullptr) { + if (iter.node_ == nullptr) { iter = end(); } return internal_emplace(iter, std::forward(v)); @@ -2114,15 +2365,15 @@ auto btree
<P>
::operator=(btree &&other) noexcept -> btree & { using std::swap; if (absl::allocator_traits< allocator_type>::propagate_on_container_copy_assignment::value) { - // Note: `root_` also contains the allocator and the key comparator. swap(root_, other.root_); + // Note: `rightmost_` also contains the allocator and the key comparator. swap(rightmost_, other.rightmost_); swap(size_, other.size_); } else { if (allocator() == other.allocator()) { swap(mutable_root(), other.mutable_root()); swap(*mutable_key_comp(), *other.mutable_key_comp()); - swap(rightmost_, other.rightmost_); + swap(mutable_rightmost(), other.mutable_rightmost()); swap(size_, other.size_); } else { // We aren't allowed to propagate the allocator and the allocator is @@ -2140,22 +2391,34 @@ auto btree
<P>
::operator=(btree &&other) noexcept -> btree & { template auto btree
<P>
::erase(iterator iter) -> iterator { - bool internal_delete = false; - if (!iter.node->leaf()) { - // Deletion of a value on an internal node. First, move the largest value - // from our left child here, then delete that position (in remove_values() - // below). We can get to the largest value from our left child by - // decrementing iter. + iter.node_->value_destroy(static_cast(iter.position_), + mutable_allocator()); + iter.update_generation(); + + const bool internal_delete = iter.node_->is_internal(); + if (internal_delete) { + // Deletion of a value on an internal node. First, transfer the largest + // value from our left child here, then erase/rebalance from that position. + // We can get to the largest value from our left child by decrementing iter. iterator internal_iter(iter); --iter; - assert(iter.node->leaf()); - params_type::move(mutable_allocator(), iter.node->slot(iter.position), - internal_iter.node->slot(internal_iter.position)); - internal_delete = true; + assert(iter.node_->is_leaf()); + internal_iter.node_->transfer( + static_cast(internal_iter.position_), + static_cast(iter.position_), iter.node_, + mutable_allocator()); + } else { + // Shift values after erased position in leaf. In the internal case, we + // don't need to do this because the leaf position is the end of the node. + const field_type transfer_from = + static_cast(iter.position_ + 1); + const field_type num_to_transfer = iter.node_->finish() - transfer_from; + iter.node_->transfer_n(num_to_transfer, + static_cast(iter.position_), + transfer_from, iter.node_, mutable_allocator()); } - - // Delete the key from the leaf. - iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator()); + // Update node finish and container size. + iter.node_->set_finish(iter.node_->finish() - 1); --size_; // We want to return the next value after the one we just erased. If we @@ -2163,7 +2426,7 @@ auto btree
<P>
::erase(iterator iter) -> iterator { // value is ++(++iter). If we erased from a leaf node (internal_delete == // false) then the next value is ++iter. Note that ++iter may point to an // internal node and the value in the internal node may move to a leaf node - // (iter.node) when rebalancing is performed at the leaf level. + // (iter.node_) when rebalancing is performed at the leaf level. iterator res = rebalance_after_delete(iter); @@ -2180,14 +2443,14 @@ auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { iterator res(iter); bool first_iteration = true; for (;;) { - if (iter.node == root()) { + if (iter.node_ == root()) { try_shrink(); if (empty()) { return end(); } break; } - if (iter.node->count() >= kMinNodeValues) { + if (iter.node_->count() >= kMinNodeValues) { break; } bool merged = try_merge_or_rebalance(&iter); @@ -2200,14 +2463,15 @@ auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { if (!merged) { break; } - iter.position = iter.node->position(); - iter.node = iter.node->parent(); + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); } + res.update_generation(); // Adjust our return value. If we're pointing at the end of a node, advance // the iterator. - if (res.position == res.node->finish()) { - res.position = res.node->finish() - 1; + if (res.position_ == res.node_->finish()) { + res.position_ = res.node_->finish() - 1; ++res; } @@ -2217,40 +2481,45 @@ auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { template auto btree
<P>
::erase_range(iterator begin, iterator end) -> std::pair { - difference_type count = std::distance(begin, end); + size_type count = static_cast(end - begin); assert(count >= 0); if (count == 0) { return {0, begin}; } - if (count == size_) { + if (static_cast(count) == size_) { clear(); return {count, this->end()}; } - if (begin.node == end.node) { - assert(end.position > begin.position); - begin.node->remove_values(begin.position, end.position - begin.position, - mutable_allocator()); + if (begin.node_ == end.node_) { + assert(end.position_ > begin.position_); + begin.node_->remove_values( + static_cast(begin.position_), + static_cast(end.position_ - begin.position_), + mutable_allocator()); size_ -= count; return {count, rebalance_after_delete(begin)}; } const size_type target_size = size_ - count; while (size_ > target_size) { - if (begin.node->leaf()) { + if (begin.node_->is_leaf()) { const size_type remaining_to_erase = size_ - target_size; - const size_type remaining_in_node = begin.node->finish() - begin.position; - const size_type to_erase = - (std::min)(remaining_to_erase, remaining_in_node); - begin.node->remove_values(begin.position, to_erase, mutable_allocator()); + const size_type remaining_in_node = + static_cast(begin.node_->finish() - begin.position_); + const field_type to_erase = static_cast( + (std::min)(remaining_to_erase, remaining_in_node)); + begin.node_->remove_values(static_cast(begin.position_), + to_erase, mutable_allocator()); size_ -= to_erase; begin = rebalance_after_delete(begin); } else { begin = erase(begin); } } + begin.update_generation(); return {count, begin}; } @@ -2259,8 +2528,7 @@ void btree
<P>
::clear() { if (!empty()) { node_type::clear_and_delete(root(), mutable_allocator()); } - mutable_root() = EmptyNode(); - rightmost_ = EmptyNode(); + mutable_root() = mutable_rightmost() = EmptyNode(); size_ = 0; } @@ -2269,15 +2537,15 @@ void btree
<P>
::swap(btree &other) { using std::swap; if (absl::allocator_traits< allocator_type>::propagate_on_container_swap::value) { - // Note: `root_` also contains the allocator and the key comparator. - swap(root_, other.root_); + // Note: `rightmost_` also contains the allocator and the key comparator. + swap(rightmost_, other.rightmost_); } else { // It's undefined behavior if the allocators are unequal here. assert(allocator() == other.allocator()); - swap(mutable_root(), other.mutable_root()); + swap(mutable_rightmost(), other.mutable_rightmost()); swap(*mutable_key_comp(), *other.mutable_key_comp()); } - swap(rightmost_, other.rightmost_); + swap(mutable_root(), other.mutable_root()); swap(size_, other.size_); } @@ -2285,18 +2553,18 @@ template void btree
<P>
::verify() const { assert(root() != nullptr); assert(leftmost() != nullptr); - assert(rightmost_ != nullptr); + assert(rightmost() != nullptr); assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); - assert(leftmost() == (++const_iterator(root(), -1)).node); - assert(rightmost_ == (--const_iterator(root(), root()->finish())).node); - assert(leftmost()->leaf()); - assert(rightmost_->leaf()); + assert(leftmost() == (++const_iterator(root(), -1)).node_); + assert(rightmost() == (--const_iterator(root(), root()->finish())).node_); + assert(leftmost()->is_leaf()); + assert(rightmost()->is_leaf()); } template void btree
<P>
::rebalance_or_split(iterator *iter) { - node_type *&node = iter->node; - int &insert_position = iter->position; + node_type *&node = iter->node_; + int &insert_position = iter->position_; assert(node->count() == node->max_count()); assert(kNodeSlots == node->max_count()); @@ -2311,16 +2579,19 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { // We bias rebalancing based on the position being inserted. If we're // inserting at the end of the right node then we bias rebalancing to // fill up the left node. - int to_move = (kNodeSlots - left->count()) / - (1 + (insert_position < static_cast(kNodeSlots))); - to_move = (std::max)(1, to_move); + field_type to_move = + (kNodeSlots - left->count()) / + (1 + (static_cast(insert_position) < kNodeSlots)); + to_move = (std::max)(field_type{1}, to_move); - if (insert_position - to_move >= node->start() || - left->count() + to_move < static_cast(kNodeSlots)) { + if (static_cast(insert_position) - to_move >= + node->start() || + left->count() + to_move < kNodeSlots) { left->rebalance_right_to_left(to_move, node, mutable_allocator()); assert(node->max_count() - node->count() == to_move); - insert_position = insert_position - to_move; + insert_position = static_cast( + static_cast(insert_position) - to_move); if (insert_position < node->start()) { insert_position = insert_position + left->count() + 1; node = left; @@ -2340,12 +2611,13 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { // We bias rebalancing based on the position being inserted. If we're // inserting at the beginning of the left node then we bias rebalancing // to fill up the right node. - int to_move = (static_cast(kNodeSlots) - right->count()) / - (1 + (insert_position > node->start())); - to_move = (std::max)(1, to_move); + field_type to_move = (kNodeSlots - right->count()) / + (1 + (insert_position > node->start())); + to_move = (std::max)(field_type{1}, to_move); - if (insert_position <= node->finish() - to_move || - right->count() + to_move < static_cast(kNodeSlots)) { + if (static_cast(insert_position) <= + node->finish() - to_move || + right->count() + to_move < kNodeSlots) { node->rebalance_left_to_right(to_move, right, mutable_allocator()); if (insert_position > node->finish()) { @@ -2371,19 +2643,20 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { // Create a new root node and set the current root node as the child of the // new root. parent = new_internal_node(parent); + parent->set_generation(root()->generation()); parent->init_child(parent->start(), root()); mutable_root() = parent; // If the former root was a leaf node, then it's now the rightmost node. - assert(!parent->start_child()->leaf() || - parent->start_child() == rightmost_); + assert(parent->start_child()->is_internal() || + parent->start_child() == rightmost()); } // Split the node. node_type *split_node; - if (node->leaf()) { + if (node->is_leaf()) { split_node = new_leaf_node(parent); node->split(insert_position, split_node, mutable_allocator()); - if (rightmost_ == node) rightmost_ = split_node; + if (rightmost() == node) mutable_rightmost() = split_node; } else { split_node = new_internal_node(parent); node->split(insert_position, split_node, mutable_allocator()); @@ -2398,55 +2671,57 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { template void btree
<P>
::merge_nodes(node_type *left, node_type *right) { left->merge(right, mutable_allocator()); - if (rightmost_ == right) rightmost_ = left; + if (rightmost() == right) mutable_rightmost() = left; } template bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { - node_type *parent = iter->node->parent(); - if (iter->node->position() > parent->start()) { + node_type *parent = iter->node_->parent(); + if (iter->node_->position() > parent->start()) { // Try merging with our left sibling. - node_type *left = parent->child(iter->node->position() - 1); + node_type *left = parent->child(iter->node_->position() - 1); assert(left->max_count() == kNodeSlots); - if (1U + left->count() + iter->node->count() <= kNodeSlots) { - iter->position += 1 + left->count(); - merge_nodes(left, iter->node); - iter->node = left; + if (1U + left->count() + iter->node_->count() <= kNodeSlots) { + iter->position_ += 1 + left->count(); + merge_nodes(left, iter->node_); + iter->node_ = left; return true; } } - if (iter->node->position() < parent->finish()) { + if (iter->node_->position() < parent->finish()) { // Try merging with our right sibling. - node_type *right = parent->child(iter->node->position() + 1); + node_type *right = parent->child(iter->node_->position() + 1); assert(right->max_count() == kNodeSlots); - if (1U + iter->node->count() + right->count() <= kNodeSlots) { - merge_nodes(iter->node, right); + if (1U + iter->node_->count() + right->count() <= kNodeSlots) { + merge_nodes(iter->node_, right); return true; } // Try rebalancing with our right sibling. We don't perform rebalancing if - // we deleted the first element from iter->node and the node is not + // we deleted the first element from iter->node_ and the node is not // empty. This is a small optimization for the common pattern of deleting // from the front of the tree. if (right->count() > kMinNodeValues && - (iter->node->count() == 0 || iter->position > iter->node->start())) { - int to_move = (right->count() - iter->node->count()) / 2; - to_move = (std::min)(to_move, right->count() - 1); - iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); + (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { + field_type to_move = (right->count() - iter->node_->count()) / 2; + to_move = + (std::min)(to_move, static_cast(right->count() - 1)); + iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); return false; } } - if (iter->node->position() > parent->start()) { + if (iter->node_->position() > parent->start()) { // Try rebalancing with our left sibling. We don't perform rebalancing if - // we deleted the last element from iter->node and the node is not + // we deleted the last element from iter->node_ and the node is not // empty. This is a small optimization for the common pattern of deleting // from the back of the tree. - node_type *left = parent->child(iter->node->position() - 1); + node_type *left = parent->child(iter->node_->position() - 1); if (left->count() > kMinNodeValues && - (iter->node->count() == 0 || iter->position < iter->node->finish())) { - int to_move = (left->count() - iter->node->count()) / 2; - to_move = (std::min)(to_move, left->count() - 1); - left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); - iter->position += to_move; + (iter->node_->count() == 0 || + iter->position_ < iter->node_->finish())) { + field_type to_move = (left->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, static_cast(left->count() - 1)); + left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); + iter->position_ += to_move; return false; } } @@ -2460,9 +2735,9 @@ void btree
<P>
::try_shrink() { return; } // Deleted the last item on the root node, shrink the height of the tree. - if (orig_root->leaf()) { + if (orig_root->is_leaf()) { assert(size() == 0); - mutable_root() = rightmost_ = EmptyNode(); + mutable_root() = mutable_rightmost() = EmptyNode(); } else { node_type *child = orig_root->start_child(); child->make_root(); @@ -2474,53 +2749,57 @@ void btree
<P>
::try_shrink() { template template inline IterType btree
<P>
::internal_last(IterType iter) { - assert(iter.node != nullptr); - while (iter.position == iter.node->finish()) { - iter.position = iter.node->position(); - iter.node = iter.node->parent(); - if (iter.node->leaf()) { - iter.node = nullptr; + assert(iter.node_ != nullptr); + while (iter.position_ == iter.node_->finish()) { + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + if (iter.node_->is_leaf()) { + iter.node_ = nullptr; break; } } + iter.update_generation(); return iter; } template template -inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) +inline auto btree
<P>
::internal_emplace(iterator iter, Args &&...args) -> iterator { - if (!iter.node->leaf()) { + if (iter.node_->is_internal()) { // We can't insert on an internal node. Instead, we'll insert after the // previous value which is guaranteed to be on a leaf node. --iter; - ++iter.position; + ++iter.position_; } - const field_type max_count = iter.node->max_count(); + const field_type max_count = iter.node_->max_count(); allocator_type *alloc = mutable_allocator(); - if (iter.node->count() == max_count) { + if (iter.node_->count() == max_count) { // Make room in the leaf for the new item. if (max_count < kNodeSlots) { // Insertion into the root where the root is smaller than the full node // size. Simply grow the size of the root node. - assert(iter.node == root()); - iter.node = - new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); + assert(iter.node_ == root()); + iter.node_ = new_leaf_root_node(static_cast( + (std::min)(static_cast(kNodeSlots), 2 * max_count))); // Transfer the values from the old root to the new root. node_type *old_root = root(); - node_type *new_root = iter.node; + node_type *new_root = iter.node_; new_root->transfer_n(old_root->count(), new_root->start(), old_root->start(), old_root, alloc); new_root->set_finish(old_root->finish()); old_root->set_finish(old_root->start()); + new_root->set_generation(old_root->generation()); node_type::clear_and_delete(old_root, alloc); - mutable_root() = rightmost_ = new_root; + mutable_root() = mutable_rightmost() = new_root; } else { rebalance_or_split(&iter); } } - iter.node->emplace_value(iter.position, alloc, std::forward(args)...); + iter.node_->emplace_value(static_cast(iter.position_), alloc, + std::forward(args)...); ++size_; + iter.update_generation(); return iter; } @@ -2530,9 +2809,9 @@ inline auto btree
<P>
::internal_locate(const K &key) const -> SearchResult { iterator iter(const_cast(root())); for (;;) { - SearchResult res = - iter.node->lower_bound(key, key_comp()); - iter.position = res.value; + SearchResult res = + iter.node_->lower_bound(key, key_comp()); + iter.position_ = static_cast(res.value); if (res.IsEq()) { return {iter, MatchKind::kEq}; } @@ -2540,10 +2819,10 @@ inline auto btree
<P>
::internal_locate(const K &key) const // down the tree if the keys are equal, but determining equality would // require doing an extra comparison on each node on the way down, and we // will need to go all the way to the leaf node in the expected case. - if (iter.node->leaf()) { + if (iter.node_->is_leaf()) { break; } - iter.node = iter.node->child(iter.position); + iter.node_ = iter.node_->child(static_cast(iter.position_)); } // Note: in the non-key-compare-to case, the key may actually be equivalent // here (and the MatchKind::kNe is ignored). @@ -2560,16 +2839,16 @@ auto btree
<P>
::internal_lower_bound(const K &key) const return ret; } iterator iter(const_cast(root())); - SearchResult res; + SearchResult res; bool seen_eq = false; for (;;) { - res = iter.node->lower_bound(key, key_comp()); - iter.position = res.value; - if (iter.node->leaf()) { + res = iter.node_->lower_bound(key, key_comp()); + iter.position_ = static_cast(res.value); + if (iter.node_->is_leaf()) { break; } seen_eq = seen_eq || res.IsEq(); - iter.node = iter.node->child(iter.position); + iter.node_ = iter.node_->child(static_cast(iter.position_)); } if (res.IsEq()) return {iter, MatchKind::kEq}; return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; @@ -2580,11 +2859,11 @@ template auto btree
<P>
::internal_upper_bound(const K &key) const -> iterator { iterator iter(const_cast(root())); for (;;) { - iter.position = iter.node->upper_bound(key, key_comp()); - if (iter.node->leaf()) { + iter.position_ = static_cast(iter.node_->upper_bound(key, key_comp())); + if (iter.node_->is_leaf()) { break; } - iter.node = iter.node->child(iter.position); + iter.node_ = iter.node_->child(static_cast(iter.position_)); } return internal_last(iter); } @@ -2599,7 +2878,7 @@ auto btree
<P>
::internal_find(const K &key) const -> iterator { } } else { const iterator iter = internal_last(res.value); - if (iter.node != nullptr && !compare_keys(key, iter.key())) { + if (iter.node_ != nullptr && !compare_keys(key, iter.key())) { return iter; } } @@ -2607,8 +2886,8 @@ auto btree
<P>
::internal_find(const K &key) const -> iterator { } template -int btree
<P>
::internal_verify(const node_type *node, const key_type *lo, - const key_type *hi) const { +typename btree
<P>
::size_type btree
<P>
::internal_verify( + const node_type *node, const key_type *lo, const key_type *hi) const { assert(node->count() > 0); assert(node->count() <= node->max_count()); if (lo) { @@ -2620,9 +2899,9 @@ int btree
<P>
::internal_verify(const node_type *node, const key_type *lo, for (int i = node->start() + 1; i < node->finish(); ++i) { assert(!compare_keys(node->key(i), node->key(i - 1))); } - int count = node->count(); - if (!node->leaf()) { - for (int i = node->start(); i <= node->finish(); ++i) { + size_type count = node->count(); + if (node->is_internal()) { + for (field_type i = node->start(); i <= node->finish(); ++i) { assert(node->child(i) != nullptr); assert(node->child(i)->parent() == node); assert(node->child(i)->position() == i); @@ -2634,6 +2913,50 @@ int btree
<P>
::internal_verify(const node_type *node, const key_type *lo, return count; } +struct btree_access { + template + static auto erase_if(BtreeContainer &container, Pred pred) -> + typename BtreeContainer::size_type { + const auto initial_size = container.size(); + auto &tree = container.tree_; + auto *alloc = tree.mutable_allocator(); + for (auto it = container.begin(); it != container.end();) { + if (!pred(*it)) { + ++it; + continue; + } + auto *node = it.node_; + if (node->is_internal()) { + // Handle internal nodes normally. + it = container.erase(it); + continue; + } + // If this is a leaf node, then we do all the erases from this node + // at once before doing rebalancing. + + // The current position to transfer slots to. + int to_pos = it.position_; + node->value_destroy(it.position_, alloc); + while (++it.position_ < node->finish()) { + it.update_generation(); + if (pred(*it)) { + node->value_destroy(it.position_, alloc); + } else { + node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + } + } + const int num_deleted = node->finish() - to_pos; + tree.size_ -= num_deleted; + node->set_finish(to_pos); + it.position_ = to_pos; + it = tree.rebalance_after_delete(it); + } + return initial_size - container.size(); + } +}; + +#undef ABSL_BTREE_ENABLE_GENERATIONS + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree_container.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree_container.h index a99668c713..2bff11db04 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree_container.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/btree_container.h @@ -44,8 +44,8 @@ class btree_container { // transparent case. template using key_arg = - typename KeyArg::value>:: - template type; + typename KeyArg::template type< + K, typename Tree::key_type>; public: using key_type = typename Tree::key_type; @@ -65,6 +65,11 @@ class btree_container { using const_reverse_iterator = typename Tree::const_reverse_iterator; using node_type = typename Tree::node_handle_type; + struct extract_and_get_next_return_type { + node_type node; + iterator next; + }; + // Constructors/assignments. btree_container() : tree_(key_compare(), allocator_type()) {} explicit btree_container(const key_compare &comp, @@ -107,7 +112,7 @@ class btree_container { template size_type count(const key_arg &key) const { auto equal_range = this->equal_range(key); - return std::distance(equal_range.first, equal_range.second); + return equal_range.second - equal_range.first; } template iterator find(const key_arg &key) { @@ -165,10 +170,20 @@ class btree_container { } // Extract routines. + extract_and_get_next_return_type extract_and_get_next( + const_iterator position) { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + // Note: we rely on erase() taking place after Construct(). + return {CommonAccess::Construct(get_allocator(), + iterator(position).slot()), + erase(position)}; + } node_type extract(iterator position) { - // Use Move instead of Transfer, because the rebalancing code expects to - // have a valid object to scribble metadata bits on top of. - auto node = CommonAccess::Move(get_allocator(), position.slot()); + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. 
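  // A rough sketch of the sequence (assuming CommonAccess::Construct simply
  // forwards to the node handle's construct_tag_t constructor added in
  // common.h later in this patch): Transfer would leave the tree's slot
  // uninitialized, but erase()/rebalancing still destroys that slot, so the
  // handle constructs its own slot from position.slot() and leaves the
  // original value in place for erase() to clean up:
  //
  //   auto handle = CommonAccess::Construct<node_type>(get_allocator(),
  //                                                    position.slot());
  //   erase(position);   // tree slot is still initialized; destroyed here
  //   return handle;     // the caller now owns the extracted element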
+ auto node = + CommonAccess::Construct(get_allocator(), position.slot()); erase(position); return node; } @@ -228,6 +243,7 @@ class btree_container { } protected: + friend struct btree_access; Tree tree_; }; @@ -290,8 +306,11 @@ class btree_set_container : public btree_container { } template std::pair emplace(Args &&... args) { - init_type v(std::forward(args)...); - return this->tree_.insert_unique(params_type::key(v), std::move(v)); + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + auto *slot = CommonAccess::GetSlot(node); + return this->tree_.insert_unique(params_type::key(slot), slot); } iterator insert(const_iterator hint, const value_type &v) { return this->tree_ @@ -305,9 +324,12 @@ class btree_set_container : public btree_container { } template iterator emplace_hint(const_iterator hint, Args &&... args) { - init_type v(std::forward(args)...); + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + auto *slot = CommonAccess::GetSlot(node); return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) + .insert_hint_unique(iterator(hint), params_type::key(slot), slot) .first; } template @@ -536,6 +558,7 @@ class btree_multiset_container : public btree_container { using params_type = typename Tree::params_type; using init_type = typename params_type::init_type; using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; template using key_arg = typename super_type::template key_arg; @@ -596,12 +619,18 @@ class btree_multiset_container : public btree_container { } template iterator emplace(Args &&... args) { - return this->tree_.insert_multi(init_type(std::forward(args)...)); + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + return this->tree_.insert_multi(CommonAccess::GetSlot(node)); } template iterator emplace_hint(const_iterator hint, Args &&... args) { - return this->tree_.insert_hint_multi( - iterator(hint), init_type(std::forward(args)...)); + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + return this->tree_.insert_hint_multi(iterator(hint), + CommonAccess::GetSlot(node)); } iterator insert(node_type &&node) { if (!node) return this->end(); @@ -667,6 +696,7 @@ template class btree_multimap_container : public btree_multiset_container { using super_type = btree_multiset_container; using params_type = typename Tree::params_type; + friend class BtreeNodePeer; public: using mapped_type = typename params_type::mapped_type; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common.h index 030e9d4ab0..9239bb4d09 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_ -#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_ +#ifndef ABSL_CONTAINER_INTERNAL_COMMON_H_ +#define ABSL_CONTAINER_INTERNAL_COMMON_H_ #include #include @@ -84,10 +84,11 @@ class node_handle_base { PolicyTraits::transfer(alloc(), slot(), s); } - struct move_tag_t {}; - node_handle_base(move_tag_t, const allocator_type& a, slot_type* s) + struct construct_tag_t {}; + template + node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) : alloc_(a) { - PolicyTraits::construct(alloc(), slot(), s); + PolicyTraits::construct(alloc(), slot(), std::forward(args)...); } void destroy() { @@ -186,8 +187,8 @@ struct CommonAccess { } template - static T Move(Args&&... args) { - return T(typename T::move_tag_t{}, std::forward(args)...); + static T Construct(Args&&... args) { + return T(typename T::construct_tag_t{}, std::forward(args)...); } }; @@ -203,4 +204,4 @@ struct InsertReturnType { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_ +#endif // ABSL_CONTAINER_INTERNAL_COMMON_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common_policy_traits.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common_policy_traits.h new file mode 100644 index 0000000000..0fd4866e38 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common_policy_traits.h @@ -0,0 +1,115 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct common_policy_traits { + // The actual object stored in the container. + using slot_type = typename Policy::slot_type; + using reference = decltype(Policy::element(std::declval())); + using value_type = typename std::remove_reference::type; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. 
+ // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { + transfer_impl(alloc, new_slot, old_slot, 0); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + // Note: we use remove_const_t so that the two overloads have different args + // in the case of sets with explicitly const value_types. + template + static auto element(absl::remove_const_t* slot) + -> decltype(P::element(slot)) { + return P::element(slot); + } + template + static auto element(const slot_type* slot) -> decltype(P::element(slot)) { + return P::element(slot); + } + + private: + // Use auto -> decltype as an enabler. + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { + P::transfer(alloc, new_slot, old_slot); + } + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, char) { +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + if (absl::is_trivially_relocatable()) { + // TODO(b/247130232,b/251814870): remove casts after fixing warnings. + std::memcpy(static_cast( + std::launder(const_cast*>( + &element(new_slot)))), + static_cast(&element(old_slot)), + sizeof(value_type)); + return; + } +#endif + + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common_policy_traits_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common_policy_traits_test.cc new file mode 100644 index 0000000000..5eaa4aae00 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/common_policy_traits_test.cc @@ -0,0 +1,120 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
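The transfer contract documented above is what the new test file below exercises. As a minimal, standalone sketch of that contract, assuming only the documented default behavior (construct the new slot from the moved old element, then destroy the old slot) and using std::is_trivially_copyable as a rough stand-in for absl::is_trivially_relocatable, the fast path and the fallback look roughly like this; the function and type names are illustrative only, not part of the patch:

    #include <cstring>
    #include <memory>
    #include <type_traits>
    #include <utility>

    template <class T, class Alloc = std::allocator<T>>
    void transfer_sketch(Alloc& alloc, T* new_slot, T* old_slot) {
      using Traits = std::allocator_traits<Alloc>;
      if constexpr (std::is_trivially_copyable_v<T>) {
        // Fast path: relocate the raw bytes; the old slot is then treated as
        // uninitialized without running a destructor.
        std::memcpy(static_cast<void*>(new_slot),
                    static_cast<const void*>(old_slot), sizeof(T));
      } else {
        // Default path: "clone" the moved old element into the new slot, then
        // destroy the old slot so it ends up uninitialized.
        Traits::construct(alloc, new_slot, std::move(*old_slot));
        Traits::destroy(alloc, old_slot);
      }
    }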
+ +#include "absl/container/internal/common_policy_traits.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::AnyNumber; +using ::testing::ReturnRef; + +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function construct; + static std::function destroy; + + static std::function element; +}; + +std::function PolicyWithoutOptionalOps::construct; +std::function PolicyWithoutOptionalOps::destroy; + +std::function PolicyWithoutOptionalOps::element; + +struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { + static std::function transfer; +}; + +std::function PolicyWithOptionalOps::transfer; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { + construct.Call(a1, a2, std::move(a3)); + }; + PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { + destroy.Call(a1, a2); + }; + + PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { + return element.Call(a1); + }; + + PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { + return transfer.Call(a1, a2, a3); + }; + } + + std::allocator alloc; + int a = 53; + + MockFunction construct; + MockFunction destroy; + + MockFunction element; + + MockFunction transfer; +}; + +TEST_F(Test, construct) { + EXPECT_CALL(construct, Call(&alloc, &a, 53)); + common_policy_traits::construct(&alloc, &a, 53); +} + +TEST_F(Test, destroy) { + EXPECT_CALL(destroy, Call(&alloc, &a)); + common_policy_traits::destroy(&alloc, &a); +} + +TEST_F(Test, element) { + int b = 0; + EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &common_policy_traits::element(&a)); +} + +TEST_F(Test, without_transfer) { + int b = 42; + EXPECT_CALL(element, Call(&a)).Times(AnyNumber()).WillOnce(ReturnRef(a)); + EXPECT_CALL(element, Call(&b)).WillOnce(ReturnRef(b)); + EXPECT_CALL(construct, Call(&alloc, &a, b)).Times(AnyNumber()); + EXPECT_CALL(destroy, Call(&alloc, &b)).Times(AnyNumber()); + common_policy_traits::transfer(&alloc, &a, &b); +} + +TEST_F(Test, with_transfer) { + int b = 42; + EXPECT_CALL(transfer, Call(&alloc, &a, &b)); + common_policy_traits::transfer(&alloc, &a, &b); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc index 62a7483ee3..74111f975e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc @@ -403,6 +403,16 @@ TEST(CompressedTupleTest, EmptyFinalClass) { } #endif +// TODO(b/214288561): enable this test. +TEST(CompressedTupleTest, DISABLED_NestedEbo) { + struct Empty1 {}; + struct Empty2 {}; + CompressedTuple, int> x; + CompressedTuple y; + // Currently fails with sizeof(x) == 8, sizeof(y) == 4. 
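  // (Background, stated loosely: CompressedTuple stores empty, non-final
  // element types as base classes so the empty-base optimization can give them
  // zero storage. CompressedTuple<Empty1, Empty2> is itself an empty class, so
  // in principle the outer tuple in `x` could be flattened down to its lone
  // int, matching `y`; the nested case is not collapsed today, which is what
  // this disabled test records.)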
+ EXPECT_EQ(sizeof(x), sizeof(y)); +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/container_memory.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/container_memory.h index e67529ecb6..bfa4ff93d7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/container_memory.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/container_memory.h @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -174,7 +175,7 @@ decltype(std::declval()(std::declval())) WithConstructed( // // 2. auto a = PairArgs(args...); // std::pair p(std::piecewise_construct, -// std::move(p.first), std::move(p.second)); +// std::move(a.first), std::move(a.second)); inline std::pair, std::tuple<>> PairArgs() { return {}; } template std::pair, std::tuple> PairArgs(F&& f, S&& s) { @@ -340,7 +341,8 @@ template struct map_slot_policy { using slot_type = map_slot_type; using value_type = std::pair; - using mutable_value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; private: static void emplace(slot_type* slot) { @@ -402,6 +404,15 @@ struct map_slot_policy { } } + // Construct this slot by copying from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, + const slot_type* other) { + emplace(slot); + absl::allocator_traits::construct(*alloc, &slot->value, + other->value); + } + template static void destroy(Allocator* alloc, slot_type* slot) { if (kMutableKeys::value) { @@ -415,6 +426,16 @@ struct map_slot_policy { static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { emplace(new_slot); +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + if (absl::is_trivially_relocatable()) { + // TODO(b/247130232,b/251814870): remove casts after fixing warnings. 
+ std::memcpy(static_cast(std::launder(&new_slot->value)), + static_cast(&old_slot->value), + sizeof(value_type)); + return; + } +#endif + if (kMutableKeys::value) { absl::allocator_traits::construct( *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); @@ -424,33 +445,6 @@ struct map_slot_policy { } destroy(alloc, old_slot); } - - template - static void swap(Allocator* alloc, slot_type* a, slot_type* b) { - if (kMutableKeys::value) { - using std::swap; - swap(a->mutable_value, b->mutable_value); - } else { - value_type tmp = std::move(a->value); - absl::allocator_traits::destroy(*alloc, &a->value); - absl::allocator_traits::construct(*alloc, &a->value, - std::move(b->value)); - absl::allocator_traits::destroy(*alloc, &b->value); - absl::allocator_traits::construct(*alloc, &b->value, - std::move(tmp)); - } - } - - template - static void move(Allocator* alloc, slot_type* src, slot_type* dest) { - if (kMutableKeys::value) { - dest->mutable_value = std::move(src->mutable_value); - } else { - absl::allocator_traits::destroy(*alloc, &dest->value); - absl::allocator_traits::construct(*alloc, &dest->value, - std::move(src->value)); - } - } }; } // namespace container_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/counting_allocator.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/counting_allocator.h index 927cf08255..66068a5a0d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/counting_allocator.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/counting_allocator.h @@ -80,7 +80,15 @@ class CountingAllocator { template void destroy(U* p) { Allocator allocator; + // Ignore GCC warning bug. 
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuse-after-free" +#endif AllocatorTraits::destroy(allocator, p); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif if (instance_count_ != nullptr) { *instance_count_ -= 1; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc index 59576b8ede..9f0a4c72ca 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc @@ -310,7 +310,7 @@ struct StringLikeTest : public ::testing::Test { hash_default_hash hash; }; -TYPED_TEST_CASE_P(StringLikeTest); +TYPED_TEST_SUITE_P(StringLikeTest); TYPED_TEST_P(StringLikeTest, Eq) { EXPECT_TRUE(this->eq(this->a1, this->b1)); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h index 46c97b18a2..164ec12316 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h @@ -21,6 +21,7 @@ #include #include +#include "absl/container/internal/common_policy_traits.h" #include "absl/meta/type_traits.h" namespace absl { @@ -29,7 +30,7 @@ namespace container_internal { // Defines how slots are initialized/destroyed/moved. template -struct hash_policy_traits { +struct hash_policy_traits : common_policy_traits { // The type of the keys stored in the hashtable. using key_type = typename Policy::key_type; @@ -87,43 +88,6 @@ struct hash_policy_traits { // Defaults to false if not provided by the policy. using constant_iterators = ConstantIteratorsImpl<>; - // PRECONDITION: `slot` is UNINITIALIZED - // POSTCONDITION: `slot` is INITIALIZED - template - static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { - Policy::construct(alloc, slot, std::forward(args)...); - } - - // PRECONDITION: `slot` is INITIALIZED - // POSTCONDITION: `slot` is UNINITIALIZED - template - static void destroy(Alloc* alloc, slot_type* slot) { - Policy::destroy(alloc, slot); - } - - // Transfers the `old_slot` to `new_slot`. Any memory allocated by the - // allocator inside `old_slot` to `new_slot` can be transferred. - // - // OPTIONAL: defaults to: - // - // clone(new_slot, std::move(*old_slot)); - // destroy(old_slot); - // - // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED - // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is - // UNINITIALIZED - template - static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { - transfer_impl(alloc, new_slot, old_slot, 0); - } - - // PRECONDITION: `slot` is INITIALIZED - // POSTCONDITION: `slot` is INITIALIZED - template - static auto element(slot_type* slot) -> decltype(P::element(slot)) { - return P::element(slot); - } - // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. // // If `slot` is nullptr, returns the constant amount of memory owned by any @@ -174,8 +138,8 @@ struct hash_policy_traits { // Used for node handle manipulation. 
template static auto mutable_key(slot_type* slot) - -> decltype(P::apply(ReturnKey(), element(slot))) { - return P::apply(ReturnKey(), element(slot)); + -> decltype(P::apply(ReturnKey(), hash_policy_traits::element(slot))) { + return P::apply(ReturnKey(), hash_policy_traits::element(slot)); } // Returns the "value" (as opposed to the "key") portion of the element. Used @@ -184,21 +148,6 @@ struct hash_policy_traits { static auto value(T* elem) -> decltype(P::value(elem)) { return P::value(elem); } - - private: - // Use auto -> decltype as an enabler. - template - static auto transfer_impl(Alloc* alloc, slot_type* new_slot, - slot_type* old_slot, int) - -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { - P::transfer(alloc, new_slot, old_slot); - } - template - static void transfer_impl(Alloc* alloc, slot_type* new_slot, - slot_type* old_slot, char) { - construct(alloc, new_slot, std::move(element(old_slot))); - destroy(alloc, old_slot); - } }; } // namespace container_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc index 6ef8b9e05f..82d7cc3a70 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc @@ -38,81 +38,31 @@ struct PolicyWithoutOptionalOps { using key_type = Slot; using init_type = Slot; - static std::function construct; - static std::function destroy; - static std::function element; static int apply(int v) { return apply_impl(v); } static std::function apply_impl; static std::function value; }; -std::function PolicyWithoutOptionalOps::construct; -std::function PolicyWithoutOptionalOps::destroy; - -std::function PolicyWithoutOptionalOps::element; std::function PolicyWithoutOptionalOps::apply_impl; std::function PolicyWithoutOptionalOps::value; -struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { - static std::function transfer; -}; - -std::function PolicyWithOptionalOps::transfer; - struct Test : ::testing::Test { Test() { - PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { - construct.Call(a1, a2, std::move(a3)); - }; - PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { - destroy.Call(a1, a2); - }; - - PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { - return element.Call(a1); - }; PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { return apply.Call(a1); }; PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { return value.Call(a1); }; - - PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { - return transfer.Call(a1, a2, a3); - }; } std::allocator alloc; int a = 53; - - MockFunction construct; - MockFunction destroy; - - MockFunction element; MockFunction apply; MockFunction value; - - MockFunction transfer; }; -TEST_F(Test, construct) { - EXPECT_CALL(construct, Call(&alloc, &a, 53)); - hash_policy_traits::construct(&alloc, &a, 53); -} - -TEST_F(Test, destroy) { - EXPECT_CALL(destroy, Call(&alloc, &a)); - hash_policy_traits::destroy(&alloc, &a); -} - -TEST_F(Test, element) { - int b = 0; - EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); - EXPECT_EQ(&b, &hash_policy_traits::element(&a)); -} - TEST_F(Test, apply) { EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); EXPECT_EQ(1337, (hash_policy_traits::apply(42))); @@ -124,20 
+74,6 @@ TEST_F(Test, value) { EXPECT_EQ(&b, &hash_policy_traits::value(&a)); } -TEST_F(Test, without_transfer) { - int b = 42; - EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b)); - EXPECT_CALL(construct, Call(&alloc, &a, b)); - EXPECT_CALL(destroy, Call(&alloc, &b)); - hash_policy_traits::transfer(&alloc, &a, &b); -} - -TEST_F(Test, with_transfer) { - int b = 42; - EXPECT_CALL(transfer, Call(&alloc, &a, &b)); - hash_policy_traits::transfer(&alloc, &a, &b); -} - } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc index 4b1337051f..5b8cf341da 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc @@ -21,33 +21,43 @@ #include #include "absl/base/attributes.h" -#include "absl/base/internal/exponential_biased.h" -#include "absl/container/internal/have_sse.h" +#include "absl/base/config.h" #include "absl/debugging/stacktrace.h" #include "absl/memory/memory.h" +#include "absl/profiling/internal/exponential_biased.h" #include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" +#include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int HashtablezInfo::kMaxStackDepth; +#endif namespace { ABSL_CONST_INIT std::atomic g_hashtablez_enabled{ false }; ABSL_CONST_INIT std::atomic g_hashtablez_sample_parameter{1 << 10}; +std::atomic g_hashtablez_config_listener{nullptr}; #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased +ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased g_exponential_biased_generator; #endif +void TriggerHashtablezConfigListener() { + auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire); + if (listener != nullptr) listener(); +} + } // namespace #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0; +ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0}; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) HashtablezSampler& GlobalHashtablezSampler() { @@ -55,10 +65,11 @@ HashtablezSampler& GlobalHashtablezSampler() { return *sampler; } -HashtablezInfo::HashtablezInfo() { PrepareForSampling(); } +HashtablezInfo::HashtablezInfo() = default; HashtablezInfo::~HashtablezInfo() = default; -void HashtablezInfo::PrepareForSampling() { +void HashtablezInfo::PrepareForSampling(int64_t stride, + size_t inline_element_size_value) { capacity.store(0, std::memory_order_relaxed); size.store(0, std::memory_order_relaxed); num_erases.store(0, std::memory_order_relaxed); @@ -71,11 +82,13 @@ void HashtablezInfo::PrepareForSampling() { max_reserve.store(0, std::memory_order_relaxed); create_time = absl::Now(); + weight = stride; // The inliner makes hardcoded skip_count difficult (especially when combined // with LTO). We use the ability to exclude stacks by regex when encoding // instead. 
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, /* skip_count= */ 0); + inline_element_size = inline_element_size_value; } static bool ShouldForceSampling() { @@ -98,21 +111,32 @@ static bool ShouldForceSampling() { return state == kForce; } -HashtablezInfo* SampleSlow(int64_t* next_sample) { +HashtablezInfo* SampleSlow(SamplingState& next_sample, + size_t inline_element_size) { if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { - *next_sample = 1; - return GlobalHashtablezSampler().Register(); + next_sample.next_sample = 1; + const int64_t old_stride = exchange(next_sample.sample_stride, 1); + HashtablezInfo* result = + GlobalHashtablezSampler().Register(old_stride, inline_element_size); + return result; } #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - *next_sample = std::numeric_limits::max(); + next_sample = { + std::numeric_limits::max(), + std::numeric_limits::max(), + }; return nullptr; #else - bool first = *next_sample < 0; - *next_sample = g_exponential_biased_generator.GetStride( + bool first = next_sample.next_sample < 0; + + const int64_t next_stride = g_exponential_biased_generator.GetStride( g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); + + next_sample.next_sample = next_stride; + const int64_t old_stride = exchange(next_sample.sample_stride, next_stride); // Small values of interval are equivalent to just sampling next time. - ABSL_ASSERT(*next_sample >= 1); + ABSL_ASSERT(next_stride >= 1); // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold // low enough that we will start sampling in a reasonable time, so we just use @@ -122,11 +146,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) { // We will only be negative on our first count, so we should just retry in // that case. if (first) { - if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr; - return SampleSlow(next_sample); + if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr; + return SampleSlow(next_sample, inline_element_size); } - return GlobalHashtablezSampler().Register(); + return GlobalHashtablezSampler().Register(old_stride, inline_element_size); #endif } @@ -139,7 +163,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, // SwissTables probe in groups of 16, so scale this to count items probes and // not offset from desired. 
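  // For example (illustrative numbers only): with the 16-wide SSE2 groups used
  // just below, an element found 37 slots past its desired position is
  // recorded as a probe length of 37 / 16 = 2 groups; with the 8-wide fallback
  // the same distance records 37 / 8 = 4.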
size_t probe_length = distance_from_desired; -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#ifdef ABSL_INTERNAL_HAVE_SSE2 probe_length /= 16; #else probe_length /= 8; @@ -156,11 +180,33 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, info->size.fetch_add(1, std::memory_order_relaxed); } +void SetHashtablezConfigListener(HashtablezConfigListener l) { + g_hashtablez_config_listener.store(l, std::memory_order_release); +} + +bool IsHashtablezEnabled() { + return g_hashtablez_enabled.load(std::memory_order_acquire); +} + void SetHashtablezEnabled(bool enabled) { + SetHashtablezEnabledInternal(enabled); + TriggerHashtablezConfigListener(); +} + +void SetHashtablezEnabledInternal(bool enabled) { g_hashtablez_enabled.store(enabled, std::memory_order_release); } +int32_t GetHashtablezSampleParameter() { + return g_hashtablez_sample_parameter.load(std::memory_order_acquire); +} + void SetHashtablezSampleParameter(int32_t rate) { + SetHashtablezSampleParameterInternal(rate); + TriggerHashtablezConfigListener(); +} + +void SetHashtablezSampleParameterInternal(int32_t rate) { if (rate > 0) { g_hashtablez_sample_parameter.store(rate, std::memory_order_release); } else { @@ -169,12 +215,20 @@ void SetHashtablezSampleParameter(int32_t rate) { } } -void SetHashtablezMaxSamples(int32_t max) { +size_t GetHashtablezMaxSamples() { + return GlobalHashtablezSampler().GetMaxSamples(); +} + +void SetHashtablezMaxSamples(size_t max) { + SetHashtablezMaxSamplesInternal(max); + TriggerHashtablezConfigListener(); +} + +void SetHashtablezMaxSamplesInternal(size_t max) { if (max > 0) { GlobalHashtablezSampler().SetMaxSamples(max); } else { - ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld", - static_cast(max)); // NOLINT(runtime/int) + ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0"); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h index 812118e3a9..a89518bb03 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h @@ -44,9 +44,9 @@ #include #include +#include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" #include "absl/base/optimization.h" -#include "absl/container/internal/have_sse.h" #include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" @@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample { // Puts the object into a clean state, fills in the logically `const` members, // blocking for any readers that are currently sampling the object. - void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + void PrepareForSampling(int64_t stride, size_t inline_element_size_value) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); // These fields are mutated by the various Record* APIs and need to be // thread-safe. @@ -84,17 +85,18 @@ struct HashtablezInfo : public profiling_internal::Sample { // All of the fields below are set by `PrepareForSampling`, they must not be // mutated in `Record*` functions. They are logically `const` in that sense. - // These are guarded by init_mu, but that is not externalized to clients, who - // can only read them during `HashtablezSampler::Iterate` which will hold the - // lock. 
+ // These are guarded by init_mu, but that is not externalized to clients, + // which can read them only during `SampleRecorder::Iterate` which will hold + // the lock. static constexpr int kMaxStackDepth = 64; absl::Time create_time; int32_t depth; void* stack[kMaxStackDepth]; + size_t inline_element_size; // How big is the slot? }; inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#ifdef ABSL_INTERNAL_HAVE_SSE2 total_probe_length /= 16; #else total_probe_length /= 8; @@ -143,7 +145,15 @@ inline void RecordEraseSlow(HashtablezInfo* info) { std::memory_order_relaxed); } -HashtablezInfo* SampleSlow(int64_t* next_sample); +struct SamplingState { + int64_t next_sample; + // When we make a sampling decision, we record that distance so we can weight + // each sample. + int64_t sample_stride; +}; + +HashtablezInfo* SampleSlow(SamplingState& next_sample, + size_t inline_element_size); void UnsampleSlow(HashtablezInfo* info); #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) @@ -233,17 +243,19 @@ class HashtablezInfoHandle { #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample; +extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) // Returns an RAII sampling handle that manages registration and unregistation // with the global sampler. -inline HashtablezInfoHandle Sample() { +inline HashtablezInfoHandle Sample( + size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) { + if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { return HashtablezInfoHandle(nullptr); } - return HashtablezInfoHandle(SampleSlow(&global_next_sample)); + return HashtablezInfoHandle( + SampleSlow(global_next_sample, inline_element_size)); #else return HashtablezInfoHandle(nullptr); #endif // !ABSL_PER_THREAD_TLS @@ -255,14 +267,23 @@ using HashtablezSampler = // Returns a global Sampler. HashtablezSampler& GlobalHashtablezSampler(); +using HashtablezConfigListener = void (*)(); +void SetHashtablezConfigListener(HashtablezConfigListener l); + // Enables or disables sampling for Swiss tables. +bool IsHashtablezEnabled(); void SetHashtablezEnabled(bool enabled); +void SetHashtablezEnabledInternal(bool enabled); // Sets the rate at which Swiss tables will be sampled. +int32_t GetHashtablezSampleParameter(); void SetHashtablezSampleParameter(int32_t rate); +void SetHashtablezSampleParameterInternal(int32_t rate); // Sets a soft max for the number of samples that will be kept. -void SetHashtablezMaxSamples(int32_t max); +size_t GetHashtablezMaxSamples(); +void SetHashtablezMaxSamples(size_t max); +void SetHashtablezMaxSamplesInternal(size_t max); // Configuration override. 
// This allows process-wide sampling without depending on order of diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc index f053c19ba3..665d518fc7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc @@ -21,7 +21,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" -#include "absl/container/internal/have_sse.h" +#include "absl/base/config.h" #include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/internal/thread_pool.h" @@ -30,7 +30,7 @@ #include "absl/time/clock.h" #include "absl/time/time.h" -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#ifdef ABSL_INTERNAL_HAVE_SSE2 constexpr int kProbeLength = 16; #else constexpr int kProbeLength = 8; @@ -70,7 +70,9 @@ std::vector GetSizes(HashtablezSampler* s) { } HashtablezInfo* Register(HashtablezSampler* s, size_t size) { - auto* info = s->Register(); + const int64_t test_stride = 123; + const size_t test_element_size = 17; + auto* info = s->Register(test_stride, test_element_size); assert(info != nullptr); info->size.store(size); return info; @@ -78,9 +80,11 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) { TEST(HashtablezInfoTest, PrepareForSampling) { absl::Time test_start = absl::Now(); + const int64_t test_stride = 123; + const size_t test_element_size = 17; HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(); + info.PrepareForSampling(test_stride, test_element_size); EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); @@ -93,6 +97,8 @@ TEST(HashtablezInfoTest, PrepareForSampling) { EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); EXPECT_EQ(info.max_reserve.load(), 0); EXPECT_GE(info.create_time, test_start); + EXPECT_EQ(info.weight, test_stride); + EXPECT_EQ(info.inline_element_size, test_element_size); info.capacity.store(1, std::memory_order_relaxed); info.size.store(1, std::memory_order_relaxed); @@ -105,7 +111,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) { info.max_reserve.store(1, std::memory_order_relaxed); info.create_time = test_start - absl::Hours(20); - info.PrepareForSampling(); + info.PrepareForSampling(test_stride * 2, test_element_size); EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 0); @@ -116,13 +122,17 @@ TEST(HashtablezInfoTest, PrepareForSampling) { EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); EXPECT_EQ(info.max_reserve.load(), 0); + EXPECT_EQ(info.weight, 2 * test_stride); + EXPECT_EQ(info.inline_element_size, test_element_size); EXPECT_GE(info.create_time, test_start); } TEST(HashtablezInfoTest, RecordStorageChanged) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(); + const int64_t test_stride = 21; + const size_t test_element_size = 19; + info.PrepareForSampling(test_stride, test_element_size); RecordStorageChangedSlow(&info, 17, 47); EXPECT_EQ(info.size.load(), 17); EXPECT_EQ(info.capacity.load(), 47); @@ -134,7 +144,9 @@ TEST(HashtablezInfoTest, RecordStorageChanged) { TEST(HashtablezInfoTest, RecordInsert) { HashtablezInfo info; absl::MutexLock 
l(&info.init_mu); - info.PrepareForSampling(); + const int64_t test_stride = 25; + const size_t test_element_size = 23; + info.PrepareForSampling(test_stride, test_element_size); EXPECT_EQ(info.max_probe_length.load(), 0); RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); @@ -154,9 +166,11 @@ TEST(HashtablezInfoTest, RecordInsert) { } TEST(HashtablezInfoTest, RecordErase) { + const int64_t test_stride = 31; + const size_t test_element_size = 29; HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(); + info.PrepareForSampling(test_stride, test_element_size); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.size.load(), 0); RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); @@ -164,12 +178,15 @@ TEST(HashtablezInfoTest, RecordErase) { RecordEraseSlow(&info); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 1); + EXPECT_EQ(info.inline_element_size, test_element_size); } TEST(HashtablezInfoTest, RecordRehash) { + const int64_t test_stride = 33; + const size_t test_element_size = 31; HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(); + info.PrepareForSampling(test_stride, test_element_size); RecordInsertSlow(&info, 0x1, 0); RecordInsertSlow(&info, 0x2, kProbeLength); RecordInsertSlow(&info, 0x4, kProbeLength); @@ -188,12 +205,15 @@ TEST(HashtablezInfoTest, RecordRehash) { EXPECT_EQ(info.total_probe_length.load(), 3); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 1); + EXPECT_EQ(info.inline_element_size, test_element_size); } TEST(HashtablezInfoTest, RecordReservation) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); - info.PrepareForSampling(); + const int64_t test_stride = 35; + const size_t test_element_size = 33; + info.PrepareForSampling(test_stride, test_element_size); RecordReservationSlow(&info, 3); EXPECT_EQ(info.max_reserve.load(), 3); @@ -208,39 +228,44 @@ TEST(HashtablezInfoTest, RecordReservation) { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) TEST(HashtablezSamplerTest, SmallSampleParameter) { + const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); for (int i = 0; i < 1000; ++i) { - int64_t next_sample = 0; - HashtablezInfo* sample = SampleSlow(&next_sample); - EXPECT_GT(next_sample, 0); + SamplingState next_sample = {0, 0}; + HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); + EXPECT_GT(next_sample.next_sample, 0); + EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); EXPECT_NE(sample, nullptr); UnsampleSlow(sample); } } TEST(HashtablezSamplerTest, LargeSampleParameter) { + const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(std::numeric_limits::max()); for (int i = 0; i < 1000; ++i) { - int64_t next_sample = 0; - HashtablezInfo* sample = SampleSlow(&next_sample); - EXPECT_GT(next_sample, 0); + SamplingState next_sample = {0, 0}; + HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); + EXPECT_GT(next_sample.next_sample, 0); + EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); EXPECT_NE(sample, nullptr); UnsampleSlow(sample); } } TEST(HashtablezSamplerTest, Sample) { + const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); int64_t num_sampled = 0; int64_t total = 0; double sample_rate = 0.0; for (int i = 0; i < 1000000; ++i) { - HashtablezInfoHandle h = Sample(); + HashtablezInfoHandle h = Sample(test_element_size); ++total; 
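// [Editor's illustrative note, not part of the upstream patch] With the sample
// parameter set to 100 above, SampleSlow draws geometric strides averaging
// roughly 100 Sample() calls, so over the 1,000,000 iterations of this loop the
// sample_rate the test accumulates should settle near 1/100 = 0.01 (assuming
// the default exponentially biased stride generator).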
if (HashtablezInfoHandlePeer::IsSampled(h)) { ++num_sampled; @@ -253,13 +278,16 @@ TEST(HashtablezSamplerTest, Sample) { TEST(HashtablezSamplerTest, Handle) { auto& sampler = GlobalHashtablezSampler(); - HashtablezInfoHandle h(sampler.Register()); + const int64_t test_stride = 41; + const size_t test_element_size = 39; + HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size)); auto* info = HashtablezInfoHandlePeer::GetInfo(&h); info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed); bool found = false; sampler.Iterate([&](const HashtablezInfo& h) { if (&h == info) { + EXPECT_EQ(h.weight, test_stride); EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678); found = true; } @@ -325,18 +353,20 @@ TEST(HashtablezSamplerTest, MultiThreaded) { ThreadPool pool(10); for (int i = 0; i < 10; ++i) { - pool.Schedule([&sampler, &stop]() { + const int64_t sampling_stride = 11 + i % 3; + const size_t elt_size = 10 + i % 2; + pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() { std::random_device rd; std::mt19937 gen(rd()); std::vector infoz; while (!stop.HasBeenNotified()) { if (infoz.empty()) { - infoz.push_back(sampler.Register()); + infoz.push_back(sampler.Register(sampling_stride, elt_size)); } switch (std::uniform_int_distribution<>(0, 2)(gen)) { case 0: { - infoz.push_back(sampler.Register()); + infoz.push_back(sampler.Register(sampling_stride, elt_size)); break; } case 1: { @@ -345,6 +375,7 @@ TEST(HashtablezSamplerTest, MultiThreaded) { HashtablezInfo* info = infoz[p]; infoz[p] = infoz.back(); infoz.pop_back(); + EXPECT_EQ(info->weight, sampling_stride); sampler.Unregister(info); break; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/have_sse.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/have_sse.h deleted file mode 100644 index e75e1a16d3..0000000000 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/have_sse.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Shared config probing for SSE instructions used in Swiss tables. -#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ -#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ - -#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 -#if defined(__SSE2__) || \ - (defined(_MSC_VER) && \ - (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2))) -#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1 -#else -#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0 -#endif -#endif - -#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 -#ifdef __SSSE3__ -#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1 -#else -#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0 -#endif -#endif - -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \ - !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 -#error "Bad configuration!" 
-#endif - -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 -#include -#endif - -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 -#include -#endif - -#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/inlined_vector.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/inlined_vector.h index 1cfba9b218..0398f53008 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/inlined_vector.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/inlined_vector.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ -#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ +#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_ +#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_ #include #include @@ -21,8 +21,11 @@ #include #include #include +#include +#include #include +#include "absl/base/attributes.h" #include "absl/base/macros.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" @@ -37,7 +40,6 @@ namespace inlined_vector_internal { #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif template @@ -81,6 +83,11 @@ using IsMemcpyOk = absl::is_trivially_copy_assignable>, absl::is_trivially_destructible>>; +template +using IsMoveAssignOk = std::is_move_assignable>; +template +using IsSwapOk = absl::type_traits_internal::IsSwappable>; + template struct TypeIdentity { using type = T; @@ -91,33 +98,51 @@ struct TypeIdentity { template using NoTypeDeduction = typename TypeIdentity::type; +template >::value> +struct DestroyAdapter; + template -void DestroyElements(NoTypeDeduction& allocator, Pointer destroy_first, - SizeType destroy_size) { - if (destroy_first != nullptr) { - for (auto i = destroy_size; i != 0;) { +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + for (SizeType i = destroy_size; i != 0;) { --i; AllocatorTraits::destroy(allocator, destroy_first + i); } } -} +}; -// If kUseMemcpy is true, memcpy(dst, src, n); else do nothing. -// Useful to avoid compiler warnings when memcpy() is used for T values -// that are not trivially copyable in non-reachable code. -template -inline void MemcpyIfAllowed(void* dst, const void* src, size_t n); +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + static_cast(allocator); + static_cast(destroy_first); + static_cast(destroy_size); + } +}; -// memcpy when allowed. -template <> -inline void MemcpyIfAllowed(void* dst, const void* src, size_t n) { - memcpy(dst, src, n); -} +template +struct Allocation { + Pointer data = nullptr; + SizeType capacity = 0; +}; -// Do nothing for types that are not memcpy-able. This function is only -// called from non-reachable branches. 
-template <> -inline void MemcpyIfAllowed(void*, const void*, size_t) {} +template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> +struct MallocAdapter { + static Allocation Allocate(A& allocator, SizeType requested_capacity) { + return {AllocatorTraits::allocate(allocator, requested_capacity), + requested_capacity}; + } + + static void Deallocate(A& allocator, Pointer pointer, + SizeType capacity) { + AllocatorTraits::deallocate(allocator, pointer, capacity); + } +}; template void ConstructElements(NoTypeDeduction& allocator, @@ -126,7 +151,7 @@ void ConstructElements(NoTypeDeduction& allocator, for (SizeType i = 0; i < construct_size; ++i) { ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } ABSL_INTERNAL_CATCH_ANY { - DestroyElements(allocator, construct_first, i); + DestroyAdapter::DestroyElements(allocator, construct_first, i); ABSL_INTERNAL_RETHROW; } } @@ -201,7 +226,7 @@ class AllocationTransaction { ~AllocationTransaction() { if (DidAllocate()) { - AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity()); + MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); } } @@ -213,18 +238,27 @@ class AllocationTransaction { SizeType& GetCapacity() { return capacity_; } bool DidAllocate() { return GetData() != nullptr; } - Pointer Allocate(SizeType capacity) { - GetData() = AllocatorTraits::allocate(GetAllocator(), capacity); - GetCapacity() = capacity; - return GetData(); + + Pointer Allocate(SizeType requested_capacity) { + Allocation result = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + GetData() = result.data; + GetCapacity() = result.capacity; + return result.data; } + ABSL_MUST_USE_RESULT Allocation Release() && { + Allocation result = {GetData(), GetCapacity()}; + Reset(); + return result; + } + + private: void Reset() { GetData() = nullptr; GetCapacity() = 0; } - private: container_internal::CompressedTuple> allocator_data_; SizeType capacity_; }; @@ -237,7 +271,7 @@ class ConstructionTransaction { ~ConstructionTransaction() { if (DidConstruct()) { - DestroyElements(GetAllocator(), GetData(), GetSize()); + DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); } } @@ -255,7 +289,7 @@ class ConstructionTransaction { GetData() = data; GetSize() = size; } - void Commit() { + void Commit() && { GetData() = nullptr; GetSize() = 0; } @@ -268,6 +302,20 @@ class ConstructionTransaction { template class Storage { public: + struct MemcpyPolicy {}; + struct ElementwiseAssignPolicy {}; + struct ElementwiseSwapPolicy {}; + struct ElementwiseConstructPolicy {}; + + using MoveAssignmentPolicy = absl::conditional_t< + IsMemcpyOk::value, MemcpyPolicy, + absl::conditional_t::value, ElementwiseAssignPolicy, + ElementwiseConstructPolicy>>; + using SwapPolicy = absl::conditional_t< + IsMemcpyOk::value, MemcpyPolicy, + absl::conditional_t::value, ElementwiseSwapPolicy, + ElementwiseConstructPolicy>>; + static SizeType NextCapacity(SizeType current_capacity) { return current_capacity * 2; } @@ -281,10 +329,10 @@ class Storage { // Storage Constructors and Destructor // --------------------------------------------------------------------------- - Storage() : metadata_(A(), /* size and is_allocated */ 0) {} + Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} explicit Storage(const A& allocator) - : metadata_(allocator, /* size and is_allocated */ 0) {} + : metadata_(allocator, /* size and is_allocated */ 0u) {} ~Storage() { if (GetSizeAndIsAllocated() == 0) { @@ -331,7 +379,9 @@ class Storage { return 
data_.allocated.allocated_capacity; } - SizeType GetInlinedCapacity() const { return static_cast>(N); } + SizeType GetInlinedCapacity() const { + return static_cast>(kOptimalInlinedSize); + } StorageView MakeStorageView() { return GetIsAllocated() ? StorageView{GetAllocatedData(), GetSize(), @@ -400,24 +450,19 @@ class Storage { } void SubtractSize(SizeType count) { - assert(count <= GetSize()); + ABSL_HARDENING_ASSERT(count <= GetSize()); GetSizeAndIsAllocated() -= count << static_cast>(1); } - void SetAllocatedData(Pointer data, SizeType capacity) { - data_.allocated.allocated_data = data; - data_.allocated.allocated_capacity = capacity; - } - - void AcquireAllocatedData(AllocationTransaction& allocation_tx) { - SetAllocatedData(allocation_tx.GetData(), allocation_tx.GetCapacity()); - - allocation_tx.Reset(); + void SetAllocation(Allocation allocation) { + data_.allocated.allocated_data = allocation.data; + data_.allocated.allocated_capacity = allocation.capacity; } void MemcpyFrom(const Storage& other_storage) { - assert(IsMemcpyOk::value || other_storage.GetIsAllocated()); + ABSL_HARDENING_ASSERT(IsMemcpyOk::value || + other_storage.GetIsAllocated()); GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); data_ = other_storage.data_; @@ -425,8 +470,8 @@ class Storage { void DeallocateIfAllocated() { if (GetIsAllocated()) { - AllocatorTraits::deallocate(GetAllocator(), GetAllocatedData(), - GetAllocatedCapacity()); + MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), + GetAllocatedCapacity()); } } @@ -440,8 +485,15 @@ class Storage { SizeType allocated_capacity; }; + // `kOptimalInlinedSize` is an automatically adjusted inlined capacity of the + // `InlinedVector`. Sometimes, it is possible to increase the capacity (from + // the user requested `N`) without increasing the size of the `InlinedVector`. + static constexpr size_t kOptimalInlinedSize = + (std::max)(N, sizeof(Allocated) / sizeof(ValueType)); + struct Inlined { - alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; + alignas(ValueType) char inlined_data[sizeof( + ValueType[kOptimalInlinedSize])]; }; union Data { @@ -449,6 +501,13 @@ class Storage { Inlined inlined; }; + void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType n); + void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType n); + + void SwapInlinedElements(MemcpyPolicy, Storage* other); + template + void SwapInlinedElements(NotMemcpyPolicy, Storage* other); + template ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); @@ -459,14 +518,14 @@ class Storage { template void Storage::DestroyContents() { Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); - DestroyElements(GetAllocator(), data, GetSize()); + DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); DeallocateIfAllocated(); } template void Storage::InitFrom(const Storage& other) { - const auto n = other.GetSize(); - assert(n > 0); // Empty sources handled handled in caller. + const SizeType n = other.GetSize(); + ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. ConstPointer src; Pointer dst; if (!other.GetIsAllocated()) { @@ -476,13 +535,16 @@ void Storage::InitFrom(const Storage& other) { // Because this is only called from the `InlinedVector` constructors, it's // safe to take on the allocation with size `0`. If `ConstructElements(...)` // throws, deallocation will be automatically handled by `~Storage()`. 
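// [Editor's illustrative sketch, not part of the upstream patch] The hunks that
// follow replace direct AllocatorTraits::allocate(...) + SetAllocatedData(...)
// pairs with the new Allocation / MallocAdapter / SetAllocation flow, roughly:
//   Allocation<A> allocation =
//       MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
//   SetAllocation(allocation);       // records {data, capacity} together
//   Pointer dst = allocation.data;   // elements are then constructed into dst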
- SizeType new_capacity = ComputeCapacity(GetInlinedCapacity(), n); - dst = AllocatorTraits::allocate(GetAllocator(), new_capacity); - SetAllocatedData(dst, new_capacity); + SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + SetAllocation(allocation); + dst = allocation.data; src = other.GetAllocatedData(); } if (IsMemcpyOk::value) { - MemcpyIfAllowed::value>(dst, src, sizeof(dst[0]) * n); + std::memcpy(reinterpret_cast(dst), + reinterpret_cast(src), n * sizeof(ValueType)); } else { auto values = IteratorValueAdapter>(src); ConstructElements(GetAllocator(), dst, values, n); @@ -495,17 +557,20 @@ template auto Storage::Initialize(ValueAdapter values, SizeType new_size) -> void { // Only callable from constructors! - assert(!GetIsAllocated()); - assert(GetSize() == 0); + ABSL_HARDENING_ASSERT(!GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetSize() == 0); Pointer construct_data; if (new_size > GetInlinedCapacity()) { // Because this is only called from the `InlinedVector` constructors, it's // safe to take on the allocation with size `0`. If `ConstructElements(...)` // throws, deallocation will be automatically handled by `~Storage()`. - SizeType new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size); - construct_data = AllocatorTraits::allocate(GetAllocator(), new_capacity); - SetAllocatedData(construct_data, new_capacity); + SizeType requested_capacity = + ComputeCapacity(GetInlinedCapacity(), new_size); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + construct_data = allocation.data; + SetAllocation(allocation); SetIsAllocated(); } else { construct_data = GetInlinedData(); @@ -532,8 +597,9 @@ auto Storage::Assign(ValueAdapter values, SizeType new_size) absl::Span> destroy_loop; if (new_size > storage_view.capacity) { - SizeType new_capacity = ComputeCapacity(storage_view.capacity, new_size); - construct_loop = {allocation_tx.Allocate(new_capacity), new_size}; + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; destroy_loop = {storage_view.data, storage_view.size}; } else if (new_size > storage_view.size) { assign_loop = {storage_view.data, storage_view.size}; @@ -549,11 +615,12 @@ auto Storage::Assign(ValueAdapter values, SizeType new_size) ConstructElements(GetAllocator(), construct_loop.data(), values, construct_loop.size()); - DestroyElements(GetAllocator(), destroy_loop.data(), destroy_loop.size()); + DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), + destroy_loop.size()); if (allocation_tx.DidAllocate()) { DeallocateIfAllocated(); - AcquireAllocatedData(allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } @@ -565,12 +632,12 @@ template auto Storage::Resize(ValueAdapter values, SizeType new_size) -> void { StorageView storage_view = MakeStorageView(); - auto* const base = storage_view.data; + Pointer const base = storage_view.data; const SizeType size = storage_view.size; - auto& alloc = GetAllocator(); + A& alloc = GetAllocator(); if (new_size <= size) { // Destroy extra old elements. - DestroyElements(alloc, base + new_size, size - new_size); + DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); } else if (new_size <= storage_view.capacity) { // Construct new elements in place. 
ConstructElements(alloc, base + size, values, new_size - size); @@ -578,13 +645,14 @@ auto Storage::Resize(ValueAdapter values, SizeType new_size) // Steps: // a. Allocate new backing store. // b. Construct new elements in new backing store. - // c. Move existing elements from old backing store to now. + // c. Move existing elements from old backing store to new backing store. // d. Destroy all elements in old backing store. // Use transactional wrappers for the first two steps so we can roll // back if necessary due to exceptions. AllocationTransaction allocation_tx(alloc); - SizeType new_capacity = ComputeCapacity(storage_view.capacity, new_size); - Pointer new_data = allocation_tx.Allocate(new_capacity); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); ConstructionTransaction construction_tx(alloc); construction_tx.Construct(new_data + size, values, new_size - size); @@ -593,10 +661,10 @@ auto Storage::Resize(ValueAdapter values, SizeType new_size) (MoveIterator(base))); ConstructElements(alloc, new_data, move_values, size); - DestroyElements(alloc, base, size); - construction_tx.Commit(); + DestroyAdapter::DestroyElements(alloc, base, size); + std::move(construction_tx).Commit(); DeallocateIfAllocated(); - AcquireAllocatedData(allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } SetSize(new_size); @@ -608,8 +676,8 @@ auto Storage::Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count) -> Iterator { StorageView storage_view = MakeStorageView(); - SizeType insert_index = - std::distance(ConstIterator(storage_view.data), pos); + auto insert_index = static_cast>( + std::distance(ConstIterator(storage_view.data), pos)); SizeType insert_end_index = insert_index + insert_count; SizeType new_size = storage_view.size + insert_count; @@ -621,8 +689,9 @@ auto Storage::Insert(ConstIterator pos, ValueAdapter values, IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); - SizeType new_capacity = ComputeCapacity(storage_view.capacity, new_size); - Pointer new_data = allocation_tx.Allocate(new_capacity); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); construction_tx.Construct(new_data + insert_index, values, insert_count); @@ -631,12 +700,13 @@ auto Storage::Insert(ConstIterator pos, ValueAdapter values, ConstructElements(GetAllocator(), new_data + insert_end_index, move_values, storage_view.size - insert_index); - DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); - construction_tx.Commit(); - move_construction_tx.Commit(); + std::move(construction_tx).Commit(); + std::move(move_construction_tx).Commit(); DeallocateIfAllocated(); - AcquireAllocatedData(allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetAllocatedSize(new_size); return Iterator(new_data + insert_index); @@ -686,7 +756,7 @@ auto Storage::Insert(ConstIterator pos, ValueAdapter values, ConstructElements(GetAllocator(), insert_construction.data(), values, insert_construction.size()); - move_construction_tx.Commit(); + std::move(move_construction_tx).Commit(); AddSize(insert_count); return Iterator(storage_view.data + insert_index); @@ -697,7 +767,7 @@ template template auto Storage::EmplaceBack(Args&&... 
args) -> Reference { StorageView storage_view = MakeStorageView(); - const auto n = storage_view.size; + const SizeType n = storage_view.size; if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { // Fast path; new element fits. Pointer last_ptr = storage_view.data + n; @@ -717,8 +787,8 @@ auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { AllocationTransaction allocation_tx(GetAllocator()); IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); - SizeType new_capacity = NextCapacity(storage_view.capacity); - Pointer construct_data = allocation_tx.Allocate(new_capacity); + SizeType requested_capacity = NextCapacity(storage_view.capacity); + Pointer construct_data = allocation_tx.Allocate(requested_capacity); Pointer last_ptr = construct_data + storage_view.size; // Construct new element. @@ -734,10 +804,11 @@ auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { ABSL_INTERNAL_RETHROW; } // Destroy elements in old backing store. - DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); DeallocateIfAllocated(); - AcquireAllocatedData(allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); AddSize(1); return *last_ptr; @@ -748,9 +819,9 @@ auto Storage::Erase(ConstIterator from, ConstIterator to) -> Iterator { StorageView storage_view = MakeStorageView(); - SizeType erase_size = std::distance(from, to); - SizeType erase_index = - std::distance(ConstIterator(storage_view.data), from); + auto erase_size = static_cast>(std::distance(from, to)); + auto erase_index = static_cast>( + std::distance(ConstIterator(storage_view.data), from)); SizeType erase_end_index = erase_index + erase_size; IteratorValueAdapter> move_values( @@ -759,9 +830,9 @@ auto Storage::Erase(ConstIterator from, ConstIterator to) AssignElements(storage_view.data + erase_index, move_values, storage_view.size - erase_end_index); - DestroyElements(GetAllocator(), - storage_view.data + (storage_view.size - erase_size), - erase_size); + DestroyAdapter::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), + erase_size); SubtractSize(erase_size); return Iterator(storage_view.data + erase_index); @@ -778,24 +849,25 @@ auto Storage::Reserve(SizeType requested_capacity) -> void { IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); - SizeType new_capacity = + SizeType new_requested_capacity = ComputeCapacity(storage_view.capacity, requested_capacity); - Pointer new_data = allocation_tx.Allocate(new_capacity); + Pointer new_data = allocation_tx.Allocate(new_requested_capacity); ConstructElements(GetAllocator(), new_data, move_values, storage_view.size); - DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); DeallocateIfAllocated(); - AcquireAllocatedData(allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } template auto Storage::ShrinkToFit() -> void { // May only be called on allocated instances! 
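// [Editor's illustrative sketch, not part of the upstream patch] ShrinkToFit
// below reuses the same transactional reallocation pattern this patch applies
// in Resize, Insert, EmplaceBackSlow and Reserve, roughly:
//   AllocationTransaction<A> tx(GetAllocator());
//   Pointer new_data = tx.Allocate(requested_capacity);          // owned by tx
//   ConstructElements(GetAllocator(), new_data, values, n);      // may throw
//   DestroyAdapter<A>::DestroyElements(GetAllocator(), old_data, old_size);
//   DeallocateIfAllocated();
//   SetAllocation(std::move(tx).Release());  // commit: tx gives up ownership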
- assert(GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetIsAllocated()); StorageView storage_view{GetAllocatedData(), GetSize(), GetAllocatedCapacity()}; @@ -809,8 +881,12 @@ auto Storage::ShrinkToFit() -> void { Pointer construct_data; if (storage_view.size > GetInlinedCapacity()) { - SizeType new_capacity = storage_view.size; - construct_data = allocation_tx.Allocate(new_capacity); + SizeType requested_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(requested_capacity); + if (allocation_tx.GetCapacity() >= storage_view.capacity) { + // Already using the smallest available heap allocation. + return; + } } else { construct_data = GetInlinedData(); } @@ -820,17 +896,18 @@ auto Storage::ShrinkToFit() -> void { storage_view.size); } ABSL_INTERNAL_CATCH_ANY { - SetAllocatedData(storage_view.data, storage_view.capacity); + SetAllocation({storage_view.data, storage_view.capacity}); ABSL_INTERNAL_RETHROW; } - DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); - AllocatorTraits::deallocate(GetAllocator(), storage_view.data, - storage_view.capacity); + MallocAdapter::Deallocate(GetAllocator(), storage_view.data, + storage_view.capacity); if (allocation_tx.DidAllocate()) { - AcquireAllocatedData(allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); } else { UnsetIsAllocated(); } @@ -839,30 +916,12 @@ auto Storage::ShrinkToFit() -> void { template auto Storage::Swap(Storage* other_storage_ptr) -> void { using std::swap; - assert(this != other_storage_ptr); + ABSL_HARDENING_ASSERT(this != other_storage_ptr); if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { swap(data_.allocated, other_storage_ptr->data_.allocated); } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) { - Storage* small_ptr = this; - Storage* large_ptr = other_storage_ptr; - if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); - - for (SizeType i = 0; i < small_ptr->GetSize(); ++i) { - swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); - } - - IteratorValueAdapter> move_values( - MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); - - ConstructElements(large_ptr->GetAllocator(), - small_ptr->GetInlinedData() + small_ptr->GetSize(), - move_values, - large_ptr->GetSize() - small_ptr->GetSize()); - - DestroyElements(large_ptr->GetAllocator(), - large_ptr->GetInlinedData() + small_ptr->GetSize(), - large_ptr->GetSize() - small_ptr->GetSize()); + SwapInlinedElements(SwapPolicy{}, other_storage_ptr); } else { Storage* allocated_ptr = this; Storage* inlined_ptr = other_storage_ptr; @@ -881,23 +940,86 @@ auto Storage::Swap(Storage* other_storage_ptr) -> void { inlined_ptr->GetSize()); } ABSL_INTERNAL_CATCH_ANY { - allocated_ptr->SetAllocatedData(allocated_storage_view.data, - allocated_storage_view.capacity); + allocated_ptr->SetAllocation(Allocation{ + allocated_storage_view.data, allocated_storage_view.capacity}); ABSL_INTERNAL_RETHROW; } - DestroyElements(inlined_ptr->GetAllocator(), - inlined_ptr->GetInlinedData(), inlined_ptr->GetSize()); + DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), + inlined_ptr->GetInlinedData(), + inlined_ptr->GetSize()); - inlined_ptr->SetAllocatedData(allocated_storage_view.data, - allocated_storage_view.capacity); + inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, + allocated_storage_view.capacity}); } swap(GetSizeAndIsAllocated(), 
other_storage_ptr->GetSizeAndIsAllocated()); swap(GetAllocator(), other_storage_ptr->GetAllocator()); } -// End ignore "array-bounds" and "maybe-uninitialized" +template +void Storage::SwapN(ElementwiseSwapPolicy, Storage* other, + SizeType n) { + std::swap_ranges(GetInlinedData(), GetInlinedData() + n, + other->GetInlinedData()); +} + +template +void Storage::SwapN(ElementwiseConstructPolicy, Storage* other, + SizeType n) { + Pointer a = GetInlinedData(); + Pointer b = other->GetInlinedData(); + // see note on allocators in `SwapInlinedElements`. + A& allocator_a = GetAllocator(); + A& allocator_b = other->GetAllocator(); + for (SizeType i = 0; i < n; ++i, ++a, ++b) { + ValueType tmp(std::move(*a)); + + AllocatorTraits::destroy(allocator_a, a); + AllocatorTraits::construct(allocator_b, a, std::move(*b)); + + AllocatorTraits::destroy(allocator_b, b); + AllocatorTraits::construct(allocator_a, b, std::move(tmp)); + } +} + +template +void Storage::SwapInlinedElements(MemcpyPolicy, Storage* other) { + Data tmp = data_; + data_ = other->data_; + other->data_ = tmp; +} + +template +template +void Storage::SwapInlinedElements(NotMemcpyPolicy policy, + Storage* other) { + // Note: `destroy` needs to use pre-swap allocator while `construct` - + // post-swap allocator. Allocators will be swaped later on outside of + // `SwapInlinedElements`. + Storage* small_ptr = this; + Storage* large_ptr = other; + if (small_ptr->GetSize() > large_ptr->GetSize()) { + std::swap(small_ptr, large_ptr); + } + + auto small_size = small_ptr->GetSize(); + auto diff = large_ptr->GetSize() - small_size; + SwapN(policy, other, small_size); + + IteratorValueAdapter> move_values( + MoveIterator(large_ptr->GetInlinedData() + small_size)); + + ConstructElements(large_ptr->GetAllocator(), + small_ptr->GetInlinedData() + small_size, move_values, + diff); + + DestroyAdapter::DestroyElements(large_ptr->GetAllocator(), + large_ptr->GetInlinedData() + small_size, + diff); +} + +// End ignore "array-bounds" #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif @@ -906,4 +1028,4 @@ auto Storage::Swap(Storage* other_storage_ptr) -> void { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ +#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/layout_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/layout_test.cc index 1d7158ffc0..54e5d5bbb8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/layout_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/layout_test.cc @@ -1350,7 +1350,13 @@ TEST(Layout, CustomAlignment) { TEST(Layout, OverAligned) { constexpr size_t M = alignof(max_align_t); constexpr Layout> x(1, 3); +#ifdef __GNUC__ + // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357 + __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()]; +#else alignas(2 * M) unsigned char p[x.AllocSize()]; +#endif EXPECT_EQ(2 * M + 3, x.AllocSize()); EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_hash_policy.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_slot_policy.h similarity index 93% rename from 
third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_hash_policy.h rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_slot_policy.h index 4617162f0b..baba5743c8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_hash_policy.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_slot_policy.h @@ -30,8 +30,8 @@ // It may also optionally define `value()` and `apply()`. For documentation on // these, see hash_policy_traits.h. -#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ -#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ +#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ #include #include @@ -46,7 +46,7 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { template -struct node_hash_policy { +struct node_slot_policy { static_assert(std::is_lvalue_reference::value, ""); using slot_type = typename std::remove_cv< @@ -89,4 +89,4 @@ struct node_hash_policy { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ +#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_slot_policy_test.cc similarity index 93% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_slot_policy_test.cc index 84aabba968..51b7467bfb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/node_slot_policy_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/container/internal/node_hash_policy.h" +#include "absl/container/internal/node_slot_policy.h" #include @@ -27,7 +27,7 @@ namespace { using ::testing::Pointee; -struct Policy : node_hash_policy { +struct Policy : node_slot_policy { using key_type = int; using init_type = int; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc index 687bcb8a4d..c63a2e02d1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc @@ -23,13 +23,17 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = { ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr size_t Group::kWidth; +#endif // Returns "random" seed. 
inline size_t RandomSeed() { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h index 212052ea74..de455d6cb5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h @@ -53,40 +53,121 @@ // // IMPLEMENTATION DETAILS // -// The table stores elements inline in a slot array. In addition to the slot -// array the table maintains some control state per slot. The extra state is one -// byte per slot and stores empty or deleted marks, or alternatively 7 bits from -// the hash of an occupied slot. The table is split into logical groups of -// slots, like so: +// # Table Layout +// +// A raw_hash_set's backing array consists of control bytes followed by slots +// that may or may not contain objects. +// +// The layout of the backing array, for `capacity` slots, is thus, as a +// pseudo-struct: +// +// struct BackingArray { +// // Control bytes for the "real" slots. +// ctrl_t ctrl[capacity]; +// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to +// // stop and serves no other purpose. +// ctrl_t sentinel; +// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so +// // that if a probe sequence picks a value near the end of `ctrl`, +// // `Group` will have valid control bytes to look at. +// ctrl_t clones[kWidth - 1]; +// // The actual slot data. +// slot_type slots[capacity]; +// }; +// +// The length of this array is computed by `AllocSize()` below. +// +// Control bytes (`ctrl_t`) are bytes (collected into groups of a +// platform-specific size) that define the state of the corresponding slot in +// the slot array. Group manipulation is tightly optimized to be as efficient +// as possible: SSE and friends on x86, clever bit operations on other arches. // // Group 1 Group 2 Group 3 // +---------------+---------------+---------------+ // | | | | | | | | | | | | | | | | | | | | | | | | | // +---------------+---------------+---------------+ // -// On lookup the hash is split into two parts: -// - H2: 7 bits (those stored in the control bytes) -// - H1: the rest of the bits -// The groups are probed using H1. For each group the slots are matched to H2 in -// parallel. Because H2 is 7 bits (128 states) and the number of slots per group -// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. +// Each control byte is either a special value for empty slots, deleted slots +// (sometimes called *tombstones*), and a special end-of-table marker used by +// iterators, or, if occupied, seven bits (H2) from the hash of the value in the +// corresponding slot. // -// On insert, once the right group is found (as in lookup), its slots are -// filled in order. +// Storing control bytes in a separate array also has beneficial cache effects, +// since more logical slots will fit into a cache line. // -// On erase a slot is cleared. In case the group did not have any empty slots -// before the erase, the erased slot is marked as deleted. +// # Hashing // -// Groups without empty slots (but maybe with deleted slots) extend the probe -// sequence. The probing algorithm is quadratic. Given N the number of groups, -// the probing function for the i'th probe is: +// We compute two separate hashes, `H1` and `H2`, from the hash of an object. 
+// `H1(hash(x))` is an index into `slots`, and essentially the starting point +// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out +// objects that cannot possibly be the one we are looking for. // -// P(0) = H1 % N +// # Table operations. // -// P(i) = (P(i - 1) + i) % N +// The key operations are `insert`, `find`, and `erase`. // -// This probing function guarantees that after N probes, all the groups of the -// table will be probed exactly once. +// Since `insert` and `erase` are implemented in terms of `find`, we describe +// `find` first. To `find` a value `x`, we compute `hash(x)`. From +// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every +// group of slots in some interesting order. +// +// We now walk through these indices. At each index, we select the entire group +// starting with that index and extract potential candidates: occupied slots +// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the +// group, we stop and return an error. Each candidate slot `y` is compared with +// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the +// next probe index. Tombstones effectively behave like full slots that never +// match the value we're looking for. +// +// The `H2` bits ensure when we compare a slot to an object with `==`, we are +// likely to have actually found the object. That is, the chance is low that +// `==` is called and returns `false`. Thus, when we search for an object, we +// are unlikely to call `==` many times. This likelyhood can be analyzed as +// follows (assuming that H2 is a random enough hash function). +// +// Let's assume that there are `k` "wrong" objects that must be examined in a +// probe sequence. For example, when doing a `find` on an object that is in the +// table, `k` is the number of objects between the start of the probe sequence +// and the final found object (not including the final found object). The +// expected number of objects with an H2 match is then `k/128`. Measurements +// and analysis indicate that even at high load factors, `k` is less than 32, +// meaning that the number of "false positive" comparisons we must perform is +// less than 1/8 per `find`. + +// `insert` is implemented in terms of `unchecked_insert`, which inserts a +// value presumed to not be in the table (violating this requirement will cause +// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert +// it, we construct a `probe_seq` once again, and use it to find the first +// group with an unoccupied (empty *or* deleted) slot. We place `x` into the +// first such slot in the group and mark it as full with `x`'s H2. +// +// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and +// perform a `find` to see if it's already present; if it is, we're done. If +// it's not, we may decide the table is getting overcrowded (i.e. the load +// factor is greater than 7/8 for big tables; `is_small()` tables use a max load +// factor of 1); in this case, we allocate a bigger array, `unchecked_insert` +// each element of the table into the new array (we know that no insertion here +// will insert an already-present value), and discard the old backing array. At +// this point, we may `unchecked_insert` the value `x`. +// +// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which +// presents a viable, initialized slot pointee to the caller. 
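// [Editor's illustrative sketch, not part of the upstream patch] In rough
// pseudo-C++, the find loop described in the paragraphs above looks like this
// (slot_matches, found and not_found are placeholders):
//
//   auto seq = probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash))) {      // candidate slots
//       if (slot_matches(seq.offset(i), x)) return found;
//     }
//     if (g.MaskEmpty()) return not_found;        // group has never been full
//     seq.next();                                 // advance to the next group
//   }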
+// +// `erase` is implemented in terms of `erase_at`, which takes an index to a +// slot. Given an offset, we simply create a tombstone and destroy its contents. +// If we can prove that the slot would not appear in a probe sequence, we can +// make the slot as empty, instead. We can prove this by observing that if a +// group has any empty slots, it has never been full (assuming we never create +// an empty slot in a group with no empties, which this heuristic guarantees we +// never do) and find would stop at this group anyways (since it does not probe +// beyond groups with empties). +// +// `erase` is `erase_at` composed with `find`: if we +// have a value `x`, we can perform a `find`, and then `erase_at` the resulting +// slot. +// +// To iterate, we simply traverse the array, skipping empty and deleted slots +// and stopping when we hit a `kSentinel`. #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ @@ -102,7 +183,9 @@ #include #include +#include "absl/base/config.h" #include "absl/base/internal/endian.h" +#include "absl/base/internal/prefetch.h" #include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/container/internal/common.h" @@ -111,12 +194,27 @@ #include "absl/container/internal/hash_policy_traits.h" #include "absl/container/internal/hashtable_debug_hooks.h" #include "absl/container/internal/hashtablez_sampler.h" -#include "absl/container/internal/have_sse.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/numeric/bits.h" #include "absl/utility/utility.h" +#ifdef ABSL_INTERNAL_HAVE_SSE2 +#include +#endif + +#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#include +#endif + +#ifdef _MSC_VER +#include +#endif + +#ifdef ABSL_INTERNAL_HAVE_ARM_NEON +#include +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -131,14 +229,40 @@ template void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, std::false_type /* propagate_on_container_swap */) {} +// The state for a probe sequence. +// +// Currently, the sequence is a triangular progression of the form +// +// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) +// +// The use of `Width` ensures that each probe step does not overlap groups; +// the sequence effectively outputs the addresses of *groups* (although not +// necessarily aligned to any boundary). The `Group` machinery allows us +// to check an entire group with minimal branching. +// +// Wrapping around at `mask + 1` is important, but not for the obvious reason. +// As described above, the first few entries of the control byte array +// are mirrored at the end of the array, which `Group` will find and use +// for selecting candidates. However, when those candidates' slots are +// actually inspected, there are no corresponding slots for the cloned bytes, +// so we need to make sure we've treated those offsets as "wrapping around". +// +// It turns out that this probe sequence visits every group exactly once if the +// number of groups is a power of two, since (i^2+i)/2 is a bijection in +// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing template class probe_seq { public: + // Creates a new probe sequence using `hash` as the initial value of the + // sequence and `mask` (usually the capacity of the table) as the mask to + // apply to each value in the progression. 
probe_seq(size_t hash, size_t mask) { assert(((mask + 1) & mask) == 0 && "not a mask"); mask_ = mask; offset_ = hash & mask_; } + + // The offset within the table, i.e., the value `p(i)` above. size_t offset() const { return offset_; } size_t offset(size_t i) const { return (offset_ + i) & mask_; } @@ -147,7 +271,7 @@ class probe_seq { offset_ += index_; offset_ &= mask_; } - // 0-based probe index. The i-th probe in the probe sequence. + // 0-based probe index, a multiple of `Width`. size_t index() const { return index_; } private: @@ -171,9 +295,9 @@ struct IsDecomposable : std::false_type {}; template struct IsDecomposable< - absl::void_t(), - std::declval()...))>, + absl::void_t(), + std::declval()...))>, Policy, Hash, Eq, Ts...> : std::true_type {}; // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. @@ -189,57 +313,84 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { template uint32_t TrailingZeros(T x) { - ABSL_INTERNAL_ASSUME(x != 0); - return countr_zero(x); + ABSL_ASSUME(x != 0); + return static_cast(countr_zero(x)); } -// An abstraction over a bitmask. It provides an easy way to iterate through the -// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE), -// this is a true bitmask. On non-SSE, platforms the arithematic used to -// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as -// either 0x00 or 0x80. +// An abstract bitmask, such as that emitted by a SIMD instruction. // -// For example: -// for (int i : BitMask(0x5)) -> yields 0, 2 -// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +// Specifically, this type implements a simple bitset whose representation is +// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number +// of abstract bits in the bitset, while `Shift` is the log-base-two of the +// width of an abstract bit in the representation. +// This mask provides operations for any number of real bits set in an abstract +// bit. To add iteration on top of that, implementation must guarantee no more +// than one real bit is set in an abstract bit. template -class BitMask { - static_assert(std::is_unsigned::value, ""); - static_assert(Shift == 0 || Shift == 3, ""); - +class NonIterableBitMask { public: - // These are useful for unit tests (gunit). - using value_type = int; - using iterator = BitMask; - using const_iterator = BitMask; + explicit NonIterableBitMask(T mask) : mask_(mask) {} - explicit BitMask(T mask) : mask_(mask) {} - BitMask& operator++() { - mask_ &= (mask_ - 1); - return *this; - } - explicit operator bool() const { return mask_ != 0; } - int operator*() const { return LowestBitSet(); } + explicit operator bool() const { return this->mask_ != 0; } + + // Returns the index of the lowest *abstract* bit set in `self`. uint32_t LowestBitSet() const { return container_internal::TrailingZeros(mask_) >> Shift; } + + // Returns the index of the highest *abstract* bit set in `self`. uint32_t HighestBitSet() const { return static_cast((bit_width(mask_) - 1) >> Shift); } - BitMask begin() const { return *this; } - BitMask end() const { return BitMask(0); } - + // Return the number of trailing zero *abstract* bits. uint32_t TrailingZeros() const { return container_internal::TrailingZeros(mask_) >> Shift; } + // Return the number of leading zero *abstract* bits. 
uint32_t LeadingZeros() const { constexpr int total_significant_bits = SignificantBits << Shift; constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; - return countl_zero(mask_ << extra_bits) >> Shift; + return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; } + T mask_; +}; + +// Mask that can be iterable +// +// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just +// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When +// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as +// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. +// +// For example: +// for (int i : BitMask(0b101)) -> yields 0, 2 +// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +template +class BitMask : public NonIterableBitMask { + using Base = NonIterableBitMask; + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + explicit BitMask(T mask) : Base(mask) {} + // BitMask is an iterator over the indices of its abstract bits. + using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + BitMask& operator++() { + this->mask_ &= (this->mask_ - 1); + return *this; + } + + uint32_t operator*() const { return Base::LowestBitSet(); } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + private: friend bool operator==(const BitMask& a, const BitMask& b) { return a.mask_ == b.mask_; @@ -247,15 +398,27 @@ class BitMask { friend bool operator!=(const BitMask& a, const BitMask& b) { return a.mask_ != b.mask_; } - - T mask_; }; using h2_t = uint8_t; // The values here are selected for maximum performance. See the static asserts -// below for details. We use an enum class so that when strict aliasing is -// enabled, the compiler knows ctrl_t doesn't alias other types. +// below for details. + +// A `ctrl_t` is a single control byte, which can have one of four +// states: empty, deleted, full (which has an associated seven-bit h2_t value) +// and the sentinel. They have the following bit patterns: +// +// empty: 1 0 0 0 0 0 0 0 +// deleted: 1 1 1 1 1 1 1 0 +// full: 0 h h h h h h h // h represents the hash bits. +// sentinel: 1 1 1 1 1 1 1 1 +// +// These values are specifically tuned for SSE-flavored SIMD. +// The static_asserts below detail the source of these choices. +// +// We use an enum class so that when strict aliasing is enabled, the compiler +// knows ctrl_t doesn't alias other types. enum class ctrl_t : int8_t { kEmpty = -128, // 0b10000000 kDeleted = -2, // 0b11111110 @@ -283,15 +446,17 @@ static_assert( static_cast(ctrl_t::kSentinel) & 0x7F) != 0, "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " "shared by ctrl_t::kSentinel to make the scalar test for " - "MatchEmptyOrDeleted() efficient"); + "MaskEmptyOrDeleted() efficient"); static_assert(ctrl_t::kDeleted == static_cast(-2), "ctrl_t::kDeleted must be -2 to make the implementation of " "ConvertSpecialToEmptyAndFullToDeleted efficient"); -// A single block of empty control bytes for tables without any slots allocated. -// This enables removing a branch in the hot path of find(). ABSL_DLL extern const ctrl_t kEmptyGroup[16]; + +// Returns a pointer to a control byte group that can be used by empty tables. inline ctrl_t* EmptyGroup() { + // Const must be cast away here; no uses of this function will actually write + // to it, because it is only used for empty tables. 
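// [Editor's sketch, not part of the upstream change.] Why the specific
// encodings above work: every 7-bit H2 value gives a full byte of the form
// 0b0hhhhhhh, so full bytes are exactly the non-negative ones, and among the
// special values only kSentinel (-1) is not below kSentinel. That is what
// reduces the scalar predicates defined further down to single signed
// comparisons:
constexpr bool SketchIsFull(int8_t c) { return c >= 0; }
constexpr bool SketchIsEmptyOrDeleted(int8_t c) { return c < -1; }
static_assert(SketchIsFull(0x5A) && !SketchIsFull(-128) && !SketchIsFull(-2),
              "only H2-tagged bytes have the sign bit clear");
static_assert(SketchIsEmptyOrDeleted(-128) &&   // kEmpty
                  SketchIsEmptyOrDeleted(-2) &&  // kDeleted
                  !SketchIsEmptyOrDeleted(-1),   // kSentinel
              "kEmpty and kDeleted are the only values below kSentinel");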
return const_cast(kEmptyGroup); } @@ -299,28 +464,61 @@ inline ctrl_t* EmptyGroup() { // randomize insertion order within groups. bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); -// Returns a hash seed. +// Returns a per-table, hash salt, which changes on resize. This gets mixed into +// H1 to randomize iteration order per-table. // // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure // non-determinism of iteration order in most cases. -inline size_t HashSeed(const ctrl_t* ctrl) { +inline size_t PerTableSalt(const ctrl_t* ctrl) { // The low bits of the pointer have little or no entropy because of // alignment. We shift the pointer to try to use higher entropy bits. A // good number seems to be 12 bits, because that aligns with page size. return reinterpret_cast(ctrl) >> 12; } - +// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. inline size_t H1(size_t hash, const ctrl_t* ctrl) { - return (hash >> 7) ^ HashSeed(ctrl); + return (hash >> 7) ^ PerTableSalt(ctrl); } + +// Extracts the H2 portion of a hash: the 7 bits not used for H1. +// +// These are used as an occupied control byte. inline h2_t H2(size_t hash) { return hash & 0x7F; } +// Helpers for checking the state of a control byte. inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#ifdef ABSL_INTERNAL_HAVE_SSE2 +// Quick reference guide for intrinsics used below: +// +// * __m128i: An XMM (128-bit) word. +// +// * _mm_setzero_si128: Returns a zero vector. +// * _mm_set1_epi8: Returns a vector with the same i8 in each lane. +// +// * _mm_subs_epi8: Saturating-subtracts two i8 vectors. +// * _mm_and_si128: Ands two i128s together. +// * _mm_or_si128: Ors two i128s together. +// * _mm_andnot_si128: And-nots two i128s together. +// +// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, +// filling each lane with 0x00 or 0xff. +// * _mm_cmpgt_epi8: Same as above, but using > rather than ==. +// +// * _mm_loadu_si128: Performs an unaligned load of an i128. +// * _mm_storeu_si128: Performs an unaligned store of an i128. +// +// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first +// argument if the corresponding lane of the second +// argument is positive, negative, or zero, respectively. +// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a +// bitmask consisting of those bits. +// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low +// four bits of each i8 lane in the second argument as +// indices. // https://github.com/abseil/abseil-cpp/issues/209 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 @@ -347,32 +545,34 @@ struct GroupSse2Impl { // Returns a bitmask representing the positions of slots that match hash. BitMask Match(h2_t hash) const { - auto match = _mm_set1_epi8(hash); + auto match = _mm_set1_epi8(static_cast(hash)); return BitMask( - _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); } // Returns a bitmask representing the positions of empty slots. - BitMask MatchEmpty() const { -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 + NonIterableBitMask MaskEmpty() const { +#ifdef ABSL_INTERNAL_HAVE_SSSE3 // This only works because ctrl_t::kEmpty is -128. 
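// [Editor's sketch, not part of the upstream change.] Tying H2() above to the
// SIMD Match() here: the low 7 bits of the hash are the byte stored in the
// control array, so looking up a hash whose H2 is 0x42 against a group whose
// control bytes are [0x42, kEmpty, 0x17, 0x42, kDeleted, ...] compares every
// byte against 0x42 with one _mm_cmpeq_epi8; _mm_movemask_epi8 collects the
// matching lanes into the bitmask 0b1001, and BitMask then yields candidate
// slots 0 and 3 in order. The split itself, on a concrete 64-bit hash:
constexpr uint64_t kSketchHash = 0x123456789ABCDEF0ULL;
static_assert((kSketchHash & 0x7F) == 0x70,
              "H2: the low 7 bits become the control-byte tag");
static_assert((kSketchHash >> 7) == 0x2468ACF13579BDULL,
              "H1: the remaining 57 bits (xor the per-table salt) seed the "
              "probe sequence");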
- return BitMask( - _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); #else - return Match(static_cast(ctrl_t::kEmpty)); + auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); #endif } // Returns a bitmask representing the positions of empty or deleted slots. - BitMask MatchEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return BitMask( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))); + NonIterableBitMask MaskEmptyOrDeleted() const { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return NonIterableBitMask(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); } // Returns the number of trailing empty or deleted elements in the group. uint32_t CountLeadingEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); return TrailingZeros(static_cast( _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); } @@ -380,7 +580,7 @@ struct GroupSse2Impl { void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { auto msbs = _mm_set1_epi8(static_cast(-128)); auto x126 = _mm_set1_epi8(126); -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 +#ifdef ABSL_INTERNAL_HAVE_SSSE3 auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); #else auto zero = _mm_setzero_si128(); @@ -394,6 +594,64 @@ struct GroupSse2Impl { }; #endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) +struct GroupAArch64Impl { + static constexpr size_t kWidth = 8; + + explicit GroupAArch64Impl(const ctrl_t* pos) { + ctrl = vld1_u8(reinterpret_cast(pos)); + } + + BitMask Match(h2_t hash) const { + uint8x8_t dup = vdup_n_u8(hash); + auto mask = vceq_u8(ctrl, dup); + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask( + vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); + } + + NonIterableBitMask MaskEmpty() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vceq_s8( + vdup_n_s8(static_cast(ctrl_t::kEmpty)), + vreinterpret_s8_u8(ctrl))), + 0); + return NonIterableBitMask(mask); + } + + NonIterableBitMask MaskEmptyOrDeleted() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( + vdup_n_s8(static_cast(ctrl_t::kSentinel)), + vreinterpret_s8_u8(ctrl))), + 0); + return NonIterableBitMask(mask); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, + // so we should be fine. 
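// [Editor's sketch, not part of the upstream change.] The trick used here and
// in GroupPortableImpl below, restated per byte: bit 0 of (b | ~(b >> 7)) is
// bit0(b) | !bit7(b), which is 0 exactly for kEmpty (0x80) and kDeleted
// (0xFE) and 1 for kSentinel (0xFF) and every full byte (sign bit clear).
// Masking the packed 64-bit word with 0x0101...01 keeps just those per-byte
// bits, and countr_zero(...) >> 3 is then the number of leading
// (lowest-address) empty or deleted control bytes.
constexpr bool SketchLowBitMarksEmptyOrDeleted(uint8_t b) {
  return ((b | ~(b >> 7)) & 1) == 0;  // low bit clear <=> empty or deleted
}
static_assert(SketchLowBitMarksEmptyOrDeleted(0x80) &&    // kEmpty
                  SketchLowBitMarksEmptyOrDeleted(0xFE) &&    // kDeleted
                  !SketchLowBitMarksEmptyOrDeleted(0xFF) &&   // kSentinel
                  !SketchLowBitMarksEmptyOrDeleted(0x25),     // a full byte
              "the per-byte low bit classifies empty/deleted correctly");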
+ constexpr uint64_t bits = 0x0101010101010101ULL; + return static_cast(countr_zero((mask | ~(mask >> 7)) & bits) >> + 3); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = mask & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint8x8_t ctrl; +}; +#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN + struct GroupPortableImpl { static constexpr size_t kWidth = 8; @@ -420,19 +678,24 @@ struct GroupPortableImpl { return BitMask((x - lsbs) & ~x & msbs); } - BitMask MatchEmpty() const { + NonIterableBitMask MaskEmpty() const { constexpr uint64_t msbs = 0x8080808080808080ULL; - return BitMask((ctrl & (~ctrl << 6)) & msbs); + return NonIterableBitMask((ctrl & (~ctrl << 6)) & + msbs); } - BitMask MatchEmptyOrDeleted() const { + NonIterableBitMask MaskEmptyOrDeleted() const { constexpr uint64_t msbs = 0x8080808080808080ULL; - return BitMask((ctrl & (~ctrl << 7)) & msbs); + return NonIterableBitMask((ctrl & (~ctrl << 7)) & + msbs); } uint32_t CountLeadingEmptyOrDeleted() const { - constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; - return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3; + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + constexpr uint64_t bits = 0x0101010101010101ULL; + return static_cast(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> + 3); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { @@ -446,32 +709,40 @@ struct GroupPortableImpl { uint64_t ctrl; }; -#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +#ifdef ABSL_INTERNAL_HAVE_SSE2 using Group = GroupSse2Impl; +#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) +using Group = GroupAArch64Impl; #else using Group = GroupPortableImpl; #endif -// The number of cloned control bytes that we copy from the beginning to the -// end of the control bytes array. +// Returns he number of "cloned control bytes". +// +// This is the number of control bytes that are present both at the beginning +// of the control byte array and at the end, such that we can create a +// `Group::kWidth`-width probe window starting from any control byte. constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } template class raw_hash_set; +// Returns whether `n` is a valid capacity (i.e., number of slots). +// +// A valid capacity is a non-zero integer `2^m - 1`. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } +// Applies the following mapping to every byte in the control array: +// * kDeleted -> kEmpty +// * kEmpty -> kEmpty +// * _ -> kDeleted // PRECONDITION: // IsValidCapacity(capacity) // ctrl[capacity] == ctrl_t::kSentinel // ctrl[i] != ctrl_t::kSentinel for all i < capacity -// Applies mapping for every byte in ctrl: -// DELETED -> EMPTY -// EMPTY -> EMPTY -// FULL -> DELETED void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); -// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. +// Converts `n` into the next valid capacity, per `IsValidCapacity`. inline size_t NormalizeCapacity(size_t n) { return n ? 
~size_t{} >> countl_zero(n) : 1; } @@ -484,8 +755,8 @@ inline size_t NormalizeCapacity(size_t n) { // never need to probe (the whole table fits in one group) so we don't need a // load factor less than 1. -// Given `capacity` of the table, returns the size (i.e. number of full slots) -// at which we should grow the capacity. +// Given `capacity`, applies the load factor; i.e., it returns the maximum +// number of values we should put into the table before a resizing rehash. inline size_t CapacityToGrowth(size_t capacity) { assert(IsValidCapacity(capacity)); // `capacity*7/8` @@ -495,8 +766,12 @@ inline size_t CapacityToGrowth(size_t capacity) { } return capacity - capacity / 8; } -// From desired "growth" to a lowerbound of the necessary capacity. -// Might not be a valid one and requires NormalizeCapacity(). + +// Given `growth`, "unapplies" the load factor to find how large the capacity +// should be to stay within the load factor. +// +// This might not be a valid capacity and `NormalizeCapacity()` should be +// called on this. inline size_t GrowthToLowerboundCapacity(size_t growth) { // `growth*8/7` if (Group::kWidth == 8 && growth == 7) { @@ -522,16 +797,54 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last, return 0; } -inline void AssertIsFull(ctrl_t* ctrl) { - ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && - "Invalid operation on iterator. The element might have " - "been erased, or the table might have rehashed."); +#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, operation) \ + do { \ + ABSL_HARDENING_ASSERT( \ + (ctrl != nullptr) && operation \ + " called on invalid iterator. The iterator might be an end() " \ + "iterator or may have been default constructed."); \ + ABSL_HARDENING_ASSERT( \ + (IsFull(*ctrl)) && operation \ + " called on invalid iterator. The element might have been erased or " \ + "the table might have rehashed."); \ + } while (0) + +// Note that for comparisons, null/end iterators are valid. +inline void AssertIsValidForComparison(const ctrl_t* ctrl) { + ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) && + "Invalid iterator comparison. The element might have " + "been erased or the table might have rehashed."); } -inline void AssertIsValid(ctrl_t* ctrl) { - ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) && - "Invalid operation on iterator. The element might have " - "been erased, or the table might have rehashed."); +// If the two iterators come from the same container, then their pointers will +// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice/versa. +// Note: we take slots by reference so that it's not UB if they're uninitialized +// as long as we don't read them (when ctrl is null). +inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a, + const ctrl_t* ctrl_b, + const void* const& slot_a, + const void* const& slot_b) { + // If either control byte is null, then we can't tell. + if (ctrl_a == nullptr || ctrl_b == nullptr) return true; + const void* low_slot = slot_a; + const void* hi_slot = slot_b; + if (ctrl_a > ctrl_b) { + std::swap(ctrl_a, ctrl_b); + std::swap(low_slot, hi_slot); + } + return ctrl_b < low_slot && low_slot <= hi_slot; +} + +// Asserts that two iterators come from the same container. +// Note: we take slots by reference so that it's not UB if they're uninitialized +// as long as we don't read them (when ctrl is null). 
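// [Editor's note, not part of the upstream change.] Why the interleaving test
// above is sound: a table is one allocation with the control array first and
// the slot array after it (see SlotOffset/AllocSize below), so for two
// iterators into the same table both ctrl pointers precede both slot
// pointers, and after ordering the pair we always get
// ctrl_a <= ctrl_b < slot_a <= slot_b. The assertion therefore only fires
// when that layout cannot hold, i.e. when the iterators really do point into
// different backing arrays (different containers, or a container that has
// since rehashed into a new allocation).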
+inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, + const void* const& slot_a, + const void* const& slot_b) { + ABSL_HARDENING_ASSERT( + AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && + "Invalid iterator comparison. The iterators may be from different " + "containers or the container might have rehashed."); } struct FindInfo { @@ -539,44 +852,40 @@ struct FindInfo { size_t probe_length; }; -// The representation of the object has two modes: -// - small: For capacities < kWidth-1 -// - large: For the rest. +// Whether a table is "small". A small table fits entirely into a probing +// group, i.e., has a capacity < `Group::kWidth`. // -// Differences: -// - In small mode we are able to use the whole capacity. The extra control -// bytes give us at least one "empty" control byte to stop the iteration. -// This is important to make 1 a valid capacity. +// In small mode we are able to use the whole capacity. The extra control +// bytes give us at least one "empty" control byte to stop the iteration. +// This is important to make 1 a valid capacity. // -// - In small mode only the first `capacity()` control bytes after the -// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not -// represent a real slot. This is important to take into account on -// find_first_non_full(), where we never try ShouldInsertBackwards() for -// small tables. +// In small mode only the first `capacity` control bytes after the sentinel +// are valid. The rest contain dummy ctrl_t::kEmpty values that do not +// represent a real slot. This is important to take into account on +// `find_first_non_full()`, where we never try +// `ShouldInsertBackwards()` for small tables. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } +// Begins a probing operation on `ctrl`, using `hash`. inline probe_seq probe(const ctrl_t* ctrl, size_t hash, size_t capacity) { return probe_seq(H1(hash, ctrl), capacity); } -// Probes the raw_hash_set with the probe sequence for hash and returns the -// pointer to the first empty or deleted slot. -// NOTE: this function must work with tables having both ctrl_t::kEmpty and -// ctrl_t::kDeleted in one group. Such tables appears during -// drop_deletes_without_resize. +// Probes an array of control bits using a probe sequence derived from `hash`, +// and returns the offset corresponding to the first deleted or empty slot. // -// This function is very useful when insertions happen and: -// - the input is already a set -// - there are enough slots -// - the element with the hash is not in the table +// Behavior when the entire table is full is undefined. +// +// NOTE: this function must work with tables having both empty and deleted +// slots in the same group. Such tables appear during `erase()`. template inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, size_t capacity) { auto seq = probe(ctrl, hash, capacity); while (true) { Group g{ctrl + seq.offset()}; - auto mask = g.MatchEmptyOrDeleted(); + auto mask = g.MaskEmptyOrDeleted(); if (mask) { #if !defined(NDEBUG) // We want to add entropy even when ASLR is not enabled. @@ -599,7 +908,8 @@ inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, // corresponding translation unit. extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); -// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel. +// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire +// array as marked as empty. 
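// [Editor's sketch, not part of the upstream change; the real loop is in
// raw_hash_set::find() further down.] A lookup composes the pieces defined
// above: walk the probe sequence group by group, use Match(H2) for candidate
// slots, and stop at the first group that still contains an empty byte,
// since such a group has never been full and the key cannot lie past it:
//
//   auto seq = probe(ctrl, hash, capacity);
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash))) {
//       if (/* slot at seq.offset(i) equals the key */) return seq.offset(i);
//     }
//     if (g.MaskEmpty()) break;  // miss: this group still has empty bytes
//     seq.next();
//   }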
inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { std::memset(ctrl, static_cast(ctrl_t::kEmpty), @@ -608,8 +918,10 @@ inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, SanitizerPoisonMemoryRegion(slot, slot_size * capacity); } -// Sets the control byte, and if `i < NumClonedBytes()`, set the cloned byte -// at the end too. +// Sets `ctrl[i]` to `h`. +// +// Unlike setting it directly, this function will perform bounds checks and +// mirror the value to the cloned tail if necessary. inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { assert(i < capacity); @@ -625,25 +937,28 @@ inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; } +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); } -// The allocated block consists of `capacity + 1 + NumClonedBytes()` control -// bytes followed by `capacity` slots, which must be aligned to `slot_align`. -// SlotOffset returns the offset of the slots into the allocated block. +// Given the capacity of a table, computes the offset (from the start of the +// backing allocation) at which the slots begin. inline size_t SlotOffset(size_t capacity, size_t slot_align) { assert(IsValidCapacity(capacity)); const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); return (num_control_bytes + slot_align - 1) & (~slot_align + 1); } -// Returns the size of the allocated block. See also above comment. +// Given the capacity of a table, computes the total size of the backing +// array. inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { return SlotOffset(capacity, slot_align) + capacity * slot_size; } +// A SwissTable. +// // Policy: a policy defines how to perform different operations on // the slots of the hashtable (see hash_policy_traits.h for the full interface // of policy). @@ -758,16 +1073,19 @@ class raw_hash_set { // PRECONDITION: not an end() iterator. reference operator*() const { - AssertIsFull(ctrl_); + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator*()"); return PolicyTraits::element(slot_); } // PRECONDITION: not an end() iterator. - pointer operator->() const { return &operator*(); } + pointer operator->() const { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator->"); + return &operator*(); + } // PRECONDITION: not an end() iterator. iterator& operator++() { - AssertIsFull(ctrl_); + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator++"); ++ctrl_; ++slot_; skip_empty_or_deleted(); @@ -781,8 +1099,9 @@ class raw_hash_set { } friend bool operator==(const iterator& a, const iterator& b) { - AssertIsValid(a.ctrl_); - AssertIsValid(b.ctrl_); + AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_); + AssertIsValidForComparison(a.ctrl_); + AssertIsValidForComparison(b.ctrl_); return a.ctrl_ == b.ctrl_; } friend bool operator!=(const iterator& a, const iterator& b) { @@ -793,9 +1112,13 @@ class raw_hash_set { iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { // This assumption helps the compiler know that any non-end iterator is // not equal to any end iterator. 
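// [Editor's worked example, not part of the upstream change; assumes
// Group::kWidth == 16 and the 7/8 load factor used above.] Following a
// reserve(100) through the helpers defined earlier:
//   GrowthToLowerboundCapacity(100)  -> roughly 100 * 8/7, i.e. ~115
//   NormalizeCapacity(~115)          -> 127 (next 2^m - 1)
//   CapacityToGrowth(127)            -> 127 - 127/8 = 112 >= 100 as required
// and the single backing allocation for 8-byte, 8-byte-aligned slots:
//   control bytes: 127 + 1 (sentinel) + 15 (cloned) = 143
//   SlotOffset(127, 8)   = 143 rounded up to 8      = 144
//   AllocSize(127, 8, 8) = 144 + 127 * 8            = 1160 bytes in total.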
- ABSL_INTERNAL_ASSUME(ctrl != nullptr); + ABSL_ASSUME(ctrl != nullptr); } + // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until + // they reach one. + // + // If a sentinel is reached, we null `ctrl_` out instead. void skip_empty_or_deleted() { while (IsEmptyOrDeleted(*ctrl_)) { uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); @@ -858,11 +1181,12 @@ class raw_hash_set { std::is_nothrow_default_constructible::value&& std::is_nothrow_default_constructible::value) {} - explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), + explicit raw_hash_set(size_t bucket_count, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : ctrl_(EmptyGroup()), - settings_(0, HashtablezInfoHandle(), hash, eq, alloc) { + settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) { if (bucket_count) { capacity_ = NormalizeCapacity(bucket_count); initialize_slots(); @@ -987,14 +1311,16 @@ class raw_hash_set { std::is_nothrow_copy_constructible::value) : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), slots_(absl::exchange(that.slots_, nullptr)), - size_(absl::exchange(that.size_, 0)), - capacity_(absl::exchange(that.capacity_, 0)), + size_(absl::exchange(that.size_, size_t{0})), + capacity_(absl::exchange(that.capacity_, size_t{0})), // Hash, equality and allocator are copied instead of moved because // `that` must be left valid. If Hash is std::function, moving it // would create a nullptr functor that cannot be called. - settings_(absl::exchange(that.growth_left(), 0), + settings_(absl::exchange(that.growth_left(), size_t{0}), absl::exchange(that.infoz(), HashtablezInfoHandle()), - that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} + that.hash_ref(), + that.eq_ref(), + that.alloc_ref()) {} raw_hash_set(raw_hash_set&& that, const allocator_type& a) : ctrl_(EmptyGroup()), @@ -1038,7 +1364,7 @@ class raw_hash_set { typename AllocTraits::propagate_on_container_move_assignment()); } - ~raw_hash_set() { destroy_slots(); } + ~raw_hash_set() { destroy_slots(/*reset=*/false); } iterator begin() { auto it = iterator_at(0); @@ -1068,7 +1394,7 @@ class raw_hash_set { // largest bucket_count() threshold for which iteration is still fast and // past that we simply deallocate the array. if (capacity_ > 127) { - destroy_slots(); + destroy_slots(/*reset=*/true); infoz().RecordClearedReservation(); } else if (capacity_) { @@ -1092,8 +1418,7 @@ class raw_hash_set { // m.insert(std::make_pair("abc", 42)); // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc // bug. - template = 0, - class T2 = T, + template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> std::pair insert(T&& value) { @@ -1313,7 +1638,7 @@ class raw_hash_set { // This overload is necessary because otherwise erase(const K&) would be // a better match if non-const iterator is passed as an argument. 
void erase(iterator it) { - AssertIsFull(it.ctrl_); + ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, "erase()"); PolicyTraits::destroy(&alloc_ref(), it.slot_); erase_meta_only(it); } @@ -1347,7 +1672,7 @@ class raw_hash_set { } node_type extract(const_iterator position) { - AssertIsFull(position.inner_.ctrl_); + ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, "extract()"); auto node = CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); erase_meta_only(position); @@ -1382,7 +1707,7 @@ class raw_hash_set { void rehash(size_t n) { if (n == 0 && capacity_ == 0) return; if (n == 0 && size_ == 0) { - destroy_slots(); + destroy_slots(/*reset=*/true); infoz().RecordStorageChanged(0, 0); infoz().RecordClearedReservation(); return; @@ -1434,11 +1759,13 @@ class raw_hash_set { template void prefetch(const key_arg& key) const { (void)key; -#if defined(__GNUC__) + // Avoid probing if we won't be able to prefetch the addresses received. +#ifdef ABSL_INTERNAL_HAVE_PREFETCH + prefetch_heap_block(); auto seq = probe(ctrl_, hash_ref()(key), capacity_); - __builtin_prefetch(static_cast(ctrl_ + seq.offset())); - __builtin_prefetch(static_cast(slots_ + seq.offset())); -#endif // __GNUC__ + base_internal::PrefetchT0(ctrl_ + seq.offset()); + base_internal::PrefetchT0(slots_ + seq.offset()); +#endif // ABSL_INTERNAL_HAVE_PREFETCH } // The API of find() has two extensions. @@ -1453,19 +1780,20 @@ class raw_hash_set { auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; - for (int i : g.Match(H2(hash))) { + for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) return iterator_at(seq.offset(i)); } - if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); seq.next(); assert(seq.index() <= capacity_ && "full table!"); } } template iterator find(const key_arg& key) { + prefetch_heap_block(); return find(key, hash_ref()(key)); } @@ -1475,6 +1803,7 @@ class raw_hash_set { } template const_iterator find(const key_arg& key) const { + prefetch_heap_block(); return find(key, hash_ref()(key)); } @@ -1524,6 +1853,14 @@ class raw_hash_set { return !(a == b); } + template + friend typename std::enable_if::value, + H>::type + AbslHashValue(H h, const raw_hash_set& s) { + return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), + s.size()); + } + friend void swap(raw_hash_set& a, raw_hash_set& b) noexcept(noexcept(a.swap(b))) { a.swap(b); @@ -1589,17 +1926,17 @@ class raw_hash_set { slot_type&& slot; }; - // "erases" the object from the container, except that it doesn't actually - // destroy the object. It only updates all the metadata of the class. - // This can be used in conjunction with Policy::transfer to move the object to - // another place. + // Erases, but does not destroy, the value pointed to by `it`. + // + // This merely updates the pertinent control byte. This can be used in + // conjunction with Policy::transfer to move the object to another place. 
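// [Editor's note, not part of the upstream change.] How the body below picks
// between kEmpty and a kDeleted tombstone, assuming Group::kWidth == 8 and
// control bytes around the erased slot X laid out as
//
//   ... E F F X F F E ...
//
// the run of non-empty bytes containing X has length 5 < kWidth, so no probe
// window of width 8 ever saw this neighbourhood without an empty byte; no
// in-flight probe sequence can be relying on X to keep going, X can be
// marked kEmpty, and growth_left() gets the slot back. If the run reaches
// kWidth, X must become a kDeleted tombstone instead.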
void erase_meta_only(const_iterator it) { assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); --size_; - const size_t index = it.inner_.ctrl_ - ctrl_; + const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); const size_t index_before = (index - Group::kWidth) & capacity_; - const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); - const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); + const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MaskEmpty(); // We count how many consecutive non empties we have to the right and to the // left of `it`. If the sum is >= kWidth then there is at least one probe @@ -1615,6 +1952,11 @@ class raw_hash_set { infoz().RecordErase(); } + // Allocates a backing array for `self` and initializes its control bytes. + // This reads `capacity_` and updates all other fields based on the result of + // the allocation. + // + // This does not free the currently held array; `capacity_` must be nonzero. void initialize_slots() { assert(capacity_); // Folks with custom allocators often make unwarranted assumptions about the @@ -1629,7 +1971,7 @@ class raw_hash_set { // bound more carefully. if (std::is_same>::value && slots_ == nullptr) { - infoz() = Sample(); + infoz() = Sample(sizeof(slot_type)); } char* mem = static_cast(Allocate( @@ -1643,7 +1985,11 @@ class raw_hash_set { infoz().RecordStorageChanged(size_, capacity_); } - void destroy_slots() { + // Destroys all slots in the backing array, frees the backing array, + // If reset is true, also clears all top-level book-keeping data. + // + // This essentially implements `map = raw_hash_set();`. + void destroy_slots(bool reset) { if (!capacity_) return; for (size_t i = 0; i != capacity_; ++i) { if (IsFull(ctrl_[i])) { @@ -1656,11 +2002,13 @@ class raw_hash_set { Deallocate( &alloc_ref(), ctrl_, AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))); - ctrl_ = EmptyGroup(); - slots_ = nullptr; - size_ = 0; - capacity_ = 0; - growth_left() = 0; + if (reset) { + ctrl_ = EmptyGroup(); + slots_ = nullptr; + size_ = 0; + capacity_ = 0; + growth_left() = 0; + } } void resize(size_t new_capacity) { @@ -1693,6 +2041,9 @@ class raw_hash_set { infoz().RecordRehash(total_probe_length); } + // Prunes control bytes to remove as many tombstones as possible. + // + // See the comment on `rehash_and_grow_if_necessary()`. void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { assert(IsValidCapacity(capacity_)); assert(!is_small(capacity_)); @@ -1759,6 +2110,11 @@ class raw_hash_set { infoz().RecordRehash(total_probe_length); } + // Called whenever the table *might* need to conditionally grow. + // + // This function is an optimization opportunity to perform a rehash even when + // growth is unnecessary, because vacating tombstones is beneficial for + // performance in the long-run. 
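// [Editor's note, not part of the upstream change; exact thresholds elided.]
// The shape of the policy implemented below: a table with no capacity
// resizes to capacity 1; otherwise, when an insert finds growth_left()
// exhausted, the table either rehashes in place at the same capacity via
// drop_deletes_without_resize() (when enough of the occupancy is tombstones
// rather than live values that purging them frees sufficient room) or grows
// to the next valid capacity, 2 * capacity + 1.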
void rehash_and_grow_if_necessary() { if (capacity_ == 0) { resize(1); @@ -1818,12 +2174,12 @@ class raw_hash_set { auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; - for (int i : g.Match(H2(hash))) { + for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == elem)) return true; } - if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false; + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false; seq.next(); assert(seq.index() <= capacity_ && "full table!"); } @@ -1843,25 +2199,33 @@ class raw_hash_set { } protected: + // Attempts to find `key` in the table; if it isn't found, returns a slot that + // the value can be inserted into, with the control byte already set to + // `key`'s H2. template std::pair find_or_prepare_insert(const K& key) { + prefetch_heap_block(); auto hash = hash_ref()(key); auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; - for (int i : g.Match(H2(hash))) { + for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) return {seq.offset(i), false}; } - if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break; + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break; seq.next(); assert(seq.index() <= capacity_ && "full table!"); } return {prepare_insert(hash), true}; } + // Given the hash of a value not currently in the table, finds the next + // viable slot index to insert it at. + // + // REQUIRES: At least one non-full slot available. size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { auto target = find_first_non_full(ctrl_, hash, capacity_); if (ABSL_PREDICT_FALSE(growth_left() == 0 && @@ -1905,8 +2269,25 @@ class raw_hash_set { growth_left() = CapacityToGrowth(capacity()) - size_; } + // The number of slots we can still fill without needing to rehash. + // + // This is stored separately due to tombstones: we do not include tombstones + // in the growth capacity, because we'd like to rehash when the table is + // otherwise filled with tombstones: otherwise, probe sequences might get + // unacceptably long without triggering a rehash. Callers can also force a + // rehash via the standard `rehash(0)`, which will recompute this value as a + // side-effect. + // + // See `CapacityToGrowth()`. size_t& growth_left() { return settings_.template get<0>(); } + // Prefetch the heap-allocated memory region to resolve potential TLB misses. + // This is intended to overlap with execution of calculating the hash for a + // key. + void prefetch_heap_block() const { + base_internal::PrefetchT2(ctrl_); + } + HashtablezInfoHandle& infoz() { return settings_.template get<1>(); } hasher& hash_ref() { return settings_.template get<2>(); } @@ -1921,20 +2302,33 @@ class raw_hash_set { // TODO(alkis): Investigate removing some of these fields: // - ctrl/slots can be derived from each other // - size can be moved into the slot array - ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t] - slot_type* slots_ = nullptr; // [capacity * slot_type] - size_t size_ = 0; // number of full slots - size_t capacity_ = 0; // total number of slots + + // The control bytes (and, also, a pointer to the base of the backing array). + // + // This contains `capacity_ + 1 + NumClonedBytes()` entries, even + // when the table is empty (hence EmptyGroup). + ctrl_t* ctrl_ = EmptyGroup(); + // The beginning of the slots, located at `SlotOffset()` bytes after + // `ctrl_`. 
May be null for empty tables. + slot_type* slots_ = nullptr; + + // The number of filled slots. + size_t size_ = 0; + + // The total number of available slots. + size_t capacity_ = 0; absl::container_internal::CompressedTuple - settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{}, + settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, allocator_type{}}; }; // Erases all elements that satisfy the predicate `pred` from the container `c`. template -void EraseIf(Predicate& pred, raw_hash_set* c) { +typename raw_hash_set::size_type EraseIf( + Predicate& pred, raw_hash_set* c) { + const auto initial_size = c->size(); for (auto it = c->begin(), last = c->end(); it != last;) { if (pred(*it)) { c->erase(it++); @@ -1942,6 +2336,7 @@ void EraseIf(Predicate& pred, raw_hash_set* c) { ++it; } } + return initial_size - c->size(); } namespace hashtable_debug_internal { @@ -1957,7 +2352,7 @@ struct HashtableDebugAccess> { auto seq = probe(set.ctrl_, hash, set.capacity_); while (true) { container_internal::Group g{set.ctrl_ + seq.offset()}; - for (int i : g.Match(container_internal::H2(hash))) { + for (uint32_t i : g.Match(container_internal::H2(hash))) { if (Traits::apply( typename Set::template EqualElement{ key, set.eq_ref()}, @@ -1965,7 +2360,7 @@ struct HashtableDebugAccess> { return num_probes; ++num_probes; } - if (g.MatchEmpty()) return num_probes; + if (g.MaskEmpty()) return num_probes; seq.next(); ++num_probes; } @@ -2007,4 +2402,6 @@ struct HashtableDebugAccess> { ABSL_NAMESPACE_END } // namespace absl +#undef ABSL_INTERNAL_ASSERT_IS_FULL + #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc index c886d3ad43..e17ba9b43f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc @@ -12,13 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/container/internal/raw_hash_set.h" - +#include +#include #include #include +#include +#include #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/raw_hash_set.h" #include "absl/strings/str_format.h" #include "benchmark/benchmark.h" @@ -202,40 +205,113 @@ void CacheInSteadyStateArgs(Benchmark* bm) { BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs); void BM_EndComparison(benchmark::State& state) { + StringTable t = {{"a", "a"}, {"b", "b"}}; + auto it = t.begin(); + for (auto i : state) { + benchmark::DoNotOptimize(t); + benchmark::DoNotOptimize(it); + benchmark::DoNotOptimize(it != t.end()); + } +} +BENCHMARK(BM_EndComparison); + +void BM_Iteration(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); string_generator gen{12}; StringTable t; - while (t.size() < state.range(0)) { + + size_t capacity = state.range(0); + size_t size = state.range(1); + t.reserve(capacity); + + while (t.size() < size) { t.emplace(gen(rng), gen(rng)); } - for (auto _ : state) { + for (auto i : state) { + benchmark::DoNotOptimize(t); for (auto it = t.begin(); it != t.end(); ++it) { - benchmark::DoNotOptimize(it); - benchmark::DoNotOptimize(t); - benchmark::DoNotOptimize(it != t.end()); + benchmark::DoNotOptimize(*it); } } } -BENCHMARK(BM_EndComparison)->Arg(400); -void BM_CopyCtor(benchmark::State& state) { +BENCHMARK(BM_Iteration) + ->ArgPair(1, 1) + ->ArgPair(2, 2) + ->ArgPair(4, 4) + ->ArgPair(7, 7) + ->ArgPair(10, 10) + ->ArgPair(15, 15) + ->ArgPair(16, 16) + ->ArgPair(54, 54) + ->ArgPair(100, 100) + ->ArgPair(400, 400) + // empty + ->ArgPair(0, 0) + ->ArgPair(10, 0) + ->ArgPair(100, 0) + ->ArgPair(1000, 0) + ->ArgPair(10000, 0) + // sparse + ->ArgPair(100, 1) + ->ArgPair(1000, 10); + +void BM_CopyCtorSparseInt(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); IntTable t; std::uniform_int_distribution dist(0, ~uint64_t{}); - while (t.size() < state.range(0)) { + size_t size = state.range(0); + t.reserve(size * 10); + while (t.size() < size) { t.emplace(dist(rng)); } - for (auto _ : state) { + for (auto i : state) { IntTable t2 = t; benchmark::DoNotOptimize(t2); } } -BENCHMARK(BM_CopyCtor)->Range(128, 4096); +BENCHMARK(BM_CopyCtorSparseInt)->Range(128, 4096); + +void BM_CopyCtorInt(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + IntTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + + size_t size = state.range(0); + while (t.size() < size) { + t.emplace(dist(rng)); + } + + for (auto i : state) { + IntTable t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyCtorInt)->Range(128, 4096); + +void BM_CopyCtorString(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + StringTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + + size_t size = state.range(0); + while (t.size() < size) { + t.emplace(std::to_string(dist(rng)), std::to_string(dist(rng))); + } + + for (auto i : state) { + StringTable t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyCtorString)->Range(128, 4096); void BM_CopyAssign(benchmark::State& state) { std::random_device rd; @@ -330,33 +406,42 @@ void BM_Group_Match(benchmark::State& state) { h2_t h = 1; for (auto _ : state) { ::benchmark::DoNotOptimize(h); + ::benchmark::DoNotOptimize(g); ::benchmark::DoNotOptimize(g.Match(h)); } } BENCHMARK(BM_Group_Match); -void BM_Group_MatchEmpty(benchmark::State& state) { +void 
BM_Group_MaskEmpty(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -4); Group g{group.data()}; - for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmpty()); + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MaskEmpty()); + } } -BENCHMARK(BM_Group_MatchEmpty); +BENCHMARK(BM_Group_MaskEmpty); -void BM_Group_MatchEmptyOrDeleted(benchmark::State& state) { +void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -4); Group g{group.data()}; - for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmptyOrDeleted()); + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted()); + } } -BENCHMARK(BM_Group_MatchEmptyOrDeleted); +BENCHMARK(BM_Group_MaskEmptyOrDeleted); void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -2); Group g{group.data()}; - for (auto _ : state) + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted()); + } } BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted); @@ -364,7 +449,10 @@ void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -2); Group g{group.data()}; - for (auto _ : state) ::benchmark::DoNotOptimize(*g.MatchEmptyOrDeleted()); + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet()); + } } BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted); @@ -425,7 +513,6 @@ void CodegenAbslRawHashSetInt64Iterate( int odr = (::benchmark::DoNotOptimize(std::make_tuple( &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd, - &CodegenAbslRawHashSetInt64Insert, - &CodegenAbslRawHashSetInt64Contains, + &CodegenAbslRawHashSetInt64Insert, &CodegenAbslRawHashSetInt64Contains, &CodegenAbslRawHashSetInt64Iterate)), 1); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc index b46c492030..daa3281450 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc @@ -14,28 +14,40 @@ #include "absl/container/internal/raw_hash_set.h" +#include #include #include #include #include #include +#include +#include +#include #include #include +#include #include #include +#include #include #include +#include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/prefetch.h" #include "absl/base/internal/raw_logging.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" #include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hashtable_debug.h" +#include "absl/log/log.h" #include "absl/strings/string_view.h" namespace absl { @@ -194,35 +206,39 @@ TEST(Group, Match) { } } -TEST(Group, MatchEmpty) { +TEST(Group, MaskEmpty) { if (Group::kWidth == 16) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), ctrl_t::kEmpty, 
CtrlT(5), ctrl_t::kSentinel, CtrlT(7), CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4)); + EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4); } else if (Group::kWidth == 8) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), ctrl_t::kDeleted, CtrlT(2), CtrlT(1), ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0)); + EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0); } else { FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; } } -TEST(Group, MatchEmptyOrDeleted) { +TEST(Group, MaskEmptyOrDeleted) { if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4)); + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3), + ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4); } else if (Group::kWidth == 8) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), ctrl_t::kDeleted, CtrlT(2), CtrlT(1), ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3)); + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3); } else { FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; } @@ -334,7 +350,7 @@ class StringPolicy { struct ctor {}; template - slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} + explicit slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} std::pair pair; }; @@ -406,7 +422,7 @@ struct CustomAlloc : std::allocator { CustomAlloc() {} template - CustomAlloc(const CustomAlloc& other) {} + explicit CustomAlloc(const CustomAlloc& /*other*/) {} template struct rebind { using other = CustomAlloc; @@ -1244,7 +1260,7 @@ ExpectedStats XorSeedExpectedStats() { case 16: if (kRandomizesInserts) { return {0.1, - 1.0, + 2.0, {{0.95, 0.1}}, {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { @@ -1258,6 +1274,7 @@ ExpectedStats XorSeedExpectedStats() { return {}; } +// TODO(b/80415403): Figure out why this test is so flaky, esp. 
on MSVC TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { ProbeStatsPerSize stats; std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; @@ -1269,6 +1286,7 @@ TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { for (size_t size : sizes) { auto& stat = stats[size]; VerifyStats(size, expected, stat); + LOG(INFO) << size << " " << stat; } } @@ -1330,17 +1348,17 @@ ExpectedStats LinearTransformExpectedStats() { {{0.95, 0.3}}, {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { - return {0.15, - 0.5, - {{0.95, 0.3}}, - {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}}; + return {0.4, + 0.6, + {{0.95, 0.5}}, + {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}}; } case 16: if (kRandomizesInserts) { return {0.1, 0.4, {{0.95, 0.3}}, - {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}}; } else { return {0.05, 0.2, @@ -1352,6 +1370,7 @@ ExpectedStats LinearTransformExpectedStats() { return {}; } +// TODO(b/80415403): Figure out why this test is so flaky. TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) { ProbeStatsPerSize stats; std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; @@ -1363,6 +1382,7 @@ TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) { for (size_t size : sizes) { auto& stat = stats[size]; VerifyStats(size, expected, stat); + LOG(INFO) << size << " " << stat; } } @@ -1497,7 +1517,7 @@ TEST(Table, RehashZeroForcesRehash) { TEST(Table, ConstructFromInitList) { using P = std::pair; struct Q { - operator P() const { return {}; } + operator P() const { return {}; } // NOLINT }; StringTable t = {P(), Q(), {}, {{}, {}}}; } @@ -2016,20 +2036,75 @@ TEST(Table, UnstablePointers) { EXPECT_NE(old_ptr, addr(0)); } -// Confirm that we assert if we try to erase() end(). -TEST(TableDeathTest, EraseOfEndAsserts) { +bool IsAssertEnabled() { // Use an assert with side-effects to figure out if they are actually enabled. bool assert_enabled = false; - assert([&]() { + assert([&]() { // NOLINT assert_enabled = true; return true; }()); - if (!assert_enabled) return; + return assert_enabled; +} + +TEST(TableDeathTest, InvalidIteratorAsserts) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; IntTable t; // Extra simple "regexp" as regexp support is highly varied across platforms. - constexpr char kDeathMsg[] = "Invalid operation on iterator"; - EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg); + EXPECT_DEATH_IF_SUPPORTED( + t.erase(t.end()), + "erase.* called on invalid iterator. The iterator might be an " + "end.*iterator or may have been default constructed."); + typename IntTable::iterator iter; + EXPECT_DEATH_IF_SUPPORTED( + ++iter, + "operator.* called on invalid iterator. The iterator might be an " + "end.*iterator or may have been default constructed."); + t.insert(0); + iter = t.begin(); + t.erase(iter); + EXPECT_DEATH_IF_SUPPORTED( + ++iter, + "operator.* called on invalid iterator. The element might have been " + "erased or .*the table might have rehashed."); +} + +TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + IntTable t; + t.insert(1); + t.insert(2); + t.insert(3); + auto iter1 = t.begin(); + auto iter2 = std::next(iter1); + ASSERT_NE(iter1, t.end()); + ASSERT_NE(iter2, t.end()); + t.erase(iter1); + // Extra simple "regexp" as regexp support is highly varied across platforms. 
+ const char* const kErasedDeathMessage = + "Invalid iterator comparison. The element might have .*been erased or " + "the table might have rehashed."; + EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage); + EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage); + t.erase(iter2); + EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage); + + IntTable t1, t2; + t1.insert(0); + t2.insert(0); + iter1 = t1.begin(); + iter2 = t2.begin(); + const char* const kContainerDiffDeathMessage = + "Invalid iterator comparison. The iterators may be from different " + ".*containers or the container might have rehashed."; + EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage); + EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage); + + for (int i = 0; i < 10; ++i) t1.insert(i); + // There should have been a rehash in t1. + EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), + kContainerDiffDeathMessage); } #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) @@ -2040,7 +2115,7 @@ TEST(RawHashSamplerTest, Sample) { auto& sampler = GlobalHashtablezSampler(); size_t start_size = 0; - std::unordered_set preexisting_info; + absl::flat_hash_set preexisting_info; start_size += sampler.Iterate([&](const HashtablezInfo& info) { preexisting_info.insert(&info); ++start_size; @@ -2067,14 +2142,15 @@ TEST(RawHashSamplerTest, Sample) { } } size_t end_size = 0; - std::unordered_map observed_checksums; - std::unordered_map reservations; + absl::flat_hash_map observed_checksums; + absl::flat_hash_map reservations; end_size += sampler.Iterate([&](const HashtablezInfo& info) { if (preexisting_info.count(&info) == 0) { observed_checksums[info.hashes_bitwise_xor.load( std::memory_order_relaxed)]++; reservations[info.max_reserve.load(std::memory_order_relaxed)]++; } + EXPECT_EQ(info.inline_element_size, sizeof(int64_t)); ++end_size; }); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h index c1d20f3c52..7e84dc2554 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h @@ -476,7 +476,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { // containers in unspecified state (and in practice in causes memory-leak // according to heap-checker!). 
-REGISTER_TYPED_TEST_CASE_P( +REGISTER_TYPED_TEST_SUITE_P( ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h index e76421e508..3713cd9a2b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h @@ -107,8 +107,8 @@ TYPED_TEST_P(LookupTest, EqualRange) { } } -REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find, - EqualRange); +REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, + EqualRange); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h index d3543936f7..4d9ab30fd4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h @@ -297,11 +297,12 @@ TYPED_TEST_P(ModifiersTest, Swap) { // TODO(alkis): Write tests for extract. // TODO(alkis): Write tests for merge. -REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, - InsertRange, InsertWithinCapacity, - InsertRangeWithinCapacity, InsertOrAssign, - InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace, - TryEmplaceHint, Erase, EraseRange, EraseKey, Swap); +REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, InsertWithinCapacity, + InsertRangeWithinCapacity, InsertOrAssign, + InsertOrAssignHint, Emplace, EmplaceHint, + TryEmplace, TryEmplaceHint, Erase, EraseRange, + EraseKey, Swap); template struct is_unique_ptr : std::false_type {}; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h index 41165b05e9..af1116e6c3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h @@ -478,7 +478,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); } -REGISTER_TYPED_TEST_CASE_P( +REGISTER_TYPED_TEST_SUITE_P( ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h index 8f2f4b207e..b35f766e79 100644 --- 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h @@ -82,7 +82,7 @@ TYPED_TEST_P(LookupTest, EqualRange) { } } -REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange); +REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h index 6e473e45da..d8864bb28e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h @@ -209,10 +209,10 @@ TYPED_TEST_P(ModifiersTest, Swap) { // TODO(alkis): Write tests for extract. // TODO(alkis): Write tests for merge. -REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, - InsertRange, InsertWithinCapacity, - InsertRangeWithinCapacity, Emplace, EmplaceHint, - Erase, EraseRange, EraseKey, Swap); +REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, InsertWithinCapacity, + InsertRangeWithinCapacity, Emplace, EmplaceHint, + Erase, EraseRange, EraseKey, Swap); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map.h index 7a39f6284c..6868e63a42 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map.h @@ -41,9 +41,10 @@ #include #include "absl/algorithm/container.h" +#include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export -#include "absl/container/internal/node_hash_policy.h" +#include "absl/container/internal/node_slot_policy.h" #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export #include "absl/memory/memory.h" @@ -77,6 +78,10 @@ class NodeHashMapPolicy; // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // +// Using `absl::node_hash_map` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. +// // Example: // // // Create a node hash map of three strings (that map to strings) @@ -347,8 +352,8 @@ class node_hash_map // `node_hash_map`. // // iterator try_emplace(const_iterator hint, - // const init_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args): + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `node_hash_map` using the position of `hint` as a non-binding suggestion @@ -525,17 +530,19 @@ class node_hash_map // erase_if(node_hash_map<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. 
template -void erase_if(node_hash_map& c, Predicate pred) { - container_internal::EraseIf(pred, &c); +typename node_hash_map::size_type erase_if( + node_hash_map& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); } namespace container_internal { template class NodeHashMapPolicy - : public absl::container_internal::node_hash_policy< + : public absl::container_internal::node_slot_policy< std::pair&, NodeHashMapPolicy> { using value_type = std::pair; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map_test.cc index 8f59a1e4a2..e941a836a4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_map_test.cc @@ -223,33 +223,36 @@ TEST(NodeHashMap, EraseIf) { // Erase all elements. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, [](std::pair) { return true; }); + EXPECT_EQ(erase_if(s, [](std::pair) { return true; }), 5); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, [](std::pair) { return false; }); + EXPECT_EQ(erase_if(s, [](std::pair) { return false; }), 0); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4), Pair(5, 5))); } // Erase specific elements. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, - [](std::pair kvp) { return kvp.first % 2 == 1; }); + EXPECT_EQ(erase_if(s, + [](std::pair kvp) { + return kvp.first % 2 == 1; + }), + 3); EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); } // Predicate is function reference. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, FirstIsEven); + EXPECT_EQ(erase_if(s, FirstIsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } // Predicate is function pointer. { node_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; - erase_if(s, &FirstIsEven); + EXPECT_EQ(erase_if(s, &FirstIsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set.h index 93b15f4681..f2cc70c3f6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set.h @@ -38,8 +38,9 @@ #include #include "absl/algorithm/container.h" +#include "absl/base/macros.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export -#include "absl/container/internal/node_hash_policy.h" +#include "absl/container/internal/node_slot_policy.h" #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/memory/memory.h" @@ -73,6 +74,10 @@ struct NodeHashSetPolicy; // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // +// Using `absl::node_hash_set` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. 
+// // Example: // // // Create a node hash set of three strings @@ -433,16 +438,18 @@ class node_hash_set // erase_if(node_hash_set<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. template -void erase_if(node_hash_set& c, Predicate pred) { - container_internal::EraseIf(pred, &c); +typename node_hash_set::size_type erase_if( + node_hash_set& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); } namespace container_internal { template struct NodeHashSetPolicy - : absl::container_internal::node_hash_policy> { + : absl::container_internal::node_slot_policy> { using key_type = T; using init_type = T; using constant_iterators = std::true_type; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set_test.cc index 7ddad2021d..98a8dbdd90 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/node_hash_set_test.cc @@ -108,31 +108,31 @@ TEST(NodeHashSet, EraseIf) { // Erase all elements. { node_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, [](int) { return true; }); + EXPECT_EQ(erase_if(s, [](int) { return true; }), 5); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { node_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, [](int) { return false; }); + EXPECT_EQ(erase_if(s, [](int) { return false; }), 0); EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); } // Erase specific elements. { node_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, [](int k) { return k % 2 == 1; }); + EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3); EXPECT_THAT(s, UnorderedElementsAre(2, 4)); } // Predicate is function reference. { node_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, IsEven); + EXPECT_EQ(erase_if(s, IsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } // Predicate is function pointer. { node_hash_set s = {1, 2, 3, 4, 5}; - erase_if(s, &IsEven); + EXPECT_EQ(erase_if(s, &IsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/sample_element_size_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/sample_element_size_test.cc new file mode 100644 index 0000000000..b23626b409 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/container/sample_element_size_test.cc @@ -0,0 +1,114 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
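The erase_if overloads updated above now return the number of erased elements instead of void. A small usage sketch, assuming only the node_hash_set API shown in this patch (the helper name is hypothetical):

#include <cstddef>

#include "absl/container/node_hash_set.h"

// Removes odd keys and reports how many were dropped, relying on the new
// size_type return value of absl::erase_if.
std::size_t DropOddKeys(absl::node_hash_set<int>& s) {
  return absl::erase_if(s, [](int v) { return v % 2 == 1; });
}

// For s = {1, 2, 3, 4, 5} this returns 3 and leaves {2, 4}, matching the
// expectations added in the tests above.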
+ +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +// Create some tables of type `Table`, then look at all the new +// `HashtablezInfo`s to make sure that the `inline_element_size == +// expected_element_size`. The `inline_element_size` is the amount of memory +// allocated for each slot of a hash table, that is `sizeof(slot_type)`. Add +// the new `HashtablezInfo`s to `preexisting_info`. Store all the new tables +// into `tables`. +template +void TestInlineElementSize( + HashtablezSampler& sampler, + // clang-tidy gives a false positive on this declaration. This unordered + // set cannot be flat_hash_set, however, since that would introduce a mutex + // deadlock. + std::unordered_set& preexisting_info, // NOLINT + std::vector& tables, const typename Table::value_type& elt, + size_t expected_element_size) { + for (int i = 0; i < 10; ++i) { + // We create a new table and must store it somewhere so that when we store + // a pointer to the resulting `HashtablezInfo` into `preexisting_info` + // that we aren't storing a dangling pointer. + tables.emplace_back(); + // We must insert an element to get a hashtablez to instantiate. + tables.back().insert(elt); + } + size_t new_count = 0; + sampler.Iterate([&](const HashtablezInfo& info) { + if (preexisting_info.insert(&info).second) { + EXPECT_EQ(info.inline_element_size, expected_element_size); + ++new_count; + } + }); + // Make sure we actually did get a new hashtablez. + EXPECT_GT(new_count, 0); +} + +struct bigstruct { + char a[1000]; + friend bool operator==(const bigstruct& x, const bigstruct& y) { + return memcmp(x.a, y.a, sizeof(x.a)) == 0; + } + template + friend H AbslHashValue(H h, const bigstruct& c) { + return H::combine_contiguous(std::move(h), c.a, sizeof(c.a)); + } +}; +#endif + +TEST(FlatHashMap, SampleElementSize) { +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + // Enable sampling even if the prod default is off. + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(1); + + auto& sampler = GlobalHashtablezSampler(); + std::vector> flat_map_tables; + std::vector> flat_set_tables; + std::vector> node_map_tables; + std::vector> node_set_tables; + + // It takes thousands of new tables after changing the sampling parameters + // before you actually get some instrumentation. And if you must actually + // put something into those tables. + for (int i = 0; i < 10000; ++i) { + flat_map_tables.emplace_back(); + flat_map_tables.back()[i] = bigstruct{}; + } + + // clang-tidy gives a false positive on this declaration. This unordered set + // cannot be a flat_hash_set, however, since that would introduce a mutex + // deadlock. 
+ std::unordered_set preexisting_info; // NOLINT + sampler.Iterate( + [&](const HashtablezInfo& info) { preexisting_info.insert(&info); }); + TestInlineElementSize(sampler, preexisting_info, flat_map_tables, + {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct)); + TestInlineElementSize(sampler, preexisting_info, node_map_tables, + {0, bigstruct{}}, sizeof(void*)); + TestInlineElementSize(sampler, preexisting_info, flat_set_tables, // + bigstruct{}, sizeof(bigstruct)); + TestInlineElementSize(sampler, preexisting_info, node_set_tables, // + bigstruct{}, sizeof(void*)); +#endif +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake index 942ce90a4d..f728c0e5ca 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake @@ -1,8 +1,6 @@ # See absl/copts/copts.py and absl/copts/generate_copts.py include(GENERATED_AbseilCopts) -set(ABSL_LSAN_LINKOPTS "") -set(ABSL_HAVE_LSAN OFF) set(ABSL_DEFAULT_LINKOPTS "") if (BUILD_SHARED_LIBS AND MSVC) @@ -12,7 +10,49 @@ else() set(ABSL_BUILD_DLL FALSE) endif() -if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64") +if(APPLE AND CMAKE_CXX_COMPILER_ID MATCHES [[Clang]]) + # Some CMake targets (not known at the moment of processing) could be set to + # compile for multiple architectures as specified by the OSX_ARCHITECTURES + # property, which is target-specific. We should neither inspect nor rely on + # any CMake property or variable to detect an architecture, in particular: + # + # - CMAKE_OSX_ARCHITECTURES + # is just an initial value for OSX_ARCHITECTURES; set too early. + # + # - OSX_ARCHITECTURES + # is a per-target property; targets could be defined later, and their + # properties could be modified any time later. + # + # - CMAKE_SYSTEM_PROCESSOR + # does not reflect multiple architectures at all. + # + # When compiling for multiple architectures, a build system can invoke a + # compiler either + # + # - once: a single command line for multiple architectures (Ninja build) + # - twice: two command lines per each architecture (Xcode build system) + # + # If case of Xcode, it would be possible to set an Xcode-specific attributes + # like XCODE_ATTRIBUTE_OTHER_CPLUSPLUSFLAGS[arch=arm64] or similar. + # + # In both cases, the viable strategy is to pass all arguments at once, allowing + # the compiler to dispatch arch-specific arguments to a designated backend. + set(ABSL_RANDOM_RANDEN_COPTS "") + foreach(_arch IN ITEMS "x86_64" "arm64") + string(TOUPPER "${_arch}" _arch_uppercase) + string(REPLACE "X86_64" "X64" _arch_uppercase ${_arch_uppercase}) + foreach(_flag IN LISTS ABSL_RANDOM_HWAES_${_arch_uppercase}_FLAGS) + list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Xarch_${_arch}" "${_flag}") + endforeach() + endforeach() + # If a compiler happens to deal with an argument for a currently unused + # architecture, it will warn about an unused command line argument. 
+ option(ABSL_RANDOM_RANDEN_COPTS_WARNING OFF + "Warn if one of ABSL_RANDOM_RANDEN_COPTS is unused") + if(ABSL_RANDOM_RANDEN_COPTS AND NOT ABSL_RANDOM_RANDEN_COPTS_WARNING) + list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Wno-unused-command-line-argument") + endif() +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64") if (MSVC) set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_MSVC_X64_FLAGS}") else() @@ -27,7 +67,6 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*|aarch64") message(WARNING "Value of CMAKE_SIZEOF_VOID_P (${CMAKE_SIZEOF_VOID_P}) is not supported.") endif() else() - message(WARNING "Value of CMAKE_SYSTEM_PROCESSOR (${CMAKE_SYSTEM_PROCESSOR}) is unknown and cannot be used to set ABSL_RANDOM_RANDEN_COPTS") set(ABSL_RANDOM_RANDEN_COPTS "") endif() @@ -43,14 +82,6 @@ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") # MATCHES so we get both Clang an else() set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}") set(ABSL_TEST_COPTS "${ABSL_LLVM_FLAGS};${ABSL_LLVM_TEST_FLAGS}") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - # AppleClang doesn't have lsan - # https://developer.apple.com/documentation/code_diagnostics - if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5) - set(ABSL_LSAN_LINKOPTS "-fsanitize=leak") - set(ABSL_HAVE_LSAN ON) - endif() - endif() endif() elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set(ABSL_DEFAULT_COPTS "${ABSL_MSVC_FLAGS}") diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake index a4ab1aa204..ba70ef9b02 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake @@ -13,22 +13,21 @@ list(APPEND ABSL_CLANG_CL_FLAGS ) list(APPEND ABSL_CLANG_CL_TEST_FLAGS - "-Wno-c99-extensions" "-Wno-deprecated-declarations" - "-Wno-missing-noreturn" + "-Wno-implicit-int-conversion" "-Wno-missing-prototypes" "-Wno-missing-variable-declarations" - "-Wno-null-conversion" "-Wno-shadow" - "-Wno-shift-sign-overflow" + "-Wno-shorten-64-to-32" "-Wno-sign-compare" + "-Wno-sign-conversion" + "-Wno-unreachable-code-loop-increment" "-Wno-unused-function" "-Wno-unused-member-function" "-Wno-unused-parameter" "-Wno-unused-private-field" "-Wno-unused-template" "-Wno-used-but-marked-unused" - "-Wno-zero-as-null-pointer-constant" "-Wno-gnu-zero-variadic-macro-arguments" ) @@ -51,7 +50,6 @@ list(APPEND ABSL_GCC_FLAGS ) list(APPEND ABSL_GCC_TEST_FLAGS - "-Wno-conversion-null" "-Wno-deprecated-declarations" "-Wno-missing-declarations" "-Wno-sign-compare" @@ -80,6 +78,7 @@ list(APPEND ABSL_LLVM_FLAGS "-Wshadow-all" "-Wstring-conversion" "-Wtautological-overlap-compare" + "-Wtautological-unsigned-zero-compare" "-Wundef" "-Wuninitialized" "-Wunreachable-code" @@ -91,30 +90,26 @@ list(APPEND ABSL_LLVM_FLAGS "-Wno-float-conversion" "-Wno-implicit-float-conversion" "-Wno-implicit-int-float-conversion" - "-Wno-implicit-int-conversion" - "-Wno-shorten-64-to-32" - "-Wno-sign-conversion" "-Wno-unknown-warning-option" "-DNOMINMAX" ) list(APPEND ABSL_LLVM_TEST_FLAGS - "-Wno-c99-extensions" "-Wno-deprecated-declarations" - "-Wno-missing-noreturn" + "-Wno-implicit-int-conversion" "-Wno-missing-prototypes" "-Wno-missing-variable-declarations" - "-Wno-null-conversion" "-Wno-shadow" - "-Wno-shift-sign-overflow" + "-Wno-shorten-64-to-32" "-Wno-sign-compare" + "-Wno-sign-conversion" + "-Wno-unreachable-code-loop-increment" 
"-Wno-unused-function" "-Wno-unused-member-function" "-Wno-unused-parameter" "-Wno-unused-private-field" "-Wno-unused-template" "-Wno-used-but-marked-unused" - "-Wno-zero-as-null-pointer-constant" "-Wno-gnu-zero-variadic-macro-arguments" ) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_copts.bzl b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_copts.bzl index a6efc98e11..62aab656a1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_copts.bzl +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/GENERATED_copts.bzl @@ -14,22 +14,21 @@ ABSL_CLANG_CL_FLAGS = [ ] ABSL_CLANG_CL_TEST_FLAGS = [ - "-Wno-c99-extensions", "-Wno-deprecated-declarations", - "-Wno-missing-noreturn", + "-Wno-implicit-int-conversion", "-Wno-missing-prototypes", "-Wno-missing-variable-declarations", - "-Wno-null-conversion", "-Wno-shadow", - "-Wno-shift-sign-overflow", + "-Wno-shorten-64-to-32", "-Wno-sign-compare", + "-Wno-sign-conversion", + "-Wno-unreachable-code-loop-increment", "-Wno-unused-function", "-Wno-unused-member-function", "-Wno-unused-parameter", "-Wno-unused-private-field", "-Wno-unused-template", "-Wno-used-but-marked-unused", - "-Wno-zero-as-null-pointer-constant", "-Wno-gnu-zero-variadic-macro-arguments", ] @@ -52,7 +51,6 @@ ABSL_GCC_FLAGS = [ ] ABSL_GCC_TEST_FLAGS = [ - "-Wno-conversion-null", "-Wno-deprecated-declarations", "-Wno-missing-declarations", "-Wno-sign-compare", @@ -81,6 +79,7 @@ ABSL_LLVM_FLAGS = [ "-Wshadow-all", "-Wstring-conversion", "-Wtautological-overlap-compare", + "-Wtautological-unsigned-zero-compare", "-Wundef", "-Wuninitialized", "-Wunreachable-code", @@ -92,30 +91,26 @@ ABSL_LLVM_FLAGS = [ "-Wno-float-conversion", "-Wno-implicit-float-conversion", "-Wno-implicit-int-float-conversion", - "-Wno-implicit-int-conversion", - "-Wno-shorten-64-to-32", - "-Wno-sign-conversion", "-Wno-unknown-warning-option", "-DNOMINMAX", ] ABSL_LLVM_TEST_FLAGS = [ - "-Wno-c99-extensions", "-Wno-deprecated-declarations", - "-Wno-missing-noreturn", + "-Wno-implicit-int-conversion", "-Wno-missing-prototypes", "-Wno-missing-variable-declarations", - "-Wno-null-conversion", "-Wno-shadow", - "-Wno-shift-sign-overflow", + "-Wno-shorten-64-to-32", "-Wno-sign-compare", + "-Wno-sign-conversion", + "-Wno-unreachable-code-loop-increment", "-Wno-unused-function", "-Wno-unused-member-function", "-Wno-unused-parameter", "-Wno-unused-private-field", "-Wno-unused-template", "-Wno-used-but-marked-unused", - "-Wno-zero-as-null-pointer-constant", "-Wno-gnu-zero-variadic-macro-arguments", ] diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/configure_copts.bzl b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/configure_copts.bzl index 40d5849a3a..c5e57b38bf 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/configure_copts.bzl +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/configure_copts.bzl @@ -25,6 +25,7 @@ ABSL_DEFAULT_COPTS = select({ "//absl:msvc_compiler": ABSL_MSVC_FLAGS, "//absl:clang-cl_compiler": ABSL_CLANG_CL_FLAGS, "//absl:clang_compiler": ABSL_LLVM_FLAGS, + "//absl:gcc_compiler": ABSL_GCC_FLAGS, "//conditions:default": ABSL_GCC_FLAGS, }) @@ -32,6 +33,7 @@ ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({ "//absl:msvc_compiler": ABSL_MSVC_TEST_FLAGS, "//absl:clang-cl_compiler": ABSL_CLANG_CL_TEST_FLAGS, "//absl:clang_compiler": ABSL_LLVM_TEST_FLAGS, + "//absl:gcc_compiler": 
ABSL_GCC_TEST_FLAGS, "//conditions:default": ABSL_GCC_TEST_FLAGS, }) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/copts.py b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/copts.py index 0d6c1ec3a6..732af9eaf8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/copts.py +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/copts/copts.py @@ -17,22 +17,21 @@ MSVC_BIG_WARNING_FLAGS = [ ] LLVM_TEST_DISABLE_WARNINGS_FLAGS = [ - "-Wno-c99-extensions", "-Wno-deprecated-declarations", - "-Wno-missing-noreturn", + "-Wno-implicit-int-conversion", "-Wno-missing-prototypes", "-Wno-missing-variable-declarations", - "-Wno-null-conversion", "-Wno-shadow", - "-Wno-shift-sign-overflow", + "-Wno-shorten-64-to-32", "-Wno-sign-compare", + "-Wno-sign-conversion", + "-Wno-unreachable-code-loop-increment", "-Wno-unused-function", "-Wno-unused-member-function", "-Wno-unused-parameter", "-Wno-unused-private-field", "-Wno-unused-template", "-Wno-used-but-marked-unused", - "-Wno-zero-as-null-pointer-constant", # gtest depends on this GNU extension being offered. "-Wno-gnu-zero-variadic-macro-arguments", ] @@ -68,7 +67,6 @@ COPT_VARS = { "-DNOMINMAX", ], "ABSL_GCC_TEST_FLAGS": [ - "-Wno-conversion-null", "-Wno-deprecated-declarations", "-Wno-missing-declarations", "-Wno-sign-compare", @@ -96,6 +94,7 @@ COPT_VARS = { "-Wshadow-all", "-Wstring-conversion", "-Wtautological-overlap-compare", + "-Wtautological-unsigned-zero-compare", "-Wundef", "-Wuninitialized", "-Wunreachable-code", @@ -109,9 +108,6 @@ COPT_VARS = { "-Wno-float-conversion", "-Wno-implicit-float-conversion", "-Wno-implicit-int-float-conversion", - "-Wno-implicit-int-conversion", - "-Wno-shorten-64-to-32", - "-Wno-sign-conversion", # Disable warnings on unknown warning flags (when warning flags are # unknown on older compiler versions) "-Wno-unknown-warning-option", diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/CMakeLists.txt new file mode 100644 index 0000000000..02c86aca9a --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/CMakeLists.txt @@ -0,0 +1,146 @@ +# Copyright 2022 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + crc_cpu_detect + HDRS + "internal/cpu_detect.h" + SRCS + "internal/cpu_detect.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + crc_internal + HDRS + "internal/crc.h" + "internal/crc32_x86_arm_combined_simd.h" + SRCS + "internal/crc.cc" + "internal/crc_internal.h" + "internal/crc_x86_arm_combined.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc_cpu_detect + absl::base + absl::config + absl::core_headers + absl::dynamic_annotations + absl::endian + absl::prefetch + absl::raw_logging_internal + absl::memory + absl::bits +) + +absl_cc_library( + NAME + crc32c + HDRS + "crc32c.h" + "internal/crc32c.h" + "internal/crc_memcpy.h" + SRCS + "crc32c.cc" + "internal/crc32c_inline.h" + "internal/crc_memcpy_fallback.cc" + "internal/crc_memcpy_x86_64.cc" + "internal/crc_non_temporal_memcpy.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc_cpu_detect + absl::crc_internal + absl::non_temporal_memcpy + absl::config + absl::core_headers + absl::dynamic_annotations + absl::endian + absl::prefetch + absl::strings +) + +absl_cc_test( + NAME + crc32c_test + SRCS + "crc32c_test.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc32c + absl::strings + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + non_temporal_arm_intrinsics + HDRS + "internal/non_temporal_arm_intrinsics.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + non_temporal_memcpy + HDRS + "internal/non_temporal_memcpy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::non_temporal_arm_intrinsics + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + crc_memcpy_test + SRCS + "internal/crc_memcpy_test.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc32c + absl::memory + absl::random_random + absl::random_distributions + absl::strings + GTest::gtest_main +) + +absl_cc_test( + NAME + non_temporal_memcpy_test + SRCS + "internal/non_temporal_memcpy_test.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::non_temporal_memcpy + GTest::gtest_main +) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c.cc new file mode 100644 index 0000000000..82865df5c2 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c.cc @@ -0,0 +1,100 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
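The implementation that follows builds ConcatCrc32c() from a zero-extension of the left checksum followed by an XOR with the right one, so two checksums can be joined without touching the underlying bytes again. A usage sketch against the public API introduced in this patch (the free function here is illustrative, not part of the patch):

#include "absl/crc/crc32c.h"
#include "absl/strings/string_view.h"

// Returns the CRC32C of the concatenation a + b using only the two
// per-buffer checksums; the join step costs O(log |b|).
absl::crc32c_t CrcOfConcat(absl::string_view a, absl::string_view b) {
  absl::crc32c_t crc_a = absl::ComputeCrc32c(a);
  absl::crc32c_t crc_b = absl::ComputeCrc32c(b);
  return absl::ConcatCrc32c(crc_a, crc_b, b.size());
}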
+ +#include "absl/crc/crc32c.h" + +#include + +#include "absl/crc/internal/crc.h" +#include "absl/crc/internal/crc32c.h" +#include "absl/crc/internal/crc_memcpy.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace { + +const crc_internal::CRC* CrcEngine() { + static const crc_internal::CRC* engine = crc_internal::CRC::Crc32c(); + return engine; +} + +constexpr uint32_t kCRC32Xor = 0xffffffffU; + +} // namespace + +namespace crc_internal { + +crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length) { + uint32_t crc = static_cast(initial_crc) ^ kCRC32Xor; + CrcEngine()->UnextendByZeroes(&crc, length); + return static_cast(crc ^ kCRC32Xor); +} + +// Called by `absl::ExtendCrc32c()` on strings with size > 64 or when hardware +// CRC32C support is missing. +crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc, + absl::string_view buf_to_add) { + uint32_t crc = static_cast(initial_crc) ^ kCRC32Xor; + CrcEngine()->Extend(&crc, buf_to_add.data(), buf_to_add.size()); + return static_cast(crc ^ kCRC32Xor); +} + +} // namespace crc_internal + +crc32c_t ComputeCrc32c(absl::string_view buf) { + return ExtendCrc32c(ToCrc32c(0), buf); +} + +crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length) { + uint32_t crc = static_cast(initial_crc) ^ kCRC32Xor; + CrcEngine()->ExtendByZeroes(&crc, length); + return static_cast(crc ^ kCRC32Xor); +} + +crc32c_t ConcatCrc32c(crc32c_t lhs_crc, crc32c_t rhs_crc, size_t rhs_len) { + uint32_t result = static_cast(lhs_crc); + CrcEngine()->ExtendByZeroes(&result, rhs_len); + return static_cast(result) ^ rhs_crc; +} + +crc32c_t RemoveCrc32cPrefix(crc32c_t crc_a, crc32c_t crc_ab, size_t length_b) { + return ConcatCrc32c(crc_a, crc_ab, length_b); +} + +crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count, + crc32c_t initial_crc) { + return static_cast( + crc_internal::Crc32CAndCopy(dest, src, count, initial_crc, false)); +} + +// Remove a Suffix of given size from a buffer +// +// Given a CRC32C of an existing buffer, `full_string_crc`; the CRC32C of a +// suffix of that buffer to remove, `suffix_crc`; and suffix buffer's length, +// `suffix_len` return the CRC32C of the buffer with suffix removed +// +// This operation has a runtime cost of O(log(`suffix_len`)) +crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc, + size_t suffix_len) { + crc32c_t crc_with_suffix_zeroed = + suffix_crc ^ full_string_crc ^ + ExtendCrc32cByZeroes(ToCrc32c(0), suffix_len); + return crc_internal::UnextendCrc32cByZeroes( + crc_with_suffix_zeroed, suffix_len); +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c.h new file mode 100644 index 0000000000..8b03073265 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c.h @@ -0,0 +1,176 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: crc32c.h +// ----------------------------------------------------------------------------- +// +// This header file defines the API for computing CRC32C values as checksums +// for arbitrary sequences of bytes provided as a string buffer. +// +// The API includes the basic functions for computing such CRC32C values and +// some utility functions for performing more efficient mathematical +// computations using an existing checksum. +#ifndef ABSL_CRC_CRC32C_H_ +#define ABSL_CRC_CRC32C_H_ + +#include +#include +#include + +#include "absl/crc/internal/crc32c_inline.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +//----------------------------------------------------------------------------- +// crc32c_t +//----------------------------------------------------------------------------- + +// `crc32c_t` defines a strongly typed integer type for holding a CRC32C value. +enum class crc32c_t : uint32_t {}; + +// ToCrc32c() +// +// Converts a uint32_t value to crc32c_t. This API is necessary in C++14 +// and earlier. Code targeting C++17-or-later can instead use `crc32c_t{n}`. +inline crc32c_t ToCrc32c(uint32_t n) { + return static_cast(n); +} +// operator^ +// +// Performs a bitwise XOR on two CRC32C values +inline crc32c_t operator^(crc32c_t lhs, crc32c_t rhs) { + const auto lhs_int = static_cast(lhs); + const auto rhs_int = static_cast(rhs); + return ToCrc32c(lhs_int ^ rhs_int); +} + +namespace crc_internal { +// Non-inline code path for `absl::ExtendCrc32c()`. Do not call directly. +// Call `absl::ExtendCrc32c()` (defined below) instead. +crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc, + absl::string_view buf_to_add); +} // namespace crc_internal + +// ----------------------------------------------------------------------------- +// CRC32C Computation Functions +// ----------------------------------------------------------------------------- + +// ComputeCrc32c() +// +// Returns the CRC32C value of the provided string. +crc32c_t ComputeCrc32c(absl::string_view buf); + +// ExtendCrc32c() +// +// Computes a CRC32C value from an `initial_crc` CRC32C value including the +// `buf_to_add` bytes of an additional buffer. Using this function is more +// efficient than computing a CRC32C value for the combined buffer from +// scratch. +// +// Note: `ExtendCrc32c` with an initial_crc of 0 is equivalent to +// `ComputeCrc32c`. +// +// This operation has a runtime cost of O(`buf_to_add.size()`) +inline crc32c_t ExtendCrc32c(crc32c_t initial_crc, + absl::string_view buf_to_add) { + // Approximately 75% of calls have size <= 64. + if (buf_to_add.size() <= 64) { + uint32_t crc = static_cast(initial_crc); + if (crc_internal::ExtendCrc32cInline(&crc, buf_to_add.data(), + buf_to_add.size())) { + return ToCrc32c(crc); + } + } + return crc_internal::ExtendCrc32cInternal(initial_crc, buf_to_add); +} + +// ExtendCrc32cByZeroes() +// +// Computes a CRC32C value for a buffer with an `initial_crc` CRC32C value, +// where `length` bytes with a value of 0 are appended to the buffer. Using this +// function is more efficient than computing a CRC32C value for the combined +// buffer from scratch. 
+// +// This operation has a runtime cost of O(log(`length`)) +crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length); + +// MemcpyCrc32c() +// +// Copies `src` to `dest` using `memcpy()` semantics, returning the CRC32C +// value of the copied buffer. +// +// Using `MemcpyCrc32c()` is potentially faster than performing the `memcpy()` +// and `ComputeCrc32c()` operations separately. +crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count, + crc32c_t initial_crc = ToCrc32c(0)); + +// ----------------------------------------------------------------------------- +// CRC32C Arithmetic Functions +// ----------------------------------------------------------------------------- + +// The following functions perform arithmetic on CRC32C values, which are +// generally more efficient than recalculating any given result's CRC32C value. + +// ConcatCrc32c() +// +// Calculates the CRC32C value of two buffers with known CRC32C values +// concatenated together. +// +// Given a buffer with CRC32C value `crc1` and a buffer with +// CRC32C value `crc2` and length, `crc2_length`, returns the CRC32C value of +// the concatenation of these two buffers. +// +// This operation has a runtime cost of O(log(`crc2_length`)). +crc32c_t ConcatCrc32c(crc32c_t crc1, crc32c_t crc2, size_t crc2_length); + +// RemoveCrc32cPrefix() +// +// Calculates the CRC32C value of an existing buffer with a series of bytes +// (the prefix) removed from the beginning of that buffer. +// +// Given the CRC32C value of an existing buffer, `full_string_crc`; The CRC32C +// value of a prefix of that buffer, `prefix_crc`; and the length of the buffer +// with the prefix removed, `remaining_string_length` , return the CRC32C +// value of the buffer with the prefix removed. +// +// This operation has a runtime cost of O(log(`remaining_string_length`)). +crc32c_t RemoveCrc32cPrefix(crc32c_t prefix_crc, crc32c_t full_string_crc, + size_t remaining_string_length); +// RemoveCrc32cSuffix() +// +// Calculates the CRC32C value of an existing buffer with a series of bytes +// (the suffix) removed from the end of that buffer. +// +// Given a CRC32C value of an existing buffer `full_string_crc`, the CRC32C +// value of the suffix to remove `suffix_crc`, and the length of that suffix +// `suffix_len`, returns the CRC32C value of the buffer with suffix removed. +// +// This operation has a runtime cost of O(log(`suffix_len`)) +crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc, + size_t suffix_length); + +// operator<< +// +// Streams the CRC32C value `crc` to the stream `os`. +inline std::ostream& operator<<(std::ostream& os, crc32c_t crc) { + return os << static_cast(crc); +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_CRC32C_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c_benchmark.cc new file mode 100644 index 0000000000..2c7ac594f9 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c_benchmark.cc @@ -0,0 +1,162 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "absl/crc/crc32c.h" +#include "absl/crc/internal/crc32c.h" +#include "absl/memory/memory.h" +#include "benchmark/benchmark.h" + +namespace { + +std::string TestString(size_t len) { + std::string result; + result.reserve(len); + for (size_t i = 0; i < len; ++i) { + result.push_back(static_cast(i % 256)); + } + return result; +} + +void BM_Calculate(benchmark::State& state) { + int len = state.range(0); + std::string data = TestString(len); + for (auto s : state) { + benchmark::DoNotOptimize(data); + absl::crc32c_t crc = absl::ComputeCrc32c(data); + benchmark::DoNotOptimize(crc); + } +} +BENCHMARK(BM_Calculate)->Arg(0)->Arg(1)->Arg(100)->Arg(10000)->Arg(500000); + +void BM_Extend(benchmark::State& state) { + int len = state.range(0); + std::string extension = TestString(len); + absl::crc32c_t base = absl::ToCrc32c(0xC99465AA); // CRC32C of "Hello World" + for (auto s : state) { + benchmark::DoNotOptimize(base); + benchmark::DoNotOptimize(extension); + absl::crc32c_t crc = absl::ExtendCrc32c(base, extension); + benchmark::DoNotOptimize(crc); + } +} +BENCHMARK(BM_Extend)->Arg(0)->Arg(1)->Arg(100)->Arg(10000)->Arg(500000); + +void BM_ExtendByZeroes(benchmark::State& state) { + absl::crc32c_t base = absl::ToCrc32c(0xC99465AA); // CRC32C of "Hello World" + int num_zeroes = state.range(0); + for (auto s : state) { + benchmark::DoNotOptimize(base); + absl::crc32c_t crc = absl::ExtendCrc32cByZeroes(base, num_zeroes); + benchmark::DoNotOptimize(crc); + } +} +BENCHMARK(BM_ExtendByZeroes) + ->RangeMultiplier(10) + ->Range(1, 1000000) + ->RangeMultiplier(32) + ->Range(1, 1 << 20); + +void BM_UnextendByZeroes(benchmark::State& state) { + absl::crc32c_t base = absl::ToCrc32c(0xdeadbeef); + int num_zeroes = state.range(0); + for (auto s : state) { + benchmark::DoNotOptimize(base); + absl::crc32c_t crc = + absl::crc_internal::UnextendCrc32cByZeroes(base, num_zeroes); + benchmark::DoNotOptimize(crc); + } +} +BENCHMARK(BM_UnextendByZeroes) + ->RangeMultiplier(10) + ->Range(1, 1000000) + ->RangeMultiplier(32) + ->Range(1, 1 << 20); + +void BM_Concat(benchmark::State& state) { + int string_b_len = state.range(0); + std::string string_b = TestString(string_b_len); + + // CRC32C of "Hello World" + absl::crc32c_t crc_a = absl::ToCrc32c(0xC99465AA); + absl::crc32c_t crc_b = absl::ComputeCrc32c(string_b); + + for (auto s : state) { + benchmark::DoNotOptimize(crc_a); + benchmark::DoNotOptimize(crc_b); + benchmark::DoNotOptimize(string_b_len); + absl::crc32c_t crc_ab = absl::ConcatCrc32c(crc_a, crc_b, string_b_len); + benchmark::DoNotOptimize(crc_ab); + } +} +BENCHMARK(BM_Concat) + ->RangeMultiplier(10) + ->Range(1, 1000000) + ->RangeMultiplier(32) + ->Range(1, 1 << 20); + +void BM_Memcpy(benchmark::State& state) { + int string_len = state.range(0); + + std::string source = TestString(string_len); + auto dest = absl::make_unique(string_len); + + for (auto s : state) { + benchmark::DoNotOptimize(source); + absl::crc32c_t crc = + absl::MemcpyCrc32c(dest.get(), source.data(), source.size()); + benchmark::DoNotOptimize(crc); + benchmark::DoNotOptimize(dest); + 
benchmark::DoNotOptimize(dest.get()); + benchmark::DoNotOptimize(dest[0]); + } + + state.SetBytesProcessed(static_cast(state.iterations()) * + state.range(0)); +} +BENCHMARK(BM_Memcpy)->Arg(0)->Arg(1)->Arg(100)->Arg(10000)->Arg(500000); + +void BM_RemoveSuffix(benchmark::State& state) { + int full_string_len = state.range(0); + int suffix_len = state.range(1); + + std::string full_string = TestString(full_string_len); + std::string suffix = full_string.substr( + full_string_len - suffix_len, full_string_len); + + absl::crc32c_t full_string_crc = absl::ComputeCrc32c(full_string); + absl::crc32c_t suffix_crc = absl::ComputeCrc32c(suffix); + + for (auto s : state) { + benchmark::DoNotOptimize(full_string_crc); + benchmark::DoNotOptimize(suffix_crc); + benchmark::DoNotOptimize(suffix_len); + absl::crc32c_t crc = absl::RemoveCrc32cSuffix(full_string_crc, suffix_crc, + suffix_len); + benchmark::DoNotOptimize(crc); + } +} +BENCHMARK(BM_RemoveSuffix) + ->ArgPair(1, 1) + ->ArgPair(100, 10) + ->ArgPair(100, 100) + ->ArgPair(10000, 1) + ->ArgPair(10000, 100) + ->ArgPair(10000, 10000) + ->ArgPair(500000, 1) + ->ArgPair(500000, 100) + ->ArgPair(500000, 10000) + ->ArgPair(500000, 500000); +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c_test.cc new file mode 100644 index 0000000000..98e5fea395 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/crc32c_test.cc @@ -0,0 +1,186 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/crc/crc32c.h" + +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/crc/internal/crc32c.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" + +namespace { + +TEST(CRC32C, RFC3720) { + // Test the results of the vectors from + // https://www.rfc-editor.org/rfc/rfc3720#appendix-B.4 + char data[32]; + + // 32 bytes of ones. + memset(data, 0, sizeof(data)); + EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), + absl::ToCrc32c(0x8a9136aa)); + + // 32 bytes of ones. + memset(data, 0xff, sizeof(data)); + EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), + absl::ToCrc32c(0x62a8ab43)); + + // 32 incrementing bytes. + for (int i = 0; i < 32; ++i) data[i] = static_cast(i); + EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), + absl::ToCrc32c(0x46dd794e)); + + // 32 decrementing bytes. + for (int i = 0; i < 32; ++i) data[i] = static_cast(31 - i); + EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), + absl::ToCrc32c(0x113fdb5c)); + + // An iSCSI - SCSI Read (10) Command PDU. 
+ constexpr uint8_t cmd[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + EXPECT_EQ(absl::ComputeCrc32c(absl::string_view( + reinterpret_cast(cmd), sizeof(cmd))), + absl::ToCrc32c(0xd9963a56)); +} + +std::string TestString(size_t len) { + std::string result; + result.reserve(len); + for (size_t i = 0; i < len; ++i) { + result.push_back(static_cast(i % 256)); + } + return result; +} + +TEST(CRC32C, Compute) { + EXPECT_EQ(absl::ComputeCrc32c(""), absl::ToCrc32c(0)); + EXPECT_EQ(absl::ComputeCrc32c("hello world"), absl::ToCrc32c(0xc99465aa)); +} + +TEST(CRC32C, Extend) { + uint32_t base = 0xC99465AA; // CRC32C of "Hello World" + std::string extension = "Extension String"; + + EXPECT_EQ( + absl::ExtendCrc32c(absl::ToCrc32c(base), extension), + absl::ToCrc32c(0xD2F65090)); // CRC32C of "Hello WorldExtension String" +} + +TEST(CRC32C, ExtendByZeroes) { + std::string base = "hello world"; + absl::crc32c_t base_crc = absl::ToCrc32c(0xc99465aa); + + for (const size_t extend_by : {100, 10000, 100000}) { + SCOPED_TRACE(extend_by); + absl::crc32c_t crc2 = absl::ExtendCrc32cByZeroes(base_crc, extend_by); + EXPECT_EQ(crc2, absl::ComputeCrc32c(base + std::string(extend_by, '\0'))); + } +} + +TEST(CRC32C, UnextendByZeroes) { + for (auto seed_crc : {absl::ToCrc32c(0), absl::ToCrc32c(0xc99465aa)}) { + SCOPED_TRACE(seed_crc); + for (const size_t size_1 : {2, 200, 20000, 200000, 20000000}) { + for (const size_t size_2 : {0, 100, 10000, 100000, 10000000}) { + size_t extend_size = std::max(size_1, size_2); + size_t unextend_size = std::min(size_1, size_2); + SCOPED_TRACE(extend_size); + SCOPED_TRACE(unextend_size); + + // Extending by A zeroes an unextending by B +#include + +#include "absl/base/config.h" + +#if defined(__aarch64__) && defined(__linux__) +#include +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +#if defined(__x86_64__) + +// Inline cpuid instruction. %rbx is occasionally used to address stack +// variables in presence of dynamic allocas. Preserve the %rbx register via +// %rdi to work around a clang bug https://bugs.llvm.org/show_bug.cgi?id=17907 +// (%rbx in an output constraint is not considered a clobbered register). +// +// a_inp and c_inp are the input parameters eax and ecx of the CPUID +// instruction. 
+// a, b, c, and d contain the contents of eax, ebx, ecx, and edx as returned by +// the CPUID instruction +#define ABSL_INTERNAL_GETCPUID(a, b, c, d, a_inp, c_inp) \ + asm("mov %%rbx, %%rdi\n" \ + "cpuid\n" \ + "xchg %%rdi, %%rbx\n" \ + : "=a"(a), "=D"(b), "=c"(c), "=d"(d) \ + : "a"(a_inp), "2"(c_inp)) + +namespace { + +enum class Vendor { + kUnknown, + kIntel, + kAmd, +}; + +Vendor GetVendor() { + uint32_t eax, ebx, ecx, edx; + + // Get vendor string (issue CPUID with eax = 0) + ABSL_INTERNAL_GETCPUID(eax, ebx, ecx, edx, 0, 0); + std::string vendor; + vendor.append(reinterpret_cast(&ebx), 4); + vendor.append(reinterpret_cast(&edx), 4); + vendor.append(reinterpret_cast(&ecx), 4); + if (vendor == "GenuineIntel") { + return Vendor::kIntel; + } else if (vendor == "AuthenticAmd") { + return Vendor::kAmd; + } else { + return Vendor::kUnknown; + } +} + +CpuType GetIntelCpuType() { + uint32_t eax, ebx, ecx, edx; + // to get general information and extended features we send eax = 1 and + // ecx = 0 to cpuid. The response is returned in eax, ebx, ecx and edx. + // (See Intel 64 and IA-32 Architectures Software Developer's Manual + // Volume 2A: Instruction Set Reference, A-M CPUID). + // https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2a-manual.html + ABSL_INTERNAL_GETCPUID(eax, ebx, ecx, edx, 1, 0); + + // Response in eax bits as follows: + // 0-3 (stepping id) + // 4-7 (model number), + // 8-11 (family code), + // 12-13 (processor type), + // 16-19 (extended model) + // 20-27 (extended family) + + int family = (eax >> 8) & 0x0f; + int model_num = (eax >> 4) & 0x0f; + int ext_family = (eax >> 20) & 0xff; + int ext_model_num = (eax >> 16) & 0x0f; + + int brand_id = ebx & 0xff; + + // Process the extended family and model info if necessary + if (family == 0x0f) { + family += ext_family; + } + + if (family == 0x0f || family == 0x6) { + model_num += (ext_model_num << 4); + } + + switch (brand_id) { + case 0: // no brand ID, so parse CPU family/model + switch (family) { + case 6: // Most PentiumIII processors are in this category + switch (model_num) { + case 0x2c: // Westmere: Gulftown + return CpuType::kIntelWestmere; + case 0x2d: // Sandybridge + return CpuType::kIntelSandybridge; + case 0x3e: // Ivybridge + return CpuType::kIntelIvybridge; + case 0x3c: // Haswell (client) + case 0x3f: // Haswell + return CpuType::kIntelHaswell; + case 0x4f: // Broadwell + case 0x56: // BroadwellDE + return CpuType::kIntelBroadwell; + case 0x55: // Skylake Xeon + if ((eax & 0x0f) < 5) { // stepping < 5 is skylake + return CpuType::kIntelSkylakeXeon; + } else { // stepping >= 5 is cascadelake + return CpuType::kIntelCascadelakeXeon; + } + case 0x5e: // Skylake (client) + return CpuType::kIntelSkylake; + default: + return CpuType::kUnknown; + } + default: + return CpuType::kUnknown; + } + default: + return CpuType::kUnknown; + } +} + +CpuType GetAmdCpuType() { + uint32_t eax, ebx, ecx, edx; + // to get general information and extended features we send eax = 1 and + // ecx = 0 to cpuid. The response is returned in eax, ebx, ecx and edx. + // (See Intel 64 and IA-32 Architectures Software Developer's Manual + // Volume 2A: Instruction Set Reference, A-M CPUID). 
+ ABSL_INTERNAL_GETCPUID(eax, ebx, ecx, edx, 1, 0); + + // Response in eax bits as follows: + // 0-3 (stepping id) + // 4-7 (model number), + // 8-11 (family code), + // 12-13 (processor type), + // 16-19 (extended model) + // 20-27 (extended family) + + int family = (eax >> 8) & 0x0f; + int model_num = (eax >> 4) & 0x0f; + int ext_family = (eax >> 20) & 0xff; + int ext_model_num = (eax >> 16) & 0x0f; + + if (family == 0x0f) { + family += ext_family; + model_num += (ext_model_num << 4); + } + + switch (family) { + case 0x17: + switch (model_num) { + case 0x0: // Stepping Ax + case 0x1: // Stepping Bx + return CpuType::kAmdNaples; + case 0x30: // Stepping Ax + case 0x31: // Stepping Bx + return CpuType::kAmdRome; + default: + return CpuType::kUnknown; + } + break; + case 0x19: + switch (model_num) { + case 0x1: // Stepping B0 + return CpuType::kAmdMilan; + default: + return CpuType::kUnknown; + } + break; + default: + return CpuType::kUnknown; + } +} + +} // namespace + +CpuType GetCpuType() { + switch (GetVendor()) { + case Vendor::kIntel: + return GetIntelCpuType(); + case Vendor::kAmd: + return GetAmdCpuType(); + default: + return CpuType::kUnknown; + } +} + +bool SupportsArmCRC32PMULL() { return false; } + +#elif defined(__aarch64__) && defined(__linux__) + +#define ABSL_INTERNAL_AARCH64_ID_REG_READ(id, val) \ + asm("mrs %0, " #id : "=r"(val)) + +CpuType GetCpuType() { + // MIDR_EL1 is not visible to EL0, however the access will be emulated by + // linux if AT_HWCAP has HWCAP_CPUID set. + // + // This method will be unreliable on heterogeneous computing systems (ex: + // big.LITTLE) since the value of MIDR_EL1 will change based on the calling + // thread. + uint64_t hwcaps = getauxval(AT_HWCAP); + if (hwcaps & HWCAP_CPUID) { + uint64_t midr = 0; + ABSL_INTERNAL_AARCH64_ID_REG_READ(MIDR_EL1, midr); + uint32_t implementer = (midr >> 24) & 0xff; + uint32_t part_number = (midr >> 4) & 0xfff; + if (implementer == 0x41 && part_number == 0xd0c) { + return CpuType::kArmNeoverseN1; + } + } + return CpuType::kUnknown; +} + +bool SupportsArmCRC32PMULL() { + uint64_t hwcaps = getauxval(AT_HWCAP); + return (hwcaps & HWCAP_CRC32) && (hwcaps & HWCAP_PMULL); +} + +#else + +CpuType GetCpuType() { return CpuType::kUnknown; } + +bool SupportsArmCRC32PMULL() { return false; } + +#endif + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/cpu_detect.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/cpu_detect.h new file mode 100644 index 0000000000..6054f6960d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/cpu_detect.h @@ -0,0 +1,57 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
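The comments above describe how the CPUID family and model fields are packed into eax. A worked decode of that layout, mirroring the extraction already done in GetIntelCpuType() (the struct and function here are illustrative only; the AMD path applies the extended-model adjustment only when the base family is 0x0f):

#include <cstdint>

struct CpuSignature {
  int family;
  int model;
};

CpuSignature DecodeSignature(uint32_t eax) {
  int family = (eax >> 8) & 0x0f;             // bits 8-11: family code
  int model = (eax >> 4) & 0x0f;              // bits 4-7: model number
  const int ext_family = (eax >> 20) & 0xff;  // bits 20-27: extended family
  const int ext_model = (eax >> 16) & 0x0f;   // bits 16-19: extended model
  if (family == 0x0f) family += ext_family;
  if (family == 0x0f || family == 0x06) model += ext_model << 4;
  return {family, model};
}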
+ +#ifndef ABSL_CRC_INTERNAL_CPU_DETECT_H_ +#define ABSL_CRC_INTERNAL_CPU_DETECT_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +// Enumeration of architectures that we have special-case tuning parameters for. +// This set may change over time. +enum class CpuType { + kUnknown, + kIntelHaswell, + kAmdRome, + kAmdNaples, + kAmdMilan, + kIntelCascadelakeXeon, + kIntelSkylakeXeon, + kIntelBroadwell, + kIntelSkylake, + kIntelIvybridge, + kIntelSandybridge, + kIntelWestmere, + kArmNeoverseN1, +}; + +// Returns the type of host CPU this code is running on. Returns kUnknown if +// the host CPU is of unknown type, or if detection otherwise fails. +CpuType GetCpuType(); + +// Returns whether the host CPU supports the CPU features needed for our +// accelerated implementations. The CpuTypes enumerated above apart from +// kUnknown support the required features. On unknown CPUs, we can use +// this to see if it's safe to use hardware acceleration, though without any +// tuning. +bool SupportsArmCRC32PMULL(); + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CPU_DETECT_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc.cc new file mode 100644 index 0000000000..bb8936e373 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc.cc @@ -0,0 +1,468 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implementation of CRCs (aka Rabin Fingerprints). +// Treats the input as a polynomial with coefficients in Z(2), +// and finds the remainder when divided by an irreducible polynomial +// of the appropriate length. +// It handles all CRC sizes from 8 to 128 bits. +// It's somewhat complicated by having separate implementations optimized for +// CRC's <=32 bits, <= 64 bits, and <= 128 bits. +// The input string is prefixed with a "1" bit, and has "degree" "0" bits +// appended to it before the remainder is found. This ensures that +// short strings are scrambled somewhat and that strings consisting +// of all nulls have a non-zero CRC. +// +// Uses the "interleaved word-by-word" method from +// "Everything we know about CRC but afraid to forget" by Andrew Kadatch +// and Bob Jenkins, +// http://crcutil.googlecode.com/files/crc-doc.1.0.pdf +// +// The idea is to compute kStride CRCs simultaneously, allowing the +// processor to more effectively use multiple execution units. Each of +// the CRCs is calculated on one word of data followed by kStride - 1 +// words of zeroes; the CRC starting points are staggered by one word. +// Assuming a stride of 4 with data words "ABCDABCDABCD", the first +// CRC is over A000A000A, the second over 0B000B000B, and so on. 
+// The CRC of the whole data is then calculated by properly aligning the +// CRCs by appending zeroes until the data lengths agree then XORing +// the CRCs. + +#include "absl/crc/internal/crc.h" + +#include + +#include "absl/base/internal/endian.h" +#include "absl/base/internal/prefetch.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/crc/internal/crc_internal.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +namespace { + +// Constants +#if defined(__i386__) || defined(__x86_64__) +constexpr bool kNeedAlignedLoads = false; +#else +constexpr bool kNeedAlignedLoads = true; +#endif + +// We express the number of zeroes as a number in base ZEROES_BASE. By +// pre-computing the zero extensions for all possible components of such an +// expression (numbers in a form a*ZEROES_BASE**b), we can calculate the +// resulting extension by multiplying the extensions for individual components +// using log_{ZEROES_BASE}(num_zeroes) polynomial multiplications. The tables of +// zero extensions contain (ZEROES_BASE - 1) * (log_{ZEROES_BASE}(64)) entries. +constexpr int ZEROES_BASE_LG = 4; // log_2(ZEROES_BASE) +constexpr int ZEROES_BASE = (1 << ZEROES_BASE_LG); // must be a power of 2 + +constexpr uint32_t kCrc32cPoly = 0x82f63b78; + +uint32_t ReverseBits(uint32_t bits) { + bits = (bits & 0xaaaaaaaau) >> 1 | (bits & 0x55555555u) << 1; + bits = (bits & 0xccccccccu) >> 2 | (bits & 0x33333333u) << 2; + bits = (bits & 0xf0f0f0f0u) >> 4 | (bits & 0x0f0f0f0fu) << 4; + return absl::gbswap_32(bits); +} + +// Polynomial long multiplication mod the polynomial of degree 32. +void PolyMultiply(uint32_t* val, uint32_t m, uint32_t poly) { + uint32_t l = *val; + uint32_t result = 0; + auto onebit = uint32_t{0x80000000u}; + for (uint32_t one = onebit; one != 0; one >>= 1) { + if ((l & one) != 0) { + result ^= m; + } + if (m & 1) { + m = (m >> 1) ^ poly; + } else { + m >>= 1; + } + } + *val = result; +} +} // namespace + +void CRCImpl::FillWordTable(uint32_t poly, uint32_t last, int word_size, + Uint32By256* t) { + for (int j = 0; j != word_size; j++) { // for each byte of extension.... + t[j][0] = 0; // a zero has no effect + for (int i = 128; i != 0; i >>= 1) { // fill in entries for powers of 2 + if (j == 0 && i == 128) { + t[j][i] = last; // top bit in last byte is given + } else { + // each successive power of two is derived from the previous + // one, either in this table, or the last table + uint32_t pred; + if (i == 128) { + pred = t[j - 1][1]; + } else { + pred = t[j][i << 1]; + } + // Advance the CRC by one bit (multiply by X, and take remainder + // through one step of polynomial long division) + if (pred & 1) { + t[j][i] = (pred >> 1) ^ poly; + } else { + t[j][i] = pred >> 1; + } + } + } + // CRCs have the property that CRC(a xor b) == CRC(a) xor CRC(b) + // so we can make all the tables for non-powers of two by + // xoring previously created entries. + for (int i = 2; i != 256; i <<= 1) { + for (int k = i + 1; k != (i << 1); k++) { + t[j][k] = t[j][i] ^ t[j][k - i]; + } + } + } +} + +int CRCImpl::FillZeroesTable(uint32_t poly, Uint32By256* t) { + uint32_t inc = 1; + inc <<= 31; + + // Extend by one zero bit. We know degree > 1 so (inc & 1) == 0. + inc >>= 1; + + // Now extend by 2, 4, and 8 bits, so now `inc` is extended by one zero byte. + for (int i = 0; i < 3; ++i) { + PolyMultiply(&inc, inc, poly); + } + + int j = 0; + for (uint64_t inc_len = 1; inc_len != 0; inc_len <<= ZEROES_BASE_LG) { + // Every entry in the table adds an additional inc_len zeroes. 
+ uint32_t v = inc; + for (int a = 1; a != ZEROES_BASE; a++) { + t[0][j] = v; + PolyMultiply(&v, inc, poly); + j++; + } + inc = v; + } + ABSL_RAW_CHECK(j <= 256, ""); + return j; +} + +// Internal version of the "constructor". +CRCImpl* CRCImpl::NewInternal() { + // Find an accelearated implementation first. + CRCImpl* result = TryNewCRC32AcceleratedX86ARMCombined(); + + // Fall back to generic implementions if no acceleration is available. + if (result == nullptr) { + result = new CRC32(); + } + + result->InitTables(); + + return result; +} + +// The CRC of the empty string is always the CRC polynomial itself. +void CRCImpl::Empty(uint32_t* crc) const { *crc = kCrc32cPoly; } + +// The 32-bit implementation + +void CRC32::InitTables() { + // Compute the table for extending a CRC by one byte. + Uint32By256* t = new Uint32By256[4]; + FillWordTable(kCrc32cPoly, kCrc32cPoly, 1, t); + for (int i = 0; i != 256; i++) { + this->table0_[i] = t[0][i]; + } + + // Construct a table for updating the CRC by 4 bytes data followed by + // 12 bytes of zeroes. + // + // Note: the data word size could be larger than the CRC size; it might + // be slightly faster to use a 64-bit data word, but doing so doubles the + // table size. + uint32_t last = kCrc32cPoly; + const size_t size = 12; + for (size_t i = 0; i < size; ++i) { + last = (last >> 8) ^ this->table0_[last & 0xff]; + } + FillWordTable(kCrc32cPoly, last, 4, t); + for (size_t b = 0; b < 4; ++b) { + for (int i = 0; i < 256; ++i) { + this->table_[b][i] = t[b][i]; + } + } + + int j = FillZeroesTable(kCrc32cPoly, t); + ABSL_RAW_CHECK(j <= static_cast(ABSL_ARRAYSIZE(this->zeroes_)), ""); + for (int i = 0; i < j; i++) { + this->zeroes_[i] = t[0][i]; + } + + delete[] t; + + // Build up tables for _reversing_ the operation of doing CRC operations on + // zero bytes. + + // In C++, extending `crc` by a single zero bit is done by the following: + // (A) bool low_bit_set = (crc & 1); + // crc >>= 1; + // if (low_bit_set) crc ^= kCrc32cPoly; + // + // In particular note that the high bit of `crc` after this operation will be + // set if and only if the low bit of `crc` was set before it. This means that + // no information is lost, and the operation can be reversed, as follows: + // (B) bool high_bit_set = (crc & 0x80000000u); + // if (high_bit_set) crc ^= kCrc32cPoly; + // crc <<= 1; + // if (high_bit_set) crc ^= 1; + // + // Or, equivalently: + // (C) bool high_bit_set = (crc & 0x80000000u); + // crc <<= 1; + // if (high_bit_set) crc ^= ((kCrc32cPoly << 1) ^ 1); + // + // The last observation is, if we store our checksums in variable `rcrc`, + // with order of the bits reversed, the inverse operation becomes: + // (D) bool low_bit_set = (rcrc & 1); + // rcrc >>= 1; + // if (low_bit_set) rcrc ^= ReverseBits((kCrc32cPoly << 1) ^ 1) + // + // This is the same algorithm (A) that we started with, only with a different + // polynomial bit pattern. This means that by building up our tables with + // this alternate polynomial, we can apply the CRC algorithms to a + // bit-reversed CRC checksum to perform inverse zero-extension. 
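The (A)/(B) steps quoted in the comment above are easy to sanity-check in isolation. A standalone sketch (not part of the patch) of the single-bit zero-extension step and its inverse, using the same CRC32C polynomial; the round trip holds because the polynomial's top bit is set, so the low bit shifted out by (A) reappears as the high bit that (B) tests:

```cpp
#include <cassert>
#include <cstdint>
#include <initializer_list>

constexpr uint32_t kPoly = 0x82f63b78;  // same CRC32C polynomial as above

uint32_t ExtendOneZeroBit(uint32_t crc) {  // step (A)
  bool low_bit_set = (crc & 1) != 0;
  crc >>= 1;
  if (low_bit_set) crc ^= kPoly;
  return crc;
}

uint32_t UnextendOneZeroBit(uint32_t crc) {  // step (B)
  bool high_bit_set = (crc & 0x80000000u) != 0;
  if (high_bit_set) crc ^= kPoly;
  crc <<= 1;
  if (high_bit_set) crc ^= 1;
  return crc;
}

int main() {
  for (uint32_t v : {0u, 1u, 0xdeadbeefu, 0x80000000u, kPoly}) {
    assert(UnextendOneZeroBit(ExtendOneZeroBit(v)) == v);
    assert(ExtendOneZeroBit(UnextendOneZeroBit(v)) == v);
  }
  return 0;
}
```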
+ + const uint32_t kCrc32cUnextendPoly = + ReverseBits(static_cast((kCrc32cPoly << 1) ^ 1)); + FillWordTable(kCrc32cUnextendPoly, kCrc32cUnextendPoly, 1, &reverse_table0_); + + j = FillZeroesTable(kCrc32cUnextendPoly, &reverse_zeroes_); + ABSL_RAW_CHECK(j <= static_cast(ABSL_ARRAYSIZE(this->reverse_zeroes_)), + ""); +} + +void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const { + const uint8_t* p = static_cast(bytes); + const uint8_t* e = p + length; + uint32_t l = *crc; + + auto step_one_byte = [this, &p, &l] () { + int c = (l & 0xff) ^ *p++; + l = this->table0_[c] ^ (l >> 8); + }; + + if (kNeedAlignedLoads) { + // point x at first 4-byte aligned byte in string. this might be past the + // end of the string. + const uint8_t* x = RoundUp<4>(p); + if (x <= e) { + // Process bytes until finished or p is 4-byte aligned + while (p != x) { + step_one_byte(); + } + } + } + + const size_t kSwathSize = 16; + if (static_cast(e - p) >= kSwathSize) { + // Load one swath of data into the operating buffers. + uint32_t buf0 = absl::little_endian::Load32(p) ^ l; + uint32_t buf1 = absl::little_endian::Load32(p + 4); + uint32_t buf2 = absl::little_endian::Load32(p + 8); + uint32_t buf3 = absl::little_endian::Load32(p + 12); + p += kSwathSize; + + // Increment a CRC value by a "swath"; this combines the four bytes + // starting at `ptr` and twelve zero bytes, so that four CRCs can be + // built incrementally and combined at the end. + const auto step_swath = [this](uint32_t crc_in, const std::uint8_t* ptr) { + return absl::little_endian::Load32(ptr) ^ + this->table_[3][crc_in & 0xff] ^ + this->table_[2][(crc_in >> 8) & 0xff] ^ + this->table_[1][(crc_in >> 16) & 0xff] ^ + this->table_[0][crc_in >> 24]; + }; + + // Run one CRC calculation step over all swaths in one 16-byte stride + const auto step_stride = [&]() { + buf0 = step_swath(buf0, p); + buf1 = step_swath(buf1, p + 4); + buf2 = step_swath(buf2, p + 8); + buf3 = step_swath(buf3, p + 12); + p += 16; + }; + + // Process kStride interleaved swaths through the data in parallel. + while ((e - p) > kPrefetchHorizon) { + base_internal::PrefetchNta( + reinterpret_cast(p + kPrefetchHorizon)); + // Process 64 bytes at a time + step_stride(); + step_stride(); + step_stride(); + step_stride(); + } + while (static_cast(e - p) >= kSwathSize) { + step_stride(); + } + + // Now advance one word at a time as far as possible. This isn't worth + // doing if we have word-advance tables. + while (static_cast(e - p) >= 4) { + buf0 = step_swath(buf0, p); + uint32_t tmp = buf0; + buf0 = buf1; + buf1 = buf2; + buf2 = buf3; + buf3 = tmp; + p += 4; + } + + // Combine the results from the different swaths. This is just a CRC + // on the data values in the bufX words. 
+ auto combine_one_word = [this](uint32_t crc_in, uint32_t w) { + w ^= crc_in; + for (size_t i = 0; i < 4; ++i) { + w = (w >> 8) ^ this->table0_[w & 0xff]; + } + return w; + }; + + l = combine_one_word(0, buf0); + l = combine_one_word(l, buf1); + l = combine_one_word(l, buf2); + l = combine_one_word(l, buf3); + } + + // Process the last few bytes + while (p != e) { + step_one_byte(); + } + + *crc = l; +} + +void CRC32::ExtendByZeroesImpl(uint32_t* crc, size_t length, + const uint32_t zeroes_table[256], + const uint32_t poly_table[256]) const { + if (length != 0) { + uint32_t l = *crc; + // For each ZEROES_BASE_LG bits in length + // (after the low-order bits have been removed) + // we lookup the appropriate polynomial in the zeroes_ array + // and do a polynomial long multiplication (mod the CRC polynomial) + // to extend the CRC by the appropriate number of bits. + for (int i = 0; length != 0; + i += ZEROES_BASE - 1, length >>= ZEROES_BASE_LG) { + int c = length & (ZEROES_BASE - 1); // pick next ZEROES_BASE_LG bits + if (c != 0) { // if they are not zero, + // multiply by entry in table + // Build a table to aid in multiplying 2 bits at a time. + // It takes too long to build tables for more bits. + uint64_t m = zeroes_table[c + i - 1]; + m <<= 1; + uint64_t m2 = m << 1; + uint64_t mtab[4] = {0, m, m2, m2 ^ m}; + + // Do the multiply one byte at a time. + uint64_t result = 0; + for (int x = 0; x < 32; x += 8) { + // The carry-less multiply. + result ^= mtab[l & 3] ^ (mtab[(l >> 2) & 3] << 2) ^ + (mtab[(l >> 4) & 3] << 4) ^ (mtab[(l >> 6) & 3] << 6); + l >>= 8; + + // Reduce modulo the polynomial + result = (result >> 8) ^ poly_table[result & 0xff]; + } + l = static_cast(result); + } + } + *crc = l; + } +} + +void CRC32::ExtendByZeroes(uint32_t* crc, size_t length) const { + return CRC32::ExtendByZeroesImpl(crc, length, zeroes_, table0_); +} + +void CRC32::UnextendByZeroes(uint32_t* crc, size_t length) const { + // See the comment in CRC32::InitTables() for an explanation of the algorithm + // below. + *crc = ReverseBits(*crc); + ExtendByZeroesImpl(crc, length, reverse_zeroes_, reverse_table0_); + *crc = ReverseBits(*crc); +} + +void CRC32::Scramble(uint32_t* crc) const { + // Rotate by near half the word size plus 1. See the scramble comment in + // crc_internal.h for an explanation. + constexpr int scramble_rotate = (32 / 2) + 1; + *crc = RotateRight(static_cast(*crc + kScrambleLo), + 32, scramble_rotate) & + MaskOfLength(32); +} + +void CRC32::Unscramble(uint32_t* crc) const { + constexpr int scramble_rotate = (32 / 2) + 1; + uint64_t rotated = RotateRight(static_cast(*crc), 32, + 32 - scramble_rotate); + *crc = (rotated - kScrambleLo) & MaskOfLength(32); +} + +// Constructor and destructor for base class CRC. +CRC::~CRC() {} +CRC::CRC() {} + +// The "constructor" for a CRC32C with a standard polynomial. +CRC* CRC::Crc32c() { + static CRC* singleton = CRCImpl::NewInternal(); + return singleton; +} + +// This Concat implementation works for arbitrary polynomials. 
+void CRC::Concat(uint32_t* px, uint32_t y, size_t ylen) { + // https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks + // The CRC of a message M is the remainder of polynomial divison modulo G, + // where the coefficient arithmetic is performed modulo 2 (so +/- are XOR): + // R(x) = M(x) x**n (mod G) + // (n is the degree of G) + // In practice, we use an initial value A and a bitmask B to get + // R = (A ^ B)x**|M| ^ Mx**n ^ B (mod G) + // If M is the concatenation of two strings S and T, and Z is the string of + // len(T) 0s, then the remainder CRC(ST) can be expressed as: + // R = (A ^ B)x**|ST| ^ STx**n ^ B + // = (A ^ B)x**|SZ| ^ SZx**n ^ B ^ Tx**n + // = CRC(SZ) ^ Tx**n + // CRC(Z) = (A ^ B)x**|T| ^ B + // CRC(T) = (A ^ B)x**|T| ^ Tx**n ^ B + // So R = CRC(SZ) ^ CRC(Z) ^ CRC(T) + // + // And further, since CRC(SZ) = Extend(CRC(S), Z), + // CRC(SZ) ^ CRC(Z) = Extend(CRC(S) ^ CRC(''), Z). + uint32_t z; + uint32_t t; + Empty(&z); + t = *px ^ z; + ExtendByZeroes(&t, ylen); + *px = t ^ y; +} + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc.h new file mode 100644 index 0000000000..72515b061d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc.h @@ -0,0 +1,91 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC_H_ +#define ABSL_CRC_INTERNAL_CRC_H_ + +#include + +#include "absl/base/config.h" + +// This class implements CRCs (aka Rabin Fingerprints). +// Treats the input as a polynomial with coefficients in Z(2), +// and finds the remainder when divided by an primitive polynomial +// of the appropriate length. + +// A polynomial is represented by the bit pattern formed by its coefficients, +// but with the highest order bit not stored. +// The highest degree coefficient is stored in the lowest numbered bit +// in the lowest addressed byte. Thus, in what follows, the highest degree +// coefficient that is stored is in the low order bit of "lo" or "*lo". + +// Hardware acceleration is used when available. + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +class CRC { + public: + virtual ~CRC(); + + // Place the CRC of the empty string in "*crc" + virtual void Empty(uint32_t* crc) const = 0; + + // If "*crc" is the CRC of bytestring A, place the CRC of + // the bytestring formed from the concatenation of A and the "length" + // bytes at "bytes" into "*crc". + virtual void Extend(uint32_t* crc, const void* bytes, + size_t length) const = 0; + + // Equivalent to Extend(crc, bytes, length) where "bytes" + // points to an array of "length" zero bytes. + virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0; + + // Inverse opration of ExtendByZeroes. 
If `crc` is the CRC value of a string + // ending in `length` zero bytes, this returns a CRC value of that string + // with those zero bytes removed. + virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0; + + // If *px is the CRC (as defined by *crc) of some string X, + // and y is the CRC of some string Y that is ylen bytes long, set + // *px to the CRC of the concatenation of X followed by Y. + virtual void Concat(uint32_t* px, uint32_t y, size_t ylen); + + // Apply a non-linear transformation to "*crc" so that + // it is safe to CRC the result with the same polynomial without + // any reduction of error-detection ability in the outer CRC. + // Unscramble() performs the inverse transformation. + // It is strongly recommended that CRCs be scrambled before storage or + // transmission, and unscrambled at the other end before futher manipulation. + virtual void Scramble(uint32_t* crc) const = 0; + virtual void Unscramble(uint32_t* crc) const = 0; + + // Crc32c() returns the singleton implementation of CRC for the CRC32C + // polynomial. Returns a handle that MUST NOT be destroyed with delete. + static CRC* Crc32c(); + + protected: + CRC(); // Clients may not call constructor; use Crc32c() instead. + + private: + CRC(const CRC&) = delete; + CRC& operator=(const CRC&) = delete; +}; + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h new file mode 100644 index 0000000000..59d71fd466 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h @@ -0,0 +1,260 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_ +#define ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_ + +#include + +#include "absl/base/config.h" + +// ------------------------------------------------------------------------- +// Many x86 and ARM machines have CRC acceleration hardware. +// We can do a faster version of Extend() on such machines. +// We define a translation layer for both x86 and ARM for the ease of use and +// most performance gains. + +// We need CRC (part of sse4.2) and PCLMULQDQ instructions. 
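For readers unfamiliar with PCLMULQDQ/PMULL, the operation being abstracted below is carry-less (polynomial) multiplication: multiplication over GF(2), where partial products are XORed instead of added. A standalone software model (not part of the patch); it keeps only the low 64 bits of the product for brevity, whereas the hardware instructions wrapped by the V128_PMul* helpers return the full 128-bit product:

```cpp
#include <cstdint>
#include <cstdio>

// Carry-less multiply, truncated to the low 64 bits of the product.
uint64_t ClmulLow64(uint64_t a, uint64_t b) {
  uint64_t result = 0;
  for (int i = 0; i < 64; ++i) {
    if ((b >> i) & 1) result ^= a << i;  // XOR instead of add: no carries
  }
  return result;
}

int main() {
  // (x^2 + 1) * (x + 1) = x^3 + x^2 + x + 1 over GF(2): 0b101 * 0b11 = 0b1111.
  std::printf("%llx\n",
              static_cast<unsigned long long>(ClmulLow64(0b101, 0b11)));
  return 0;
}
```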
+#if defined(__SSE4_2__) && defined(__PCLMUL__) + +#include +#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD + +#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \ + defined(__ARM_FEATURE_CRC32) && defined(__ARM_NEON) + +#include +#include +#define ABSL_CRC_INTERNAL_HAVE_ARM_SIMD + +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \ + defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + +#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) +using V128 = uint64x2_t; +#else +using V128 = __m128i; +#endif + +// Starting with the initial value in |crc|, accumulates a CRC32 value for +// unsigned integers of different sizes. +uint32_t CRC32_u8(uint32_t crc, uint8_t v); + +uint32_t CRC32_u16(uint32_t crc, uint16_t v); + +uint32_t CRC32_u32(uint32_t crc, uint32_t v); + +uint32_t CRC32_u64(uint32_t crc, uint64_t v); + +// Loads 128 bits of integer data. |src| must be 16-byte aligned. +V128 V128_Load(const V128* src); + +// Load 128 bits of integer data. |src| does not need to be aligned. +V128 V128_LoadU(const V128* src); + +// Polynomially multiplies the high 64 bits of |l| and |r|. +V128 V128_PMulHi(const V128 l, const V128 r); + +// Polynomially multiplies the low 64 bits of |l| and |r|. +V128 V128_PMulLow(const V128 l, const V128 r); + +// Polynomially multiplies the low 64 bits of |r| and high 64 bits of |l|. +V128 V128_PMul01(const V128 l, const V128 r); + +// Polynomially multiplies the low 64 bits of |l| and high 64 bits of |r|. +V128 V128_PMul10(const V128 l, const V128 r); + +// Produces a XOR operation of |l| and |r|. +V128 V128_Xor(const V128 l, const V128 r); + +// Produces an AND operation of |l| and |r|. +V128 V128_And(const V128 l, const V128 r); + +// Sets two 64 bit integers to one 128 bit vector. The order is reverse. +// dst[63:0] := |r| +// dst[127:64] := |l| +V128 V128_From2x64(const uint64_t l, const uint64_t r); + +// Shift |l| right by |imm| bytes while shifting in zeros. +template +V128 V128_ShiftRight(const V128 l); + +// Extracts a 32-bit integer from |l|, selected with |imm|. +template +int V128_Extract32(const V128 l); + +// Extracts the low 64 bits from V128. +int64_t V128_Low64(const V128 l); + +// Left-shifts packed 64-bit integers in l by r. 
+V128 V128_ShiftLeft64(const V128 l, const V128 r); + +#endif + +#if defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + +inline uint32_t CRC32_u8(uint32_t crc, uint8_t v) { + return _mm_crc32_u8(crc, v); +} + +inline uint32_t CRC32_u16(uint32_t crc, uint16_t v) { + return _mm_crc32_u16(crc, v); +} + +inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) { + return _mm_crc32_u32(crc, v); +} + +inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) { + return _mm_crc32_u64(crc, v); +} + +inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); } + +inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); } + +inline V128 V128_PMulHi(const V128 l, const V128 r) { + return _mm_clmulepi64_si128(l, r, 0x11); +} + +inline V128 V128_PMulLow(const V128 l, const V128 r) { + return _mm_clmulepi64_si128(l, r, 0x00); +} + +inline V128 V128_PMul01(const V128 l, const V128 r) { + return _mm_clmulepi64_si128(l, r, 0x01); +} + +inline V128 V128_PMul10(const V128 l, const V128 r) { + return _mm_clmulepi64_si128(l, r, 0x10); +} + +inline V128 V128_Xor(const V128 l, const V128 r) { return _mm_xor_si128(l, r); } + +inline V128 V128_And(const V128 l, const V128 r) { return _mm_and_si128(l, r); } + +inline V128 V128_From2x64(const uint64_t l, const uint64_t r) { + return _mm_set_epi64x(l, r); +} + +template +inline V128 V128_ShiftRight(const V128 l) { + return _mm_srli_si128(l, imm); +} + +template +inline int V128_Extract32(const V128 l) { + return _mm_extract_epi32(l, imm); +} + +inline int64_t V128_Low64(const V128 l) { return _mm_cvtsi128_si64(l); } + +inline V128 V128_ShiftLeft64(const V128 l, const V128 r) { + return _mm_sll_epi64(l, r); +} + +#elif defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) + +inline uint32_t CRC32_u8(uint32_t crc, uint8_t v) { return __crc32cb(crc, v); } + +inline uint32_t CRC32_u16(uint32_t crc, uint16_t v) { + return __crc32ch(crc, v); +} + +inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) { + return __crc32cw(crc, v); +} + +inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) { + return __crc32cd(crc, v); +} + +inline V128 V128_Load(const V128* src) { + return vld1q_u64(reinterpret_cast(src)); +} + +inline V128 V128_LoadU(const V128* src) { + return vld1q_u64(reinterpret_cast(src)); +} + +// Using inline assembly as clang does not generate the pmull2 instruction and +// performance drops by 15-20%. +// TODO(b/193678732): Investigate why the compiler decides not to generate +// such instructions and why it becomes so much worse. 
+inline V128 V128_PMulHi(const V128 l, const V128 r) { + uint64x2_t res; + __asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" + : "=w"(res) + : "w"(l), "w"(r)); + return res; +} + +inline V128 V128_PMulLow(const V128 l, const V128 r) { + return reinterpret_cast(vmull_p64( + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(l))), + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(r))))); +} + +inline V128 V128_PMul01(const V128 l, const V128 r) { + return reinterpret_cast(vmull_p64( + reinterpret_cast(vget_high_p64(vreinterpretq_p64_u64(l))), + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(r))))); +} + +inline V128 V128_PMul10(const V128 l, const V128 r) { + return reinterpret_cast(vmull_p64( + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(l))), + reinterpret_cast(vget_high_p64(vreinterpretq_p64_u64(r))))); +} + +inline V128 V128_Xor(const V128 l, const V128 r) { return veorq_u64(l, r); } + +inline V128 V128_And(const V128 l, const V128 r) { return vandq_u64(l, r); } + +inline V128 V128_From2x64(const uint64_t l, const uint64_t r) { + return vcombine_u64(vcreate_u64(r), vcreate_u64(l)); +} + +template +inline V128 V128_ShiftRight(const V128 l) { + return vreinterpretq_u64_s8( + vextq_s8(vreinterpretq_s8_u64(l), vdupq_n_s8(0), imm)); +} + +template +inline int V128_Extract32(const V128 l) { + return vgetq_lane_s32(vreinterpretq_s32_u64(l), imm); +} + +inline int64_t V128_Low64(const V128 l) { + return vgetq_lane_s64(vreinterpretq_s64_u64(l), 0); +} + +inline V128 V128_ShiftLeft64(const V128 l, const V128 r) { + return vshlq_u64(l, r); +} + +#endif + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32c.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32c.h new file mode 100644 index 0000000000..34027c55e4 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32c.h @@ -0,0 +1,39 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC32C_H_ +#define ABSL_CRC_INTERNAL_CRC32C_H_ + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +// Modifies a CRC32 value by removing `length` bytes with a value of 0 from +// the end of the string. +// +// This is the inverse operation of ExtendCrc32cByZeroes(). +// +// This operation has a runtime cost of O(log(`length`)) +// +// Internal implementation detail, exposed for testing only. 
+crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length); + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC32C_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32c_inline.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32c_inline.h new file mode 100644 index 0000000000..43ad14f4dc --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc32c_inline.h @@ -0,0 +1,72 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC32C_INLINE_H_ +#define ABSL_CRC_INTERNAL_CRC32C_INLINE_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/crc/internal/crc32_x86_arm_combined_simd.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +// CRC32C implementation optimized for small inputs. +// Either computes crc and return true, or if there is +// no hardware support does nothing and returns false. +inline bool ExtendCrc32cInline(uint32_t* crc, const char* p, size_t n) { +#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \ + defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + constexpr uint32_t kCrc32Xor = 0xffffffffU; + *crc ^= kCrc32Xor; + if (n & 1) { + *crc = CRC32_u8(*crc, *p); + n--; + p++; + } + if (n & 2) { + *crc = CRC32_u16(*crc, absl::little_endian::Load16(p)); + n -= 2; + p += 2; + } + if (n & 4) { + *crc = CRC32_u32(*crc, absl::little_endian::Load32(p)); + n -= 4; + p += 4; + } + while (n) { + *crc = CRC32_u64(*crc, absl::little_endian::Load64(p)); + n -= 8; + p += 8; + } + *crc ^= kCrc32Xor; + return true; +#else + // No hardware support, signal the need to fallback. + static_cast(crc); + static_cast(p); + static_cast(n); + return false; +#endif // defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || + // defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) +} + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC32C_INLINE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_internal.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_internal.h new file mode 100644 index 0000000000..7a503433a5 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_internal.h @@ -0,0 +1,177 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC_INTERNAL_H_ +#define ABSL_CRC_INTERNAL_CRC_INTERNAL_H_ + +#include +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/crc/internal/crc.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace crc_internal { + +// Prefetch constants used in some Extend() implementations +constexpr int kPrefetchHorizon = ABSL_CACHELINE_SIZE * 4; // Prefetch this far +static_assert(kPrefetchHorizon >= 64, "CRCPrefetchHorizon less than loop len"); + +// We require the Scramble() function: +// - to be reversible (Unscramble() must exist) +// - to be non-linear in the polynomial's Galois field (so the CRC of a +// scrambled CRC is not linearly affected by the scrambled CRC, even if +// using the same polynomial) +// - not to be its own inverse. Preferably, if X=Scramble^N(X) and N!=0, then +// N is large. +// - to be fast. +// - not to change once defined. +// We introduce non-linearity in two ways: +// Addition of a constant. +// - The carries introduce non-linearity; we use bits of an irrational +// (phi) to make it unlikely that we introduce no carries. +// Rotate by a constant number of bits. +// - We use floor(degree/2)+1, which does not divide the degree, and +// splits the bits nearly evenly, which makes it less likely the +// halves will be the same or one will be all zeroes. +// We do both things to improve the chances of non-linearity in the face of +// bit patterns with low numbers of bits set, while still being fast. +// Below is the constant that we add. The bits are the first 128 bits of the +// fractional part of phi, with a 1 ored into the bottom bit to maximize the +// cycle length of repeated adds. +constexpr uint64_t kScrambleHi = (static_cast(0x4f1bbcdcU) << 32) | + static_cast(0xbfa53e0aU); +constexpr uint64_t kScrambleLo = (static_cast(0xf9ce6030U) << 32) | + static_cast(0x2e76e41bU); + +class CRCImpl : public CRC { // Implemention of the abstract class CRC + public: + using Uint32By256 = uint32_t[256]; + + CRCImpl() {} + ~CRCImpl() override = default; + + // The internal version of CRC::New(). + static CRCImpl* NewInternal(); + + void Empty(uint32_t* crc) const override; + + // Fill in a table for updating a CRC by one word of 'word_size' bytes + // [last_lo, last_hi] contains the answer if the last bit in the word + // is set. + static void FillWordTable(uint32_t poly, uint32_t last, int word_size, + Uint32By256* t); + + // Build the table for extending by zeroes, returning the number of entries. + // For a in {1, 2, ..., ZEROES_BASE-1}, b in {0, 1, 2, 3, ...}, + // entry j=a-1+(ZEROES_BASE-1)*b + // contains a polynomial Pi such that multiplying + // a CRC by Pi mod P, where P is the CRC polynomial, is equivalent to + // appending a*2**(ZEROES_BASE_LG*b) zero bytes to the original string. + static int FillZeroesTable(uint32_t poly, Uint32By256* t); + + virtual void InitTables() = 0; + + private: + CRCImpl(const CRCImpl&) = delete; + CRCImpl& operator=(const CRCImpl&) = delete; +}; + +// This is the 32-bit implementation. It handles all sizes from 8 to 32. 
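The scramble described above (add bits of phi, then rotate by floor(width/2)+1) is easiest to see in a 32-bit standalone form. A sketch (not part of the patch) that uses only the low 32 bits of kScrambleLo, which is all that survives the 32-bit addition, together with a round-trip check:

```cpp
#include <cassert>
#include <cstdint>
#include <initializer_list>

constexpr uint32_t kScrambleLo32 = 0x2e76e41bu;  // low word of kScrambleLo
constexpr int kRot = 32 / 2 + 1;                 // 17, as in Scramble() below

uint32_t Rotr32(uint32_t v, int r) { return (v >> r) | (v << (32 - r)); }
uint32_t Rotl32(uint32_t v, int r) { return (v << r) | (v >> (32 - r)); }

// Add a constant (introduces carries, hence non-linearity), then rotate.
uint32_t Scramble32(uint32_t crc) { return Rotr32(crc + kScrambleLo32, kRot); }
// Exact inverse: rotate back, then subtract the constant.
uint32_t Unscramble32(uint32_t s) { return Rotl32(s, kRot) - kScrambleLo32; }

int main() {
  for (uint32_t v : {0u, 1u, 0x82f63b78u, 0x12345678u, 0xffffffffu}) {
    assert(Unscramble32(Scramble32(v)) == v);
  }
  return 0;
}
```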
+class CRC32 : public CRCImpl { + public: + CRC32() {} + ~CRC32() override {} + + void Extend(uint32_t* crc, const void* bytes, size_t length) const override; + void ExtendByZeroes(uint32_t* crc, size_t length) const override; + void Scramble(uint32_t* crc) const override; + void Unscramble(uint32_t* crc) const override; + void UnextendByZeroes(uint32_t* crc, size_t length) const override; + + void InitTables() override; + + private: + // Common implementation guts for ExtendByZeroes and UnextendByZeroes(). + // + // zeroes_table is a table as returned by FillZeroesTable(), containing + // polynomials representing CRCs of strings-of-zeros of various lenghts, + // and which can be combined by polynomial multiplication. poly_table is + // a table of CRC byte extension values. These tables are determined by + // the generator polynomial. + // + // These will be set to reverse_zeroes_ and reverse_table0_ for Unextend, and + // CRC32::zeroes_ and CRC32::table0_ for Extend. + void ExtendByZeroesImpl(uint32_t* crc, size_t length, + const uint32_t zeroes_table[256], + const uint32_t poly_table[256]) const; + + uint32_t table0_[256]; // table of byte extensions + uint32_t zeroes_[256]; // table of zero extensions + + // table of 4-byte extensions shifted by 12 bytes of zeroes + uint32_t table_[4][256]; + + // Reverse lookup tables, using the alternate polynomial used by + // UnextendByZeroes(). + uint32_t reverse_table0_[256]; // table of reverse byte extensions + uint32_t reverse_zeroes_[256]; // table of reverse zero extensions + + CRC32(const CRC32&) = delete; + CRC32& operator=(const CRC32&) = delete; +}; + +// Helpers + +// Return a bit mask containing len 1-bits. +// Requires 0 < len <= sizeof(T) +template +T MaskOfLength(int len) { + // shift 2 by len-1 rather than 1 by len because shifts of wordsize + // are undefined. + return (T(2) << (len - 1)) - 1; +} + +// Rotate low-order "width" bits of "in" right by "r" bits, +// setting other bits in word to arbitrary values. +template +T RotateRight(T in, int width, int r) { + return (in << (width - r)) | ((in >> r) & MaskOfLength(width - r)); +} + +// RoundUp(p) returns the lowest address >= p aligned to an N-byte +// boundary. Requires that N is a power of 2. +template +const uint8_t* RoundUp(const uint8_t* p) { + static_assert((alignment & (alignment - 1)) == 0, "alignment is not 2^n"); + constexpr uintptr_t mask = alignment - 1; + const uintptr_t as_uintptr = reinterpret_cast(p); + return reinterpret_cast((as_uintptr + mask) & ~mask); +} + +// Return a newly created CRC32AcceleratedX86ARMCombined if we can use Intel's +// or ARM's CRC acceleration for a given polynomial. Return nullptr otherwise. +CRCImpl* TryNewCRC32AcceleratedX86ARMCombined(); + +// Return all possible hardware accelerated implementations. For testing only. +std::vector> NewCRC32AcceleratedX86ARMCombinedAll(); + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_INTERNAL_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy.h new file mode 100644 index 0000000000..8e728a6ea3 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy.h @@ -0,0 +1,112 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC_MEMCPY_H_ +#define ABSL_CRC_INTERNAL_CRC_MEMCPY_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +class CrcMemcpyEngine { + public: + virtual ~CrcMemcpyEngine() = default; + + virtual crc32c_t Compute(void* __restrict dst, const void* __restrict src, + std::size_t length, crc32c_t initial_crc) const = 0; + + protected: + CrcMemcpyEngine() = default; +}; + +class CrcMemcpy { + public: + static crc32c_t CrcAndCopy(void* __restrict dst, const void* __restrict src, + std::size_t length, + crc32c_t initial_crc = ToCrc32c(0), + bool non_temporal = false) { + static const ArchSpecificEngines engines = GetArchSpecificEngines(); + auto* engine = non_temporal ? engines.non_temporal : engines.temporal; + return engine->Compute(dst, src, length, initial_crc); + } + + // For testing only: get an architecture-specific engine for tests. + static std::unique_ptr GetTestEngine(int vector, + int integer); + + private: + struct ArchSpecificEngines { + CrcMemcpyEngine* temporal; + CrcMemcpyEngine* non_temporal; + }; + + static ArchSpecificEngines GetArchSpecificEngines(); +}; + +// Fallback CRC-memcpy engine. +class FallbackCrcMemcpyEngine : public CrcMemcpyEngine { + public: + FallbackCrcMemcpyEngine() = default; + FallbackCrcMemcpyEngine(const FallbackCrcMemcpyEngine&) = delete; + FallbackCrcMemcpyEngine operator=(const FallbackCrcMemcpyEngine&) = delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, + std::size_t length, crc32c_t initial_crc) const override; +}; + +// CRC Non-Temporal-Memcpy engine. +class CrcNonTemporalMemcpyEngine : public CrcMemcpyEngine { + public: + CrcNonTemporalMemcpyEngine() = default; + CrcNonTemporalMemcpyEngine(const CrcNonTemporalMemcpyEngine&) = delete; + CrcNonTemporalMemcpyEngine operator=(const CrcNonTemporalMemcpyEngine&) = + delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, + std::size_t length, crc32c_t initial_crc) const override; +}; + +// CRC Non-Temporal-Memcpy AVX engine. +class CrcNonTemporalMemcpyAVXEngine : public CrcMemcpyEngine { + public: + CrcNonTemporalMemcpyAVXEngine() = default; + CrcNonTemporalMemcpyAVXEngine(const CrcNonTemporalMemcpyAVXEngine&) = delete; + CrcNonTemporalMemcpyAVXEngine operator=( + const CrcNonTemporalMemcpyAVXEngine&) = delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, + std::size_t length, crc32c_t initial_crc) const override; +}; + +// Copy source to destination and return the CRC32C of the data copied. If an +// accelerated version is available, use the accelerated version, otherwise use +// the generic fallback version. 
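A usage sketch (not part of the patch) of the CrcAndCopy entry point declared above, assuming the crc32c_t/ToCrc32c helpers from crc32c.h in this snapshot; the defaults give a plain temporal copy, while passing non_temporal = true is intended for large one-shot copies that should bypass the cache:

```cpp
#include <cstddef>

#include "absl/crc/crc32c.h"
#include "absl/crc/internal/crc_memcpy.h"

// Copy n bytes from src to dst and return the CRC32C of the copied data,
// computed in the same pass as the copy.
absl::crc32c_t CopyWithCrc(char* dst, const char* src, std::size_t n) {
  return absl::crc_internal::CrcMemcpy::CrcAndCopy(
      dst, src, n, absl::ToCrc32c(0), /*non_temporal=*/false);
}
```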
+inline crc32c_t Crc32CAndCopy(void* __restrict dst, const void* __restrict src, + std::size_t length, + crc32c_t initial_crc = ToCrc32c(0), + bool non_temporal = false) { + return CrcMemcpy::CrcAndCopy(dst, src, length, initial_crc, non_temporal); +} + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_MEMCPY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc new file mode 100644 index 0000000000..4579c164d8 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc @@ -0,0 +1,75 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" +#include "absl/crc/internal/crc_memcpy.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +absl::crc32c_t FallbackCrcMemcpyEngine::Compute(void* __restrict dst, + const void* __restrict src, + std::size_t length, + crc32c_t initial_crc) const { + constexpr size_t kBlockSize = 8192; + absl::crc32c_t crc = initial_crc; + + const char* src_bytes = reinterpret_cast(src); + char* dst_bytes = reinterpret_cast(dst); + + // Copy + CRC loop - run 8k chunks until we are out of full chunks. CRC + // then copy was found to be slightly more efficient in our test cases. + std::size_t offset = 0; + for (; offset + kBlockSize < length; offset += kBlockSize) { + crc = absl::ExtendCrc32c(crc, + absl::string_view(src_bytes + offset, kBlockSize)); + memcpy(dst_bytes + offset, src_bytes + offset, kBlockSize); + } + + // Save some work if length is 0. 
+ if (offset < length) { + std::size_t final_copy_size = length - offset; + crc = absl::ExtendCrc32c( + crc, absl::string_view(src_bytes + offset, final_copy_size)); + memcpy(dst_bytes + offset, src_bytes + offset, final_copy_size); + } + + return crc; +} + +// Compile the following only if we don't have +#ifndef __SSE4_2__ + +CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() { + CrcMemcpy::ArchSpecificEngines engines; + engines.temporal = new FallbackCrcMemcpyEngine(); + engines.non_temporal = new FallbackCrcMemcpyEngine(); + return engines; +} + +std::unique_ptr CrcMemcpy::GetTestEngine(int /*vector*/, + int /*integer*/) { + return std::make_unique(); +} + +#endif + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc new file mode 100644 index 0000000000..708e866612 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc @@ -0,0 +1,169 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/crc/internal/crc_memcpy.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/crc/crc32c.h" +#include "absl/memory/memory.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" + +namespace { + +enum CrcEngine { + X86 = 0, + NONTEMPORAL = 1, + FALLBACK = 2, +}; + +// Correctness tests: +// - Every source/destination byte alignment 0-15, every size 0-511 bytes +// - Arbitrarily aligned source, large size +template +class CrcMemcpyTest : public testing::Test { + protected: + CrcMemcpyTest() { + source_ = std::make_unique(kSize); + destination_ = std::make_unique(kSize); + } + static constexpr size_t kAlignment = 16; + static constexpr size_t kMaxCopySize = max_size; + static constexpr size_t kSize = kAlignment + kMaxCopySize; + std::unique_ptr source_; + std::unique_ptr destination_; + + absl::BitGen gen_; +}; + +// Small test is slightly larger 4096 bytes to allow coverage of the "large" +// copy function. The minimum size to exercise all code paths in that function +// would be around 256 consecutive tests (getting every possible tail value +// and 0-2 small copy loops after the main block), so testing from 4096-4500 +// will cover all of those code paths multiple times. +typedef CrcMemcpyTest<4500> CrcSmallTest; +typedef CrcMemcpyTest<(1 << 24)> CrcLargeTest; +// Parametrize the small test so that it can be done with all configurations. 
+template +class x86ParamTestTemplate : public CrcSmallTest, + public ::testing::WithParamInterface { + protected: + x86ParamTestTemplate() { + if (GetParam().crc_engine_selector == FALLBACK) { + engine_ = std::make_unique(); + } else if (GetParam().crc_engine_selector == NONTEMPORAL) { + engine_ = + std::make_unique(); + } else { + engine_ = absl::crc_internal::CrcMemcpy::GetTestEngine( + GetParam().vector_lanes, GetParam().integer_lanes); + } + } + + // Convenience method. + ParamsT GetParam() const { + return ::testing::WithParamInterface::GetParam(); + } + + std::unique_ptr engine_; +}; +struct TestParams { + CrcEngine crc_engine_selector = X86; + int vector_lanes = 0; + int integer_lanes = 0; +}; +using x86ParamTest = x86ParamTestTemplate; +// SmallCorrectness is designed to exercise every possible set of code paths +// in the memcpy code, not including the loop. +TEST_P(x86ParamTest, SmallCorrectnessCheckSourceAlignment) { + constexpr size_t kTestSizes[] = {0, 100, 255, 512, 1024, 4000, kMaxCopySize}; + + for (size_t source_alignment = 0; source_alignment < kAlignment; + source_alignment++) { + for (auto size : kTestSizes) { + char* base_data = static_cast(source_.get()) + source_alignment; + for (size_t i = 0; i < size; i++) { + *(base_data + i) = + static_cast(absl::Uniform(gen_)); + } + absl::crc32c_t initial_crc = + absl::ToCrc32c(absl::Uniform(gen_)); + absl::crc32c_t experiment_crc = + engine_->Compute(destination_.get(), source_.get() + source_alignment, + size, initial_crc); + // Check the memory region to make sure it is the same + int mem_comparison = + memcmp(destination_.get(), source_.get() + source_alignment, size); + SCOPED_TRACE(absl::StrCat("Error in memcpy of size: ", size, + " with source alignment: ", source_alignment)); + ASSERT_EQ(mem_comparison, 0); + absl::crc32c_t baseline_crc = absl::ExtendCrc32c( + initial_crc, + absl::string_view( + static_cast(source_.get()) + source_alignment, size)); + ASSERT_EQ(baseline_crc, experiment_crc); + } + } +} + +TEST_P(x86ParamTest, SmallCorrectnessCheckDestAlignment) { + constexpr size_t kTestSizes[] = {0, 100, 255, 512, 1024, 4000, kMaxCopySize}; + + for (size_t dest_alignment = 0; dest_alignment < kAlignment; + dest_alignment++) { + for (auto size : kTestSizes) { + char* base_data = static_cast(source_.get()); + for (size_t i = 0; i < size; i++) { + *(base_data + i) = + static_cast(absl::Uniform(gen_)); + } + absl::crc32c_t initial_crc = + absl::ToCrc32c(absl::Uniform(gen_)); + absl::crc32c_t experiment_crc = + engine_->Compute(destination_.get() + dest_alignment, source_.get(), + size, initial_crc); + // Check the memory region to make sure it is the same + int mem_comparison = + memcmp(destination_.get() + dest_alignment, source_.get(), size); + SCOPED_TRACE(absl::StrCat("Error in memcpy of size: ", size, + " with dest alignment: ", dest_alignment)); + ASSERT_EQ(mem_comparison, 0); + absl::crc32c_t baseline_crc = absl::ExtendCrc32c( + initial_crc, + absl::string_view(static_cast(source_.get()), size)); + ASSERT_EQ(baseline_crc, experiment_crc); + } + } +} + +INSTANTIATE_TEST_SUITE_P(x86ParamTest, x86ParamTest, + ::testing::Values( + // Tests for configurations that may occur in prod. + TestParams{X86, 3, 0}, TestParams{X86, 1, 2}, + // Fallback test. 
+ TestParams{FALLBACK, 0, 0}, + // Non Temporal + TestParams{NONTEMPORAL, 0, 0})); + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc new file mode 100644 index 0000000000..4680fbce46 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc @@ -0,0 +1,435 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Simultaneous memcopy and CRC-32C for x86-64. Uses integer registers because +// XMM registers do not support the CRC instruction (yet). While copying, +// compute the running CRC of the data being copied. +// +// It is assumed that any CPU running this code has SSE4.2 instructions +// available (for CRC32C). This file will do nothing if that is not true. +// +// The CRC instruction has a 3-byte latency, and we are stressing the ALU ports +// here (unlike a traditional memcopy, which has almost no ALU use), so we will +// need to copy in such a way that the CRC unit is used efficiently. We have two +// regimes in this code: +// 1. For operations of size < kCrcSmallSize, do the CRC then the memcpy +// 2. For operations of size > kCrcSmallSize: +// a) compute an initial CRC + copy on a small amount of data to align the +// destination pointer on a 16-byte boundary. +// b) Split the data into 3 main regions and a tail (smaller than 48 bytes) +// c) Do the copy and CRC of the 3 main regions, interleaving (start with +// full cache line copies for each region, then move to single 16 byte +// pieces per region). +// d) Combine the CRCs with CRC32C::Concat. +// e) Copy the tail and extend the CRC with the CRC of the tail. +// This method is not ideal for op sizes between ~1k and ~8k because CRC::Concat +// takes a significant amount of time. A medium-sized approach could be added +// using 3 CRCs over fixed-size blocks where the zero-extensions required for +// CRC32C::Concat can be precomputed. 
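The size bookkeeping implied by regime 2 above can be illustrated with plain arithmetic. A standalone sketch (not part of the patch), assuming three regions and 16-byte blocks, showing how a length left over after the alignment copy splits into per-region sizes and a sub-48-byte tail:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  constexpr std::size_t kRegions = 3;
  constexpr std::size_t kBlockSize = 16;  // sizeof(__m128i)
  const std::size_t length = 100000;      // bytes left after the alignment copy

  // One "round" copies one block per region.
  std::size_t copy_rounds = length / (kRegions * kBlockSize);
  std::size_t region_size = copy_rounds * kBlockSize;
  std::size_t tail_size = length - kRegions * region_size;

  // Each region is CRCed independently and the CRCs are concatenated at the
  // end; the tail, always smaller than kRegions * kBlockSize = 48 bytes here,
  // is folded into the last region's CRC.
  assert(tail_size < kRegions * kBlockSize);
  std::printf("rounds=%zu region=%zu tail=%zu\n", copy_rounds, region_size,
              tail_size);
  return 0;
}
```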
+ +#include +#include + +#include "absl/crc/crc32c.h" +#include "absl/strings/string_view.h" + +#ifdef __SSE4_2__ + +#include +#include + +#include + +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/prefetch.h" +#include "absl/base/optimization.h" +#include "absl/crc/internal/cpu_detect.h" +#include "absl/crc/internal/crc_memcpy.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +namespace { + +inline crc32c_t ShortCrcCopy(char* dst, const char* src, std::size_t length, + crc32c_t crc) { + // Small copy: just go 1 byte at a time: being nice to the branch predictor + // is more important here than anything else + uint32_t crc_uint32 = static_cast(crc); + for (std::size_t i = 0; i < length; i++) { + uint8_t data = *reinterpret_cast(src); + crc_uint32 = _mm_crc32_u8(crc_uint32, data); + *reinterpret_cast(dst) = data; + ++src; + ++dst; + } + return ToCrc32c(crc_uint32); +} + +constexpr int kIntLoadsPerVec = sizeof(__m128i) / sizeof(uint64_t); + +// Common function for copying the tails of multiple large regions. +template +inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src, + size_t region_size, size_t copy_rounds) { + __m128i data[vec_regions]; + uint64_t int_data[kIntLoadsPerVec * int_regions]; + + while (copy_rounds > 0) { +#pragma unroll_completely + for (int i = 0; i < vec_regions; i++) { + int region = i; + + auto* vsrc = + reinterpret_cast(*src + region_size * region); + auto* vdst = reinterpret_cast<__m128i*>(*dst + region_size * region); + + // Load the blocks, unaligned + data[i] = _mm_loadu_si128(vsrc); + + // Store the blocks, aligned + _mm_store_si128(vdst, data[i]); + + // Compute the running CRC + crcs[region] = ToCrc32c(_mm_crc32_u64(static_cast(crcs[region]), + _mm_extract_epi64(data[i], 0))); + crcs[region] = ToCrc32c(_mm_crc32_u64(static_cast(crcs[region]), + _mm_extract_epi64(data[i], 1))); + } + +#pragma unroll_completely + for (int i = 0; i < int_regions; i++) { + int region = vec_regions + i; + + auto* usrc = + reinterpret_cast(*src + region_size * region); + auto* udst = reinterpret_cast(*dst + region_size * region); + +#pragma unroll_completely + for (int j = 0; j < kIntLoadsPerVec; j++) { + int data_index = i * kIntLoadsPerVec + j; + + int_data[data_index] = *(usrc + j); + crcs[region] = ToCrc32c(_mm_crc32_u64( + static_cast(crcs[region]), int_data[data_index])); + + *(udst + j) = int_data[data_index]; + } + } + + // Increment pointers + *src += sizeof(__m128i); + *dst += sizeof(__m128i); + --copy_rounds; + } +} + +} // namespace + +template +class AcceleratedCrcMemcpyEngine : public CrcMemcpyEngine { + public: + AcceleratedCrcMemcpyEngine() = default; + AcceleratedCrcMemcpyEngine(const AcceleratedCrcMemcpyEngine&) = delete; + AcceleratedCrcMemcpyEngine operator=(const AcceleratedCrcMemcpyEngine&) = + delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, + std::size_t length, crc32c_t initial_crc) const override; +}; + +template +crc32c_t AcceleratedCrcMemcpyEngine::Compute( + void* __restrict dst, const void* __restrict src, std::size_t length, + crc32c_t initial_crc) const { + constexpr std::size_t kRegions = vec_regions + int_regions; + constexpr crc32c_t kCrcDataXor = crc32c_t{0xffffffff}; + constexpr std::size_t kBlockSize = sizeof(__m128i); + constexpr std::size_t kCopyRoundSize = kRegions * kBlockSize; + + // Number of blocks per cacheline. 
+ constexpr std::size_t kBlocksPerCacheLine = ABSL_CACHELINE_SIZE / kBlockSize; + + char* dst_bytes = static_cast(dst); + const char* src_bytes = static_cast(src); + + // Make sure that one prefetch per big block is enough to cover the whole + // dataset, and we don't prefetch too much. + static_assert(ABSL_CACHELINE_SIZE % kBlockSize == 0, + "Cache lines are not divided evenly into blocks, may have " + "unintended behavior!"); + + // Experimentally-determined boundary between a small and large copy. + // Below this number, spin-up and concatenation of CRCs takes enough time that + // it kills the throughput gains of using 3 regions and wide vectors. + constexpr size_t kCrcSmallSize = 256; + + // Experimentally-determined prefetch distance. Main loop copies will + // prefeth data 2 cache lines ahead. + constexpr std::size_t kPrefetchAhead = 2 * ABSL_CACHELINE_SIZE; + + // Small-size CRC-memcpy : just do CRC + memcpy + if (length < kCrcSmallSize) { + crc32c_t crc = + ExtendCrc32c(initial_crc, absl::string_view(src_bytes, length)); + memcpy(dst, src, length); + return crc; + } + + // Start work on the CRC: undo the XOR from the previous calculation or set up + // the initial value of the CRC. + // initial_crc ^= kCrcDataXor; + initial_crc = initial_crc ^ kCrcDataXor; + + // Do an initial alignment copy, so we can use aligned store instructions to + // the destination pointer. We align the destination pointer because the + // penalty for an unaligned load is small compared to the penalty of an + // unaligned store on modern CPUs. + std::size_t bytes_from_last_aligned = + reinterpret_cast(dst) & (kBlockSize - 1); + if (bytes_from_last_aligned != 0) { + std::size_t bytes_for_alignment = kBlockSize - bytes_from_last_aligned; + + // Do the short-sized copy and CRC. + initial_crc = + ShortCrcCopy(dst_bytes, src_bytes, bytes_for_alignment, initial_crc); + src_bytes += bytes_for_alignment; + dst_bytes += bytes_for_alignment; + length -= bytes_for_alignment; + } + + // We are going to do the copy and CRC in kRegions regions to make sure that + // we can saturate the CRC unit. The CRCs will be combined at the end of the + // run. Copying will use the SSE registers, and we will extract words from + // the SSE registers to add to the CRC. Initially, we run the loop one full + // cache line per region at a time, in order to insert prefetches. + + // Initialize CRCs for kRegions regions. + crc32c_t crcs[kRegions]; + crcs[0] = initial_crc; + for (int i = 1; i < kRegions; i++) { + crcs[i] = kCrcDataXor; + } + + // Find the number of rounds to copy and the region size. Also compute the + // tail size here. + int64_t copy_rounds = length / kCopyRoundSize; + + // Find the size of each region and the size of the tail. + const std::size_t region_size = copy_rounds * kBlockSize; + const std::size_t tail_size = length - (kRegions * region_size); + + // Holding registers for data in each region. + __m128i vec_data[vec_regions]; + uint64_t int_data[int_regions * kIntLoadsPerVec]; + + // Main loop. + while (copy_rounds > kBlocksPerCacheLine) { + // Prefetch kPrefetchAhead bytes ahead of each pointer. +#pragma unroll_completely + for (int i = 0; i < kRegions; i++) { + absl::base_internal::PrefetchT0(src_bytes + kPrefetchAhead + + region_size * i); + absl::base_internal::PrefetchT0(dst_bytes + kPrefetchAhead + + region_size * i); + } + + // Load and store data, computing CRC on the way. +#pragma unroll_completely + for (int i = 0; i < kBlocksPerCacheLine; i++) { + // Copy and CRC the data for the CRC regions. 
+#pragma unroll_completely + for (int j = 0; j < vec_regions; j++) { + // Cycle which regions get vector load/store and integer load/store, to + // engage prefetching logic around vector load/stores and save issue + // slots by using the integer registers. + int region = (j + i) % kRegions; + + auto* src = reinterpret_cast(src_bytes + + region_size * region); + auto* dst = + reinterpret_cast<__m128i*>(dst_bytes + region_size * region); + + // Load and CRC data. + vec_data[j] = _mm_loadu_si128(src + i); + crcs[region] = + ToCrc32c(_mm_crc32_u64(static_cast(crcs[region]), + _mm_extract_epi64(vec_data[j], 0))); + crcs[region] = + ToCrc32c(_mm_crc32_u64(static_cast(crcs[region]), + _mm_extract_epi64(vec_data[j], 1))); + + // Store the data. + _mm_store_si128(dst + i, vec_data[j]); + } + + // Preload the partial CRCs for the CLMUL subregions. +#pragma unroll_completely + for (int j = 0; j < int_regions; j++) { + // Cycle which regions get vector load/store and integer load/store, to + // engage prefetching logic around vector load/stores and save issue + // slots by using the integer registers. + int region = (j + vec_regions + i) % kRegions; + + auto* usrc = + reinterpret_cast(src_bytes + region_size * region); + auto* udst = + reinterpret_cast(dst_bytes + region_size * region); + +#pragma unroll_completely + for (int k = 0; k < kIntLoadsPerVec; k++) { + int data_index = j * kIntLoadsPerVec + k; + + // Load and CRC the data. + int_data[data_index] = *(usrc + i * kIntLoadsPerVec + k); + crcs[region] = ToCrc32c(_mm_crc32_u64( + static_cast(crcs[region]), int_data[data_index])); + + // Store the data. + *(udst + i * kIntLoadsPerVec + k) = int_data[data_index]; + } + } + } + + // Increment pointers + src_bytes += kBlockSize * kBlocksPerCacheLine; + dst_bytes += kBlockSize * kBlocksPerCacheLine; + copy_rounds -= kBlocksPerCacheLine; + } + + // Copy and CRC the tails of each region. + LargeTailCopy(crcs, &dst_bytes, &src_bytes, + region_size, copy_rounds); + + // Move the source and destination pointers to the end of the region + src_bytes += region_size * (kRegions - 1); + dst_bytes += region_size * (kRegions - 1); + + // Finalize the first CRCs: XOR the internal CRCs by the XOR mask to undo the + // XOR done before doing block copy + CRCs. + for (int i = 0; i < kRegions - 1; i++) { + crcs[i] = crcs[i] ^ kCrcDataXor; + } + + // Build a CRC of the first kRegions - 1 regions. + crc32c_t full_crc = crcs[0]; + for (int i = 1; i < kRegions - 1; i++) { + full_crc = ConcatCrc32c(full_crc, crcs[i], region_size); + } + + // Copy and CRC the tail through the XMM registers. + std::size_t tail_blocks = tail_size / kBlockSize; + LargeTailCopy<0, 1>(&crcs[kRegions - 1], &dst_bytes, &src_bytes, 0, + tail_blocks); + + // Final tail copy for under 16 bytes. + crcs[kRegions - 1] = + ShortCrcCopy(dst_bytes, src_bytes, tail_size - tail_blocks * kBlockSize, + crcs[kRegions - 1]); + + // Finalize and concatenate the final CRC, then return. + crcs[kRegions - 1] = crcs[kRegions - 1] ^ kCrcDataXor; + return ConcatCrc32c(full_crc, crcs[kRegions - 1], region_size + tail_size); +} + +CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() { +#ifdef UNDEFINED_BEHAVIOR_SANITIZER + // UBSAN does not play nicely with unaligned loads (which we use a lot). + // Get the underlying architecture. 
+ CpuType cpu_type = GetCpuType(); + switch (cpu_type) { + case CpuType::kUnknown: + case CpuType::kAmdRome: + case CpuType::kAmdNaples: + case CpuType::kIntelCascadelakeXeon: + case CpuType::kIntelSkylakeXeon: + case CpuType::kIntelSkylake: + case CpuType::kIntelBroadwell: + case CpuType::kIntelHaswell: + case CpuType::kIntelIvybridge: + return { + .temporal = new FallbackCrcMemcpyEngine(), + .non_temporal = new CrcNonTemporalMemcpyAVXEngine(), + }; + // INTEL_SANDYBRIDGE performs better with SSE than AVX. + case CpuType::kIntelSandybridge: + return { + .temporal = new FallbackCrcMemcpyEngine(), + .non_temporal = new CrcNonTemporalMemcpyEngine(), + }; + default: + return {.temporal = new FallbackCrcMemcpyEngine(), + .non_temporal = new FallbackCrcMemcpyEngine()}; + } +#else + // Get the underlying architecture. + CpuType cpu_type = GetCpuType(); + switch (cpu_type) { + // On Zen 2, PEXTRQ uses 2 micro-ops, including one on the vector store port + // which data movement from the vector registers to the integer registers + // (where CRC32C happens) to crowd the same units as vector stores. As a + // result, using that path exclusively causes bottlenecking on this port. + // We can avoid this bottleneck by using the integer side of the CPU for + // most operations rather than the vector side. We keep a vector region to + // engage some of the prefetching logic in the cache hierarchy which seems + // to give vector instructions special treatment. These prefetch units see + // strided access to each region, and do the right thing. + case CpuType::kAmdRome: + case CpuType::kAmdNaples: + return { + .temporal = new AcceleratedCrcMemcpyEngine<1, 2>(), + .non_temporal = new CrcNonTemporalMemcpyAVXEngine(), + }; + // PCLMULQDQ is slow and we don't have wide enough issue width to take + // advantage of it. For an unknown architecture, don't risk using CLMULs. + case CpuType::kIntelCascadelakeXeon: + case CpuType::kIntelSkylakeXeon: + case CpuType::kIntelSkylake: + case CpuType::kIntelBroadwell: + case CpuType::kIntelHaswell: + case CpuType::kIntelIvybridge: + return { + .temporal = new AcceleratedCrcMemcpyEngine<3, 0>(), + .non_temporal = new CrcNonTemporalMemcpyAVXEngine(), + }; + // INTEL_SANDYBRIDGE performs better with SSE than AVX. + case CpuType::kIntelSandybridge: + return { + .temporal = new AcceleratedCrcMemcpyEngine<3, 0>(), + .non_temporal = new CrcNonTemporalMemcpyEngine(), + }; + default: + return {.temporal = new FallbackCrcMemcpyEngine(), + .non_temporal = new FallbackCrcMemcpyEngine()}; + } +#endif // UNDEFINED_BEHAVIOR_SANITIZER +} + +// For testing, allow the user to specify which engine they want. 
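Editorial aside before the test-engine accessor below (not part of the patch): the accelerated Compute() above stitches its per-region checksums together with ConcatCrc32c(). The identity it relies on can be exercised through the public CRC32C API; a minimal sketch, assuming the helpers ComputeCrc32c(), ExtendCrc32c(), and ConcatCrc32c() declared in absl/crc/crc32c.h:

```cpp
#include <cassert>
#include <string>

#include "absl/crc/crc32c.h"

void CrcConcatIdentityDemo() {
  const std::string a = "hello, ";
  const std::string b = "world";

  // CRC of the whole buffer in one pass.
  const absl::crc32c_t whole = absl::ComputeCrc32c(a + b);

  // Same result by extending the CRC of `a` with the bytes of `b`.
  const absl::crc32c_t extended =
      absl::ExtendCrc32c(absl::ComputeCrc32c(a), b);

  // Same result again by combining two independently computed CRCs, which is
  // what the multi-region engine does after its parallel streams finish.
  const absl::crc32c_t combined = absl::ConcatCrc32c(
      absl::ComputeCrc32c(a), absl::ComputeCrc32c(b), b.size());

  assert(whole == extended);
  assert(whole == combined);
}
```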
+std::unique_ptr CrcMemcpy::GetTestEngine(int vector, + int integer) { + if (vector == 3 && integer == 0) { + return std::make_unique>(); + } else if (vector == 1 && integer == 2) { + return std::make_unique>(); + } + return nullptr; +} + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // __SSE4_2__ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc new file mode 100644 index 0000000000..adc867f6b7 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc @@ -0,0 +1,93 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" +#include "absl/crc/internal/crc_memcpy.h" +#include "absl/crc/internal/non_temporal_memcpy.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +crc32c_t CrcNonTemporalMemcpyEngine::Compute(void* __restrict dst, + const void* __restrict src, + std::size_t length, + crc32c_t initial_crc) const { + constexpr size_t kBlockSize = 8192; + crc32c_t crc = initial_crc; + + const char* src_bytes = reinterpret_cast(src); + char* dst_bytes = reinterpret_cast(dst); + + // Copy + CRC loop - run 8k chunks until we are out of full chunks. + std::size_t offset = 0; + for (; offset + kBlockSize < length; offset += kBlockSize) { + crc = absl::ExtendCrc32c(crc, + absl::string_view(src_bytes + offset, kBlockSize)); + non_temporal_store_memcpy(dst_bytes + offset, src_bytes + offset, + kBlockSize); + } + + // Save some work if length is 0. + if (offset < length) { + std::size_t final_copy_size = length - offset; + crc = ExtendCrc32c(crc, + absl::string_view(src_bytes + offset, final_copy_size)); + + non_temporal_store_memcpy(dst_bytes + offset, src_bytes + offset, + final_copy_size); + } + + return crc; +} + +crc32c_t CrcNonTemporalMemcpyAVXEngine::Compute(void* __restrict dst, + const void* __restrict src, + std::size_t length, + crc32c_t initial_crc) const { + constexpr size_t kBlockSize = 8192; + crc32c_t crc = initial_crc; + + const char* src_bytes = reinterpret_cast(src); + char* dst_bytes = reinterpret_cast(dst); + + // Copy + CRC loop - run 8k chunks until we are out of full chunks. + std::size_t offset = 0; + for (; offset + kBlockSize < length; offset += kBlockSize) { + crc = ExtendCrc32c(crc, absl::string_view(src_bytes + offset, kBlockSize)); + + non_temporal_store_memcpy_avx(dst_bytes + offset, src_bytes + offset, + kBlockSize); + } + + // Save some work if length is 0. 
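  // (Editorial note, not part of the patch: as in the non-AVX variant above,
  // this branch handles the final chunk. The loop condition
  // `offset + kBlockSize < length` always leaves between 1 and kBlockSize
  // bytes behind when length > 0, so the branch is skipped only for a
  // zero-length input.)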
+ if (offset < length) { + std::size_t final_copy_size = length - offset; + crc = ExtendCrc32c(crc, + absl::string_view(src_bytes + offset, final_copy_size)); + + non_temporal_store_memcpy_avx(dst_bytes + offset, src_bytes + offset, + final_copy_size); + } + + return crc; +} + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc new file mode 100644 index 0000000000..06f9c69cd3 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc @@ -0,0 +1,691 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Hardware accelerated CRC32 computation on Intel and ARM architecture. + +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/call_once.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/endian.h" +#include "absl/base/internal/prefetch.h" +#include "absl/crc/internal/cpu_detect.h" +#include "absl/crc/internal/crc.h" +#include "absl/crc/internal/crc32_x86_arm_combined_simd.h" +#include "absl/crc/internal/crc_internal.h" +#include "absl/memory/memory.h" +#include "absl/numeric/bits.h" + +#if defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \ + defined(__ARM_FEATURE_CRC32) && defined(__ARM_NEON) +#define ABSL_INTERNAL_CAN_USE_SIMD_CRC32C +#elif defined(__SSE4_2__) && defined(__PCLMUL__) +#define ABSL_INTERNAL_CAN_USE_SIMD_CRC32C +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { + +#if defined(ABSL_INTERNAL_CAN_USE_SIMD_CRC32C) + +// Implementation details not exported outside of file +namespace { + +// Some machines have CRC acceleration hardware. +// We can do a faster version of Extend() on such machines. +class CRC32AcceleratedX86ARMCombined : public CRC32 { + public: + CRC32AcceleratedX86ARMCombined() {} + ~CRC32AcceleratedX86ARMCombined() override {} + void ExtendByZeroes(uint32_t* crc, size_t length) const override; + uint32_t ComputeZeroConstant(size_t length) const; + + private: + CRC32AcceleratedX86ARMCombined(const CRC32AcceleratedX86ARMCombined&) = + delete; + CRC32AcceleratedX86ARMCombined& operator=( + const CRC32AcceleratedX86ARMCombined&) = delete; +}; + +// Constants for switching between algorithms. +// Chosen by comparing speed at different powers of 2. 
+constexpr int kSmallCutoff = 256; +constexpr int kMediumCutoff = 2048; + +#define ABSL_INTERNAL_STEP1(crc) \ + do { \ + crc = CRC32_u8(crc, *p++); \ + } while (0) +#define ABSL_INTERNAL_STEP2(crc) \ + do { \ + crc = CRC32_u16(crc, absl::little_endian::Load16(p)); \ + p += 2; \ + } while (0) +#define ABSL_INTERNAL_STEP4(crc) \ + do { \ + crc = CRC32_u32(crc, absl::little_endian::Load32(p)); \ + p += 4; \ + } while (0) +#define ABSL_INTERNAL_STEP8(crc, data) \ + do { \ + crc = CRC32_u64(crc, absl::little_endian::Load64(data)); \ + data += 8; \ + } while (0) +#define ABSL_INTERNAL_STEP8BY2(crc0, crc1, p0, p1) \ + do { \ + ABSL_INTERNAL_STEP8(crc0, p0); \ + ABSL_INTERNAL_STEP8(crc1, p1); \ + } while (0) +#define ABSL_INTERNAL_STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \ + do { \ + ABSL_INTERNAL_STEP8(crc0, p0); \ + ABSL_INTERNAL_STEP8(crc1, p1); \ + ABSL_INTERNAL_STEP8(crc2, p2); \ + } while (0) + +uint32_t multiply(uint32_t a, uint32_t b) { + V128 shifts = V128_From2x64(0, 1); + V128 power = V128_From2x64(0, a); + V128 crc = V128_From2x64(0, b); + V128 res = V128_PMulLow(power, crc); + + // Combine crc values + res = V128_ShiftLeft64(res, shifts); + return V128_Extract32<1>(res) ^ CRC32_u32(0, V128_Low64(res)); +} + +namespace { + +// Powers of crc32c polynomial, for faster ExtendByZeros. +// Verified against folly: +// folly/hash/detail/Crc32CombineDetail.cpp +constexpr uint32_t kCRC32CPowers[] = { + 0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955, 0xb8fdb1e7, + 0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62, 0x28461564, + 0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f, 0x538586e3, + 0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe, 0xe94ca9bc, + 0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000, 0x00800000, + 0x00008000, 0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955, + 0xb8fdb1e7, 0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62, + 0x28461564, 0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f, + 0x538586e3, 0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe, + 0xe94ca9bc, 0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000, + 0x00800000, 0x00008000, +}; + +} // namespace + +// Compute a magic constant, so that multiplying by it is the same as +// extending crc by length zeros. +uint32_t CRC32AcceleratedX86ARMCombined::ComputeZeroConstant( + size_t length) const { + // Lowest 2 bits are handled separately in ExtendByZeroes + length >>= 2; + + int index = absl::countr_zero(length); + uint32_t prev = kCRC32CPowers[index]; + length &= length - 1; + + while (length) { + // For each bit of length, extend by 2**n zeros. + index = absl::countr_zero(length); + prev = multiply(prev, kCRC32CPowers[index]); + length &= length - 1; + } + return prev; +} + +void CRC32AcceleratedX86ARMCombined::ExtendByZeroes(uint32_t* crc, + size_t length) const { + uint32_t val = *crc; + // Don't bother with multiplication for small length. + switch (length & 3) { + case 0: + break; + case 1: + val = CRC32_u8(val, 0); + break; + case 2: + val = CRC32_u16(val, 0); + break; + case 3: + val = CRC32_u8(val, 0); + val = CRC32_u16(val, 0); + break; + } + if (length > 3) { + val = multiply(val, ComputeZeroConstant(length)); + } + *crc = val; +} + +// Taken from Intel paper "Fast CRC Computation for iSCSI Polynomial Using CRC32 +// Instruction" +// https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/crc-iscsi-polynomial-crc32-instruction-paper.pdf +// We only need every 4th value, because we unroll loop by 4. 
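Editorial aside (not part of the patch): ComputeZeroConstant() above decomposes the zero-extension length into powers of two and multiplies the matching kCRC32CPowers[] entries together, one per set bit. A minimal standalone sketch of that decomposition (C++20 for std::countr_zero):

```cpp
#include <bit>
#include <cstddef>
#include <cstdio>

// Show which power-table entries get multiplied together when extending a
// CRC32C by `length` zero bytes, mirroring ComputeZeroConstant() above.
void ShowZeroExtensionDecomposition(std::size_t length) {
  length >>= 2;  // the low two bits are handled directly in ExtendByZeroes()
  while (length != 0) {
    const int index = std::countr_zero(length);
    std::printf("multiply by kCRC32CPowers[%d]  // covers 2^%d 4-byte words\n",
                index, index);
    length &= length - 1;  // clear the lowest set bit
  }
}
```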
+constexpr uint64_t kClmulConstants[] = { + 0x09e4addf8, 0x0ba4fc28e, 0x00d3b6092, 0x09e4addf8, 0x0ab7aff2a, + 0x102f9b8a2, 0x0b9e02b86, 0x00d3b6092, 0x1bf2e8b8a, 0x18266e456, + 0x0d270f1a2, 0x0ab7aff2a, 0x11eef4f8e, 0x083348832, 0x0dd7e3b0c, + 0x0b9e02b86, 0x0271d9844, 0x1b331e26a, 0x06b749fb2, 0x1bf2e8b8a, + 0x0e6fc4e6a, 0x0ce7f39f4, 0x0d7a4825c, 0x0d270f1a2, 0x026f6a60a, + 0x12ed0daac, 0x068bce87a, 0x11eef4f8e, 0x1329d9f7e, 0x0b3e32c28, + 0x0170076fa, 0x0dd7e3b0c, 0x1fae1cc66, 0x010746f3c, 0x086d8e4d2, + 0x0271d9844, 0x0b3af077a, 0x093a5f730, 0x1d88abd4a, 0x06b749fb2, + 0x0c9c8b782, 0x0cec3662e, 0x1ddffc5d4, 0x0e6fc4e6a, 0x168763fa6, + 0x0b0cd4768, 0x19b1afbc4, 0x0d7a4825c, 0x123888b7a, 0x00167d312, + 0x133d7a042, 0x026f6a60a, 0x000bcf5f6, 0x19d34af3a, 0x1af900c24, + 0x068bce87a, 0x06d390dec, 0x16cba8aca, 0x1f16a3418, 0x1329d9f7e, + 0x19fb2a8b0, 0x02178513a, 0x1a0f717c4, 0x0170076fa, +}; + +enum class CutoffStrategy { + // Use 3 CRC streams to fold into 1. + Fold3, + // Unroll CRC instructions for 64 bytes. + Unroll64CRC, +}; + +template +class CRC32AcceleratedX86ARMCombinedMultipleStreams + : public CRC32AcceleratedX86ARMCombined { + ABSL_ATTRIBUTE_HOT + void Extend(uint32_t* crc, const void* bytes, size_t length) const override { + static_assert(num_crc_streams >= 1 && num_crc_streams <= kMaxStreams, + "Invalid number of crc streams"); + static_assert(num_pclmul_streams >= 0 && num_pclmul_streams <= kMaxStreams, + "Invalid number of pclmul streams"); + const uint8_t* p = static_cast(bytes); + const uint8_t* e = p + length; + uint32_t l = *crc; + uint64_t l64; + + // We have dedicated instruction for 1,2,4 and 8 bytes. + if (length & 8) { + ABSL_INTERNAL_STEP8(l, p); + length &= ~8LL; + } + if (length & 4) { + ABSL_INTERNAL_STEP4(l); + length &= ~4LL; + } + if (length & 2) { + ABSL_INTERNAL_STEP2(l); + length &= ~2LL; + } + if (length & 1) { + ABSL_INTERNAL_STEP1(l); + length &= ~1LL; + } + if (length == 0) { + *crc = l; + return; + } + // length is now multiple of 16. + + // For small blocks just run simple loop, because cost of combining multiple + // streams is significant. + if (strategy != CutoffStrategy::Unroll64CRC) { + if (length < kSmallCutoff) { + while (length >= 16) { + ABSL_INTERNAL_STEP8(l, p); + ABSL_INTERNAL_STEP8(l, p); + length -= 16; + } + *crc = l; + return; + } + } + + // For medium blocks we run 3 crc streams and combine them as described in + // Intel paper above. Running 4th stream doesn't help, because crc + // instruction has latency 3 and throughput 1. + if (length < kMediumCutoff) { + l64 = l; + if (strategy == CutoffStrategy::Fold3) { + uint64_t l641 = 0; + uint64_t l642 = 0; + const int blockSize = 32; + int64_t bs = (e - p) / kGroupsSmall / blockSize; + const uint8_t* p1 = p + bs * blockSize; + const uint8_t* p2 = p1 + bs * blockSize; + + for (int64_t i = 0; i < bs - 1; ++i) { + ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + } + // Don't run crc on last 8 bytes. 
+ ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2); + ABSL_INTERNAL_STEP8BY2(l64, l641, p, p1); + + V128 magic = *(reinterpret_cast(kClmulConstants) + bs - 1); + + V128 tmp = V128_From2x64(0, l64); + + V128 res1 = V128_PMulLow(tmp, magic); + + tmp = V128_From2x64(0, l641); + + V128 res2 = V128_PMul10(tmp, magic); + V128 x = V128_Xor(res1, res2); + l64 = V128_Low64(x) ^ absl::little_endian::Load64(p2); + l64 = CRC32_u64(l642, l64); + + p = p2 + 8; + } else if (strategy == CutoffStrategy::Unroll64CRC) { + while ((e - p) >= 64) { + l64 = Process64BytesCRC(p, l64); + p += 64; + } + } + } else { + // There is a lot of data, we can ignore combine costs and run all + // requested streams (num_crc_streams + num_pclmul_streams), + // using prefetch. CRC and PCLMULQDQ use different cpu execution units, + // so on some cpus it makes sense to execute both of them for different + // streams. + + // Point x at first 8-byte aligned byte in string. + const uint8_t* x = RoundUp<8>(p); + // Process bytes until p is 8-byte aligned, if that isn't past the end. + while (p != x) { + ABSL_INTERNAL_STEP1(l); + } + + int64_t bs = (e - p) / (num_crc_streams + num_pclmul_streams) / 64; + const uint8_t* crc_streams[kMaxStreams]; + const uint8_t* pclmul_streams[kMaxStreams]; + // We are guaranteed to have at least one crc stream. + crc_streams[0] = p; + for (int i = 1; i < num_crc_streams; i++) { + crc_streams[i] = crc_streams[i - 1] + bs * 64; + } + pclmul_streams[0] = crc_streams[num_crc_streams - 1] + bs * 64; + for (int i = 1; i < num_pclmul_streams; i++) { + pclmul_streams[i] = pclmul_streams[i - 1] + bs * 64; + } + + // Per stream crc sums. + uint64_t l64_crc[kMaxStreams] = {l}; + uint64_t l64_pclmul[kMaxStreams] = {0}; + + // Peel first iteration, because PCLMULQDQ stream, needs setup. + for (int i = 0; i < num_crc_streams; i++) { + l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]); + crc_streams[i] += 16 * 4; + } + + V128 partialCRC[kMaxStreams][4]; + for (int i = 0; i < num_pclmul_streams; i++) { + partialCRC[i][0] = V128_LoadU( + reinterpret_cast(pclmul_streams[i] + 16 * 0)); + partialCRC[i][1] = V128_LoadU( + reinterpret_cast(pclmul_streams[i] + 16 * 1)); + partialCRC[i][2] = V128_LoadU( + reinterpret_cast(pclmul_streams[i] + 16 * 2)); + partialCRC[i][3] = V128_LoadU( + reinterpret_cast(pclmul_streams[i] + 16 * 3)); + pclmul_streams[i] += 16 * 4; + } + + for (int64_t i = 1; i < bs; i++) { + // Prefetch data for next itterations. + for (int j = 0; j < num_crc_streams; j++) { + base_internal::PrefetchT0( + reinterpret_cast(crc_streams[j] + kPrefetchHorizon)); + } + for (int j = 0; j < num_pclmul_streams; j++) { + base_internal::PrefetchT0(reinterpret_cast( + pclmul_streams[j] + kPrefetchHorizon)); + } + + // We process each stream in 64 byte blocks. This can be written as + // for (int i = 0; i < num_pclmul_streams; i++) { + // Process64BytesPclmul(pclmul_streams[i], partialCRC[i]); + // pclmul_streams[i] += 16 * 4; + // } + // for (int i = 0; i < num_crc_streams; i++) { + // l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]); + // crc_streams[i] += 16*4; + // } + // But unrolling and interleaving PCLMULQDQ and CRC blocks manually + // gives ~2% performance boost. 
+ l64_crc[0] = Process64BytesCRC(crc_streams[0], l64_crc[0]); + crc_streams[0] += 16 * 4; + if (num_pclmul_streams > 0) { + Process64BytesPclmul(pclmul_streams[0], partialCRC[0]); + pclmul_streams[0] += 16 * 4; + } + if (num_crc_streams > 1) { + l64_crc[1] = Process64BytesCRC(crc_streams[1], l64_crc[1]); + crc_streams[1] += 16 * 4; + } + if (num_pclmul_streams > 1) { + Process64BytesPclmul(pclmul_streams[1], partialCRC[1]); + pclmul_streams[1] += 16 * 4; + } + if (num_crc_streams > 2) { + l64_crc[2] = Process64BytesCRC(crc_streams[2], l64_crc[2]); + crc_streams[2] += 16 * 4; + } + if (num_pclmul_streams > 2) { + Process64BytesPclmul(pclmul_streams[2], partialCRC[2]); + pclmul_streams[2] += 16 * 4; + } + } + + // PCLMULQDQ based streams require special final step; + // CRC based don't. + for (int i = 0; i < num_pclmul_streams; i++) { + l64_pclmul[i] = FinalizePclmulStream(partialCRC[i]); + } + + // Combine all streams into single result. + uint32_t magic = ComputeZeroConstant(bs * 64); + l64 = l64_crc[0]; + for (int i = 1; i < num_crc_streams; i++) { + l64 = multiply(l64, magic); + l64 ^= l64_crc[i]; + } + for (int i = 0; i < num_pclmul_streams; i++) { + l64 = multiply(l64, magic); + l64 ^= l64_pclmul[i]; + } + + // Update p. + if (num_pclmul_streams > 0) { + p = pclmul_streams[num_pclmul_streams - 1]; + } else { + p = crc_streams[num_crc_streams - 1]; + } + } + l = l64; + + while ((e - p) >= 16) { + ABSL_INTERNAL_STEP8(l, p); + ABSL_INTERNAL_STEP8(l, p); + } + // Process the last few bytes + while (p != e) { + ABSL_INTERNAL_STEP1(l); + } + +#undef ABSL_INTERNAL_STEP8BY3 +#undef ABSL_INTERNAL_STEP8BY2 +#undef ABSL_INTERNAL_STEP8 +#undef ABSL_INTERNAL_STEP4 +#undef ABSL_INTERNAL_STEP2 +#undef ABSL_INTERNAL_STEP1 + + *crc = l; + } + + private: + // Update partialCRC with crc of 64 byte block. Calling FinalizePclmulStream + // would produce a single crc checksum, but it is expensive. PCLMULQDQ has a + // high latency, so we run 4 128-bit partial checksums that can be reduced to + // a single value by FinalizePclmulStream later. Computing crc for arbitrary + // polynomialas with PCLMULQDQ is described in Intel paper "Fast CRC + // Computation for Generic Polynomials Using PCLMULQDQ Instruction" + // https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf + // We are applying it to CRC32C polynomial. 
+ ABSL_ATTRIBUTE_ALWAYS_INLINE void Process64BytesPclmul( + const uint8_t* p, V128* partialCRC) const { + V128 loopMultiplicands = V128_Load(reinterpret_cast(k1k2)); + + V128 partialCRC1 = partialCRC[0]; + V128 partialCRC2 = partialCRC[1]; + V128 partialCRC3 = partialCRC[2]; + V128 partialCRC4 = partialCRC[3]; + + V128 tmp1 = V128_PMulHi(partialCRC1, loopMultiplicands); + V128 tmp2 = V128_PMulHi(partialCRC2, loopMultiplicands); + V128 tmp3 = V128_PMulHi(partialCRC3, loopMultiplicands); + V128 tmp4 = V128_PMulHi(partialCRC4, loopMultiplicands); + V128 data1 = V128_LoadU(reinterpret_cast(p + 16 * 0)); + V128 data2 = V128_LoadU(reinterpret_cast(p + 16 * 1)); + V128 data3 = V128_LoadU(reinterpret_cast(p + 16 * 2)); + V128 data4 = V128_LoadU(reinterpret_cast(p + 16 * 3)); + partialCRC1 = V128_PMulLow(partialCRC1, loopMultiplicands); + partialCRC2 = V128_PMulLow(partialCRC2, loopMultiplicands); + partialCRC3 = V128_PMulLow(partialCRC3, loopMultiplicands); + partialCRC4 = V128_PMulLow(partialCRC4, loopMultiplicands); + partialCRC1 = V128_Xor(tmp1, partialCRC1); + partialCRC2 = V128_Xor(tmp2, partialCRC2); + partialCRC3 = V128_Xor(tmp3, partialCRC3); + partialCRC4 = V128_Xor(tmp4, partialCRC4); + partialCRC1 = V128_Xor(partialCRC1, data1); + partialCRC2 = V128_Xor(partialCRC2, data2); + partialCRC3 = V128_Xor(partialCRC3, data3); + partialCRC4 = V128_Xor(partialCRC4, data4); + partialCRC[0] = partialCRC1; + partialCRC[1] = partialCRC2; + partialCRC[2] = partialCRC3; + partialCRC[3] = partialCRC4; + } + + // Reduce partialCRC produced by Process64BytesPclmul into a single value, + // that represents crc checksum of all the processed bytes. + ABSL_ATTRIBUTE_ALWAYS_INLINE uint64_t + FinalizePclmulStream(V128* partialCRC) const { + V128 partialCRC1 = partialCRC[0]; + V128 partialCRC2 = partialCRC[1]; + V128 partialCRC3 = partialCRC[2]; + V128 partialCRC4 = partialCRC[3]; + + // Combine 4 vectors of partial crc into a single vector. + V128 reductionMultiplicands = + V128_Load(reinterpret_cast(k5k6)); + + V128 low = V128_PMulLow(reductionMultiplicands, partialCRC1); + V128 high = V128_PMulHi(reductionMultiplicands, partialCRC1); + + partialCRC1 = V128_Xor(low, high); + partialCRC1 = V128_Xor(partialCRC1, partialCRC2); + + low = V128_PMulLow(reductionMultiplicands, partialCRC3); + high = V128_PMulHi(reductionMultiplicands, partialCRC3); + + partialCRC3 = V128_Xor(low, high); + partialCRC3 = V128_Xor(partialCRC3, partialCRC4); + + reductionMultiplicands = V128_Load(reinterpret_cast(k3k4)); + + low = V128_PMulLow(reductionMultiplicands, partialCRC1); + high = V128_PMulHi(reductionMultiplicands, partialCRC1); + V128 fullCRC = V128_Xor(low, high); + fullCRC = V128_Xor(fullCRC, partialCRC3); + + // Reduce fullCRC into scalar value. 
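    // (Editorial note, not part of the patch: the remaining steps follow the
    // final-reduction recipe in the Intel PCLMULQDQ white paper cited above:
    // fold the 128-bit remainder down with the k5k6 and k7k0 constants, then
    // finish with a Barrett reduction using kPoly.)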
+ reductionMultiplicands = V128_Load(reinterpret_cast(k5k6)); + + V128 mask = V128_Load(reinterpret_cast(kMask)); + + V128 tmp = V128_PMul01(reductionMultiplicands, fullCRC); + fullCRC = V128_ShiftRight<8>(fullCRC); + fullCRC = V128_Xor(fullCRC, tmp); + + reductionMultiplicands = V128_Load(reinterpret_cast(k7k0)); + + tmp = V128_ShiftRight<4>(fullCRC); + fullCRC = V128_And(fullCRC, mask); + fullCRC = V128_PMulLow(reductionMultiplicands, fullCRC); + fullCRC = V128_Xor(tmp, fullCRC); + + reductionMultiplicands = V128_Load(reinterpret_cast(kPoly)); + + tmp = V128_And(fullCRC, mask); + tmp = V128_PMul01(reductionMultiplicands, tmp); + tmp = V128_And(tmp, mask); + tmp = V128_PMulLow(reductionMultiplicands, tmp); + + fullCRC = V128_Xor(tmp, fullCRC); + + return V128_Extract32<1>(fullCRC); + } + + // Update crc with 64 bytes of data from p. + ABSL_ATTRIBUTE_ALWAYS_INLINE uint64_t Process64BytesCRC(const uint8_t* p, + uint64_t crc) const { + for (int i = 0; i < 8; i++) { + crc = CRC32_u64(crc, absl::little_endian::Load64(p)); + p += 8; + } + return crc; + } + + // Generated by crc32c_x86_test --crc32c_generate_constants=true + // and verified against constants in linux kernel for S390: + // https://github.com/torvalds/linux/blob/master/arch/s390/crypto/crc32le-vx.S + alignas(16) static constexpr uint64_t k1k2[2] = {0x0740eef02, 0x09e4addf8}; + alignas(16) static constexpr uint64_t k3k4[2] = {0x1384aa63a, 0x0ba4fc28e}; + alignas(16) static constexpr uint64_t k5k6[2] = {0x0f20c0dfe, 0x14cd00bd6}; + alignas(16) static constexpr uint64_t k7k0[2] = {0x0dd45aab8, 0x000000000}; + alignas(16) static constexpr uint64_t kPoly[2] = {0x105ec76f0, 0x0dea713f1}; + alignas(16) static constexpr uint32_t kMask[4] = {~0u, 0u, ~0u, 0u}; + + // Medium runs of bytes are broken into groups of kGroupsSmall blocks of same + // size. Each group is CRCed in parallel then combined at the end of the + // block. + static constexpr int kGroupsSmall = 3; + // For large runs we use up to kMaxStreams blocks computed with CRC + // instruction, and up to kMaxStreams blocks computed with PCLMULQDQ, which + // are combined in the end. + static constexpr int kMaxStreams = 3; +}; + +} // namespace + +// Intel processors with SSE4.2 have an instruction for one particular +// 32-bit CRC polynomial: crc32c +CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { + CpuType type = GetCpuType(); + switch (type) { + case CpuType::kIntelHaswell: + case CpuType::kAmdRome: + case CpuType::kAmdNaples: + case CpuType::kAmdMilan: + return new CRC32AcceleratedX86ARMCombinedMultipleStreams< + 3, 1, CutoffStrategy::Fold3>(); + // PCLMULQDQ is fast, use combined PCLMULQDQ + CRC implementation. + case CpuType::kIntelCascadelakeXeon: + case CpuType::kIntelSkylakeXeon: + case CpuType::kIntelBroadwell: + case CpuType::kIntelSkylake: + return new CRC32AcceleratedX86ARMCombinedMultipleStreams< + 3, 2, CutoffStrategy::Fold3>(); + // PCLMULQDQ is slow, don't use it. + case CpuType::kIntelIvybridge: + case CpuType::kIntelSandybridge: + case CpuType::kIntelWestmere: + return new CRC32AcceleratedX86ARMCombinedMultipleStreams< + 3, 0, CutoffStrategy::Fold3>(); + case CpuType::kArmNeoverseN1: + return new CRC32AcceleratedX86ARMCombinedMultipleStreams< + 1, 1, CutoffStrategy::Unroll64CRC>(); +#if defined(__aarch64__) + default: + // Not all ARM processors support the needed instructions, so check here + // before trying to use an accelerated implementation. 
+ if (SupportsArmCRC32PMULL()) { + return new CRC32AcceleratedX86ARMCombinedMultipleStreams< + 1, 1, CutoffStrategy::Unroll64CRC>(); + } else { + return nullptr; + } +#else + default: + // Something else, play it safe and assume slow PCLMULQDQ. + return new CRC32AcceleratedX86ARMCombinedMultipleStreams< + 3, 0, CutoffStrategy::Fold3>(); +#endif + } +} + +std::vector> NewCRC32AcceleratedX86ARMCombinedAll() { + auto ret = std::vector>(); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + ret.push_back(absl::make_unique>()); + + return ret; +} + +#else // !ABSL_INTERNAL_CAN_USE_SIMD_CRC32C + +std::vector> NewCRC32AcceleratedX86ARMCombinedAll() { + return std::vector>(); +} + +// no hardware acceleration available +CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { return nullptr; } + +#endif + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h new file mode 100644 index 0000000000..92632a3341 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h @@ -0,0 +1,77 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_ +#define ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_ + +#ifdef __aarch64__ +#include + +typedef int64x2_t __m128i; /* 128-bit vector containing integers */ +#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x) +#define vreinterpretq_s64_m128i(x) (x) + +// Guarantees that every preceding store is globally visible before any +// subsequent store. +// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx +static inline __attribute__((always_inline)) void _mm_sfence(void) { + __sync_synchronize(); +} + +// Load 128-bits of integer data from unaligned memory into dst. This intrinsic +// may perform better than _mm_loadu_si128 when the data crosses a cache line +// boundary. 
+// +// dst[127:0] := MEM[mem_addr+127:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128 +#define _mm_lddqu_si128 _mm_loadu_si128 + +// Loads 128-bit value. : +// https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx +static inline __attribute__((always_inline)) __m128i _mm_loadu_si128( + const __m128i *p) { + return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *)p)); +} + +// Stores the data in a to the address p without polluting the caches. If the +// cache line containing address p is already in the cache, the cache will be +// updated. +// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx +static inline __attribute__((always_inline)) void _mm_stream_si128(__m128i *p, + __m128i a) { +#if __has_builtin(__builtin_nontemporal_store) + __builtin_nontemporal_store(a, p); +#else + vst1q_s64((int64_t *)p, vreinterpretq_s64_m128i(a)); +#endif +} + +// Sets the 16 signed 8-bit integer values. +// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx +static inline __attribute__((always_inline)) __m128i _mm_set_epi8( + signed char b15, signed char b14, signed char b13, signed char b12, + signed char b11, signed char b10, signed char b9, signed char b8, + signed char b7, signed char b6, signed char b5, signed char b4, + signed char b3, signed char b2, signed char b1, signed char b0) { + int8_t __attribute__((aligned(16))) + data[16] = {(int8_t)b0, (int8_t)b1, (int8_t)b2, (int8_t)b3, + (int8_t)b4, (int8_t)b5, (int8_t)b6, (int8_t)b7, + (int8_t)b8, (int8_t)b9, (int8_t)b10, (int8_t)b11, + (int8_t)b12, (int8_t)b13, (int8_t)b14, (int8_t)b15}; + return (__m128i)vld1q_s8(data); +} +#endif // __aarch64__ + +#endif // ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h new file mode 100644 index 0000000000..0c6d7655bb --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h @@ -0,0 +1,172 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
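Editorial aside (not part of the patch): the header that follows copies a misaligned head with plain memcpy, streams whole 64-byte cachelines with non-temporal stores, and finishes the tail with memcpy again. A minimal standalone sketch of that split, assuming a 64-byte cacheline:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

struct CopySplit {
  std::size_t head;  // bytes copied with plain memcpy to reach alignment
  std::size_t body;  // whole cachelines copied with streaming stores
  std::size_t tail;  // leftover bytes copied with plain memcpy
};

// Compute how a copy of `len` bytes to `dst` would be divided by the
// non-temporal memcpy below (assumed 64-byte cacheline).
CopySplit SplitForNonTemporalCopy(const void* dst, std::size_t len) {
  constexpr std::size_t kCacheLine = 64;
  const std::size_t misalignment =
      reinterpret_cast<std::uintptr_t>(dst) & (kCacheLine - 1);
  const std::size_t head =
      misalignment == 0 ? 0 : std::min(kCacheLine - misalignment, len);
  const std::size_t body = (len - head) / kCacheLine * kCacheLine;
  return {head, body, len - head - body};
}
```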
+ +#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_ +#define ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +#ifdef __SSE__ +// Only include if we're running on a CPU that supports SSE ISA, needed for +// sfence +#include // IWYU pragma: keep +#endif +#ifdef __SSE2__ +// Only include if we're running on a CPU that supports SSE2 ISA, needed for +// movdqa, movdqu, movntdq +#include // IWYU pragma: keep +#endif +#ifdef __aarch64__ +// Only include if we're running on a CPU that supports ARM NEON ISA, needed for +// sfence, movdqa, movdqu, movntdq +#include "absl/crc/internal/non_temporal_arm_intrinsics.h" +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace crc_internal { +// This non-temporal memcpy does regular load and non-temporal store memory +// copy. It is compatible to both 16-byte aligned and unaligned addresses. If +// data at the destination is not immediately accessed, using non-temporal +// memcpy can save 1 DRAM load of the destination cacheline. + +constexpr int kCacheLineSize = ABSL_CACHELINE_SIZE; + +// If the objects overlap, the behavior is undefined. +// MSVC does not have proper header support for some of these intrinsics, +// so it should go to fallback +inline void *non_temporal_store_memcpy(void *__restrict dst, + const void *__restrict src, size_t len) { +#if (defined(__SSE3__) || defined(__aarch64__)) && !defined(_MSC_VER) + uint8_t *d = reinterpret_cast(dst); + const uint8_t *s = reinterpret_cast(src); + + // memcpy() the misaligned header. At the end of this if block, is + // aligned to a 64-byte cacheline boundary or == 0. + if (reinterpret_cast(d) & (kCacheLineSize - 1)) { + uintptr_t bytes_before_alignment_boundary = + kCacheLineSize - + (reinterpret_cast(d) & (kCacheLineSize - 1)); + int header_len = (std::min)(bytes_before_alignment_boundary, len); + assert(bytes_before_alignment_boundary < kCacheLineSize); + memcpy(d, s, header_len); + d += header_len; + s += header_len; + len -= header_len; + } + + if (len >= kCacheLineSize) { + _mm_sfence(); + __m128i *dst_cacheline = reinterpret_cast<__m128i *>(d); + const __m128i *src_cacheline = reinterpret_cast(s); + constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m128i); + uint64_t loops = len / kCacheLineSize; + + while (len >= kCacheLineSize) { + __m128i temp1, temp2, temp3, temp4; + temp1 = _mm_lddqu_si128(src_cacheline + 0); + temp2 = _mm_lddqu_si128(src_cacheline + 1); + temp3 = _mm_lddqu_si128(src_cacheline + 2); + temp4 = _mm_lddqu_si128(src_cacheline + 3); + _mm_stream_si128(dst_cacheline + 0, temp1); + _mm_stream_si128(dst_cacheline + 1, temp2); + _mm_stream_si128(dst_cacheline + 2, temp3); + _mm_stream_si128(dst_cacheline + 3, temp4); + src_cacheline += kOpsPerCacheLine; + dst_cacheline += kOpsPerCacheLine; + len -= kCacheLineSize; + } + d += loops * kCacheLineSize; + s += loops * kCacheLineSize; + _mm_sfence(); + } + + // memcpy the tail. + if (len) { + memcpy(d, s, len); + } + return dst; +#else + // Fallback to regular memcpy when SSE2/3 & aarch64 is not available. 
+ return memcpy(dst, src, len); +#endif // __SSE3__ || __aarch64__ +} + +// MSVC does not have proper header support for some of these intrinsics, +// so it should go to fallback +inline void *non_temporal_store_memcpy_avx(void *__restrict dst, + const void *__restrict src, + size_t len) { +#if defined(__AVX__) && !defined(_MSC_VER) + uint8_t *d = reinterpret_cast(dst); + const uint8_t *s = reinterpret_cast(src); + + // memcpy() the misaligned header. At the end of this if block, is + // aligned to a 64-byte cacheline boundary or == 0. + if (reinterpret_cast(d) & (kCacheLineSize - 1)) { + uintptr_t bytes_before_alignment_boundary = + kCacheLineSize - + (reinterpret_cast(d) & (kCacheLineSize - 1)); + int header_len = (std::min)(bytes_before_alignment_boundary, len); + assert(bytes_before_alignment_boundary < kCacheLineSize); + memcpy(d, s, header_len); + d += header_len; + s += header_len; + len -= header_len; + } + + if (len >= kCacheLineSize) { + _mm_sfence(); + __m256i *dst_cacheline = reinterpret_cast<__m256i *>(d); + const __m256i *src_cacheline = reinterpret_cast(s); + constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m256i); + int loops = len / kCacheLineSize; + + while (len >= kCacheLineSize) { + __m256i temp1, temp2; + temp1 = _mm256_lddqu_si256(src_cacheline + 0); + temp2 = _mm256_lddqu_si256(src_cacheline + 1); + _mm256_stream_si256(dst_cacheline + 0, temp1); + _mm256_stream_si256(dst_cacheline + 1, temp2); + src_cacheline += kOpsPerCacheLine; + dst_cacheline += kOpsPerCacheLine; + len -= kCacheLineSize; + } + d += loops * kCacheLineSize; + s += loops * kCacheLineSize; + _mm_sfence(); + } + + // memcpy the tail. + if (len) { + memcpy(d, s, len); + } + return dst; +#else + // Fallback to regular memcpy when AVX is not available. + return memcpy(dst, src, len); +#endif // __AVX__ +} + +} // namespace crc_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc new file mode 100644 index 0000000000..eb07a559a5 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc @@ -0,0 +1,88 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/crc/internal/non_temporal_memcpy.h" + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +namespace { + +struct TestParam { + size_t copy_size; + uint32_t src_offset; + uint32_t dst_offset; +}; + +class NonTemporalMemcpyTest : public testing::TestWithParam { + protected: + void SetUp() override { + // Make buf_size multiple of 16 bytes. 
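    // (Editorial note, not part of the patch: this is the usual
    // round-up-to-a-multiple idiom, (x + 15) / 16 * 16. For example, the
    // {58, 5, 5} parameter set below gives x = 5 + 58 = 63, which rounds up
    // to a 64-byte buffer.)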
+ size_t buf_size = ((std::max(GetParam().src_offset, GetParam().dst_offset) + + GetParam().copy_size) + + 15) / + 16 * 16; + a_.resize(buf_size); + b_.resize(buf_size); + for (size_t i = 0; i < buf_size; i++) { + a_[i] = static_cast(i % 256); + b_[i] = ~a_[i]; + } + } + + std::vector a_, b_; +}; + +TEST_P(NonTemporalMemcpyTest, SSEEquality) { + uint8_t *src = a_.data() + GetParam().src_offset; + uint8_t *dst = b_.data() + GetParam().dst_offset; + absl::crc_internal::non_temporal_store_memcpy(dst, src, GetParam().copy_size); + for (size_t i = 0; i < GetParam().copy_size; i++) { + EXPECT_EQ(src[i], dst[i]); + } +} + +TEST_P(NonTemporalMemcpyTest, AVXEquality) { + uint8_t* src = a_.data() + GetParam().src_offset; + uint8_t* dst = b_.data() + GetParam().dst_offset; + + absl::crc_internal::non_temporal_store_memcpy_avx(dst, src, + GetParam().copy_size); + for (size_t i = 0; i < GetParam().copy_size; i++) { + EXPECT_EQ(src[i], dst[i]); + } +} + +// 63B is smaller than one cacheline operation thus the non-temporal routine +// will not be called. +// 4352B is sufficient for testing 4092B data copy with room for offsets. +constexpr TestParam params[] = { + {63, 0, 0}, {58, 5, 5}, {61, 2, 0}, {61, 0, 2}, + {58, 5, 2}, {4096, 0, 0}, {4096, 0, 1}, {4096, 0, 2}, + {4096, 0, 3}, {4096, 0, 4}, {4096, 0, 5}, {4096, 0, 6}, + {4096, 0, 7}, {4096, 0, 8}, {4096, 0, 9}, {4096, 0, 10}, + {4096, 0, 11}, {4096, 0, 12}, {4096, 0, 13}, {4096, 0, 14}, + {4096, 0, 15}, {4096, 7, 7}, {4096, 3, 0}, {4096, 1, 0}, + {4096, 9, 3}, {4096, 9, 11}, {8192, 0, 0}, {8192, 5, 2}, + {1024768, 7, 11}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, + {1, 1, 1}}; + +INSTANTIATE_TEST_SUITE_P(ParameterizedNonTemporalMemcpyTest, + NonTemporalMemcpyTest, testing::ValuesIn(params)); + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/CMakeLists.txt index b16fa007e3..e823f15b89 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/CMakeLists.txt @@ -14,6 +14,8 @@ # limitations under the License. # +find_library(EXECINFO_LIBRARY execinfo) + absl_cc_library( NAME stacktrace @@ -33,10 +35,13 @@ absl_cc_library( "stacktrace.cc" COPTS ${ABSL_DEFAULT_COPTS} + LINKOPTS + $<$:${EXECINFO_LIBRARY}> DEPS absl::debugging_internal absl::config absl::core_headers + absl::raw_logging_internal PUBLIC ) @@ -57,7 +62,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} - $<$:"dbghelp"> + $<$:-ldbghelp> DEPS absl::debugging_internal absl::demangle_internal @@ -93,6 +98,7 @@ absl_cc_test( GTest::gmock ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME examine_stack @@ -125,7 +131,6 @@ absl_cc_library( absl::base absl::config absl::core_headers - absl::errno_saver absl::raw_logging_internal PUBLIC ) @@ -147,6 +152,7 @@ absl_cc_test( GTest::gmock ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME debugging_internal @@ -168,6 +174,7 @@ absl_cc_library( absl::raw_logging_internal ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME demangle_internal @@ -215,42 +222,6 @@ absl_cc_library( PUBLIC ) -absl_cc_library( - NAME - leak_check_disable - SRCS - "leak_check_disable.cc" - COPTS - ${ABSL_DEFAULT_COPTS} - PUBLIC -) - -absl_cc_library( - NAME - leak_check_api_enabled_for_testing - HDRS - "leak_check.h" - SRCS - "leak_check.cc" - COPTS - ${ABSL_DEFAULT_COPTS} - $<$:-DLEAK_SANITIZER> - TESTONLY -) - -absl_cc_library( - NAME - leak_check_api_disabled_for_testing - HDRS - "leak_check.h" - SRCS - "leak_check.cc" - COPTS - ${ABSL_DEFAULT_COPTS} - "-ULEAK_SANITIZER" - TESTONLY -) - absl_cc_test( NAME leak_check_test @@ -258,46 +229,15 @@ absl_cc_test( "leak_check_test.cc" COPTS ${ABSL_TEST_COPTS} - "$<$:-DABSL_EXPECT_LEAK_SANITIZER>" LINKOPTS - "${ABSL_LSAN_LINKOPTS}" + ${ABSL_DEFAULT_LINKOPTS} DEPS - absl::leak_check_api_enabled_for_testing + absl::leak_check absl::base GTest::gmock_main ) -absl_cc_test( - NAME - leak_check_no_lsan_test - SRCS - "leak_check_test.cc" - COPTS - ${ABSL_TEST_COPTS} - "-UABSL_EXPECT_LEAK_SANITIZER" - DEPS - absl::leak_check_api_disabled_for_testing - absl::base - GTest::gmock_main -) - -absl_cc_test( - NAME - disabled_leak_check_test - SRCS - "leak_check_fail_test.cc" - COPTS - ${ABSL_TEST_COPTS} - LINKOPTS - "${ABSL_LSAN_LINKOPTS}" - DEPS - absl::leak_check_api_enabled_for_testing - absl::leak_check_disable - absl::base - absl::raw_logging_internal - GTest::gmock_main -) - +# Internal-only target, do not depend on directly. absl_cc_library( NAME stack_consumption diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/failure_signal_handler.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/failure_signal_handler.cc index 689e5979e7..ef8ab9e5a8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/failure_signal_handler.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/failure_signal_handler.cc @@ -42,7 +42,6 @@ #include #include "absl/base/attributes.h" -#include "absl/base/internal/errno_saver.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/sysinfo.h" #include "absl/debugging/internal/examine_stack.h" @@ -51,8 +50,10 @@ #ifndef _WIN32 #define ABSL_HAVE_SIGACTION // Apple WatchOS and TVOS don't allow sigaltstack -#if !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \ - !(defined(TARGET_OS_TV) && TARGET_OS_TV) +// Apple macOS has sigaltstack, but using it makes backtrace() unusable. +#if !(defined(TARGET_OS_OSX) && TARGET_OS_OSX) && \ + !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \ + !(defined(TARGET_OS_TV) && TARGET_OS_TV) && !defined(__QNX__) #define ABSL_HAVE_SIGALTSTACK #endif #endif @@ -134,10 +135,11 @@ static bool SetupAlternateStackOnce() { #if defined(__wasm__) || defined (__asjms__) const size_t page_mask = getpagesize() - 1; #else - const size_t page_mask = sysconf(_SC_PAGESIZE) - 1; + const size_t page_mask = static_cast(sysconf(_SC_PAGESIZE)) - 1; #endif size_t stack_size = - (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask; + (std::max(static_cast(SIGSTKSZ), size_t{65536}) + page_mask) & + ~page_mask; #if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER) // Account for sanitizer instrumentation requiring additional stack space. 
@@ -217,8 +219,7 @@ static void InstallOneFailureHandler(FailureSignalData* data, #endif static void WriteToStderr(const char* data) { - absl::base_internal::ErrnoSaver errno_saver; - absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data)); + absl::raw_log_internal::AsyncSignalSafeWriteToStderr(data, strlen(data)); } static void WriteSignalMessage(int signo, int cpu, @@ -291,7 +292,7 @@ static void WriteFailureInfo(int signo, void* ucontext, int cpu, // some platforms. static void PortableSleepForSeconds(int seconds) { #ifdef _WIN32 - Sleep(seconds * 1000); + Sleep(static_cast(seconds * 1000)); #else struct timespec sleep_time; sleep_time.tv_sec = seconds; @@ -325,9 +326,9 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) { const GetTidType this_tid = absl::base_internal::GetTID(); GetTidType previous_failed_tid = 0; - if (!failed_tid.compare_exchange_strong( - previous_failed_tid, static_cast(this_tid), - std::memory_order_acq_rel, std::memory_order_relaxed)) { + if (!failed_tid.compare_exchange_strong(previous_failed_tid, this_tid, + std::memory_order_acq_rel, + std::memory_order_relaxed)) { ABSL_RAW_LOG( ERROR, "Signal %d raised at PC=%p while already in AbslFailureSignalHandler()", @@ -356,7 +357,7 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) { if (fsh_options.alarm_on_failure_secs > 0) { alarm(0); // Cancel any existing alarms. signal(SIGALRM, ImmediateAbortSignalHandler); - alarm(fsh_options.alarm_on_failure_secs); + alarm(static_cast(fsh_options.alarm_on_failure_secs)); } #endif diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc index 329c285f3b..91eaa76f8a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc @@ -30,16 +30,12 @@ bool AddressIsReadable(const void* /* addr */) { return true; } ABSL_NAMESPACE_END } // namespace absl -#else +#else // __linux__ && !__ANDROID__ -#include -#include +#include +#include #include -#include -#include -#include - #include "absl/base/internal/errno_saver.h" #include "absl/base/internal/raw_logging.h" @@ -47,93 +43,54 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { -// Pack a pid and two file descriptors into a 64-bit word, -// using 16, 24, and 24 bits for each respectively. -static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) { - ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0, - "fd out of range"); - return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff); -} - -// Unpack x into a pid and two file descriptors, where x was created with -// Pack(). -static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) { - *pid = x >> 48; - *read_fd = (x >> 24) & 0xffffff; - *write_fd = x & 0xffffff; -} - -// Return whether the byte at *addr is readable, without faulting. -// Save and restores errno. Returns true on systems where -// unimplemented. -// This is a namespace-scoped variable for correct zero-initialization. -static std::atomic pid_and_fds; // initially 0, an invalid pid. - +// NOTE: be extra careful about adding any interposable function calls here +// (such as open(), read(), etc.). 
These symbols may be interposed and will get +// invoked in contexts they don't expect. +// +// NOTE: any new system calls here may also require sandbox reconfiguration. +// bool AddressIsReadable(const void *addr) { + // Align address on 8-byte boundary. On aarch64, checking last + // byte before inaccessible page returned unexpected EFAULT. + const uintptr_t u_addr = reinterpret_cast(addr) & ~uintptr_t{7}; + addr = reinterpret_cast(u_addr); + + // rt_sigprocmask below will succeed for this input. + if (addr == nullptr) return false; + absl::base_internal::ErrnoSaver errno_saver; - // We test whether a byte is readable by using write(). Normally, this would - // be done via a cached file descriptor to /dev/null, but linux fails to - // check whether the byte is readable when the destination is /dev/null, so - // we use a cached pipe. We store the pid of the process that created the - // pipe to handle the case where a process forks, and the child closes all - // the file descriptors and then calls this routine. This is not perfect: - // the child could use the routine, then close all file descriptors and then - // use this routine again. But the likely use of this routine is when - // crashing, to test the validity of pages when dumping the stack. Beware - // that we may leak file descriptors, but we're unlikely to leak many. - int bytes_written; - int current_pid = getpid() & 0xffff; // we use only the low order 16 bits - do { // until we do not get EBADF trying to use file descriptors - int pid; - int read_fd; - int write_fd; - uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire); - Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd); - while (current_pid != pid) { - int p[2]; - // new pipe - if (pipe(p) != 0) { - ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno); - } - fcntl(p[0], F_SETFD, FD_CLOEXEC); - fcntl(p[1], F_SETFD, FD_CLOEXEC); - uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]); - if (pid_and_fds.compare_exchange_strong( - local_pid_and_fds, new_pid_and_fds, std::memory_order_release, - std::memory_order_relaxed)) { - local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads - } else { // fds not exposed to other threads; we can close them. - close(p[0]); - close(p[1]); - local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire); - } - Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd); - } - errno = 0; - // Use syscall(SYS_write, ...) instead of write() to prevent ASAN - // and other checkers from complaining about accesses to arbitrary - // memory. - do { - bytes_written = syscall(SYS_write, write_fd, addr, 1); - } while (bytes_written == -1 && errno == EINTR); - if (bytes_written == 1) { // remove the byte from the pipe - char c; - while (read(read_fd, &c, 1) == -1 && errno == EINTR) { - } - } - if (errno == EBADF) { // Descriptors invalid. - // If pid_and_fds contains the problematic file descriptors we just used, - // this call will forget them, and the loop will try again. - pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0, - std::memory_order_release, - std::memory_order_relaxed); - } - } while (errno == EBADF); - return bytes_written == 1; + + // Here we probe with some syscall which + // - accepts an 8-byte region of user memory as input + // - tests for EFAULT before other validation + // - has no problematic side-effects + // + // rt_sigprocmask(2) works for this. It copies sizeof(kernel_sigset_t)==8 + // bytes from the address into the kernel memory before any validation. 
+ // + // The call can never succeed, since the `how` parameter is not one of + // SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK. + // + // This strategy depends on Linux implementation details, + // so we rely on the test to alert us if it stops working. + // + // Some discarded past approaches: + // - msync() doesn't reject PROT_NONE regions + // - write() on /dev/null doesn't return EFAULT + // - write() on a pipe requires creating it and draining the writes + // - connect() works but is problematic for sandboxes and needs a valid + // file descriptor + // + // This can never succeed (invalid first argument to sigprocmask). + ABSL_RAW_CHECK(syscall(SYS_rt_sigprocmask, ~0, addr, nullptr, + /*sizeof(kernel_sigset_t)*/ 8) == -1, + "unexpected success"); + ABSL_RAW_CHECK(errno == EFAULT || errno == EINVAL, "unexpected errno"); + return errno != EFAULT; } } // namespace debugging_internal ABSL_NAMESPACE_END } // namespace absl -#endif +#endif // __linux__ && !__ANDROID__ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.cc index 93ae32796c..f2832915bf 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.cc @@ -151,12 +151,12 @@ static const AbbrevPair kSubstitutionList[] = { // State needed for demangling. This struct is copied in almost every stack // frame, so every byte counts. typedef struct { - int mangled_idx; // Cursor of mangled name. - int out_cur_idx; // Cursor of output string. - int prev_name_idx; // For constructors/destructors. - signed int prev_name_length : 16; // For constructors/destructors. - signed int nest_level : 15; // For nested names. - unsigned int append : 1; // Append flag. + int mangled_idx; // Cursor of mangled name. + int out_cur_idx; // Cursor of output string. + int prev_name_idx; // For constructors/destructors. + unsigned int prev_name_length : 16; // For constructors/destructors. + signed int nest_level : 15; // For nested names. + unsigned int append : 1; // Append flag. // Note: for some reason MSVC can't pack "bool append : 1" into the same int // with the above two fields, so we use an int instead. Amusingly it can pack // "signed bool" as expected, but relying on that to continue to be a legal @@ -235,8 +235,8 @@ static size_t StrLen(const char *str) { } // Returns true if "str" has at least "n" characters remaining. -static bool AtLeastNumCharsRemaining(const char *str, int n) { - for (int i = 0; i < n; ++i) { +static bool AtLeastNumCharsRemaining(const char *str, size_t n) { + for (size_t i = 0; i < n; ++i) { if (str[i] == '\0') { return false; } @@ -253,18 +253,20 @@ static bool StrPrefix(const char *str, const char *prefix) { return prefix[i] == '\0'; // Consumed everything in "prefix". 
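// A self-contained sketch of the probing idea behind the AddressIsReadable()
// rewrite in address_is_readable.cc above, assuming a Linux target: ask the
// kernel to copy 8 bytes from the probed address via a syscall that validates
// the user pointer before anything else, then distinguish EFAULT (unreadable)
// from EINVAL (readable, but the bogus `how` argument is rejected). The helper
// name ProbeByteIsReadable is made up for this example.
#include <cerrno>
#include <cstdint>
#include <sys/syscall.h>
#include <unistd.h>

static bool ProbeByteIsReadable(const void* addr) {
  // Align to 8 bytes, matching the sizeof(kernel_sigset_t) copy done by the
  // kernel.
  const uintptr_t aligned = reinterpret_cast<uintptr_t>(addr) & ~uintptr_t{7};
  if (aligned == 0) return false;
  const int saved_errno = errno;
  errno = 0;
  // `how` == -1 is none of SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK, so the call can
  // only fail: with EFAULT for an unreadable address, or EINVAL otherwise.
  syscall(SYS_rt_sigprocmask, -1, reinterpret_cast<const void*>(aligned),
          nullptr, /*sigsetsize=*/8);
  const bool readable = (errno != EFAULT);
  errno = saved_errno;
  return readable;
}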
} -static void InitState(State *state, const char *mangled, char *out, - int out_size) { +static void InitState(State* state, + const char* mangled, + char* out, + size_t out_size) { state->mangled_begin = mangled; state->out = out; - state->out_end_idx = out_size; + state->out_end_idx = static_cast(out_size); state->recursion_depth = 0; state->steps = 0; state->parse_state.mangled_idx = 0; state->parse_state.out_cur_idx = 0; state->parse_state.prev_name_idx = 0; - state->parse_state.prev_name_length = -1; + state->parse_state.prev_name_length = 0; state->parse_state.nest_level = -1; state->parse_state.append = true; } @@ -356,8 +358,8 @@ static bool ZeroOrMore(ParseFunc parse_func, State *state) { // Append "str" at "out_cur_idx". If there is an overflow, out_cur_idx is // set to out_end_idx+1. The output string is ensured to // always terminate with '\0' as long as there is no overflow. -static void Append(State *state, const char *const str, const int length) { - for (int i = 0; i < length; ++i) { +static void Append(State *state, const char *const str, const size_t length) { + for (size_t i = 0; i < length; ++i) { if (state->parse_state.out_cur_idx + 1 < state->out_end_idx) { // +1 for '\0' state->out[state->parse_state.out_cur_idx++] = str[i]; @@ -420,7 +422,7 @@ static bool EndsWith(State *state, const char chr) { // Append "str" with some tweaks, iff "append" state is true. static void MaybeAppendWithLength(State *state, const char *const str, - const int length) { + const size_t length) { if (state->parse_state.append && length > 0) { // Append a space if the output buffer ends with '<' and "str" // starts with '<' to avoid <<<. @@ -432,14 +434,14 @@ static void MaybeAppendWithLength(State *state, const char *const str, if (state->parse_state.out_cur_idx < state->out_end_idx && (IsAlpha(str[0]) || str[0] == '_')) { state->parse_state.prev_name_idx = state->parse_state.out_cur_idx; - state->parse_state.prev_name_length = length; + state->parse_state.prev_name_length = static_cast(length); } Append(state, str, length); } } // Appends a positive decimal number to the output if appending is enabled. -static bool MaybeAppendDecimal(State *state, unsigned int val) { +static bool MaybeAppendDecimal(State *state, int val) { // Max {32-64}-bit unsigned int is 20 digits. constexpr size_t kMaxLength = 20; char buf[kMaxLength]; @@ -451,12 +453,12 @@ static bool MaybeAppendDecimal(State *state, unsigned int val) { // one-past-the-end and manipulate one character before the pointer. char *p = &buf[kMaxLength]; do { // val=0 is the only input that should write a leading zero digit. - *--p = (val % 10) + '0'; + *--p = static_cast((val % 10) + '0'); val /= 10; } while (p > buf && val != 0); // 'p' landed on the last character we set. How convenient. - Append(state, p, kMaxLength - (p - buf)); + Append(state, p, kMaxLength - static_cast(p - buf)); } return true; @@ -466,7 +468,7 @@ static bool MaybeAppendDecimal(State *state, unsigned int val) { // Returns true so that it can be placed in "if" conditions. static bool MaybeAppend(State *state, const char *const str) { if (state->parse_state.append) { - int length = StrLen(str); + size_t length = StrLen(str); MaybeAppendWithLength(state, str, length); } return true; @@ -521,10 +523,10 @@ static void MaybeCancelLastSeparator(State *state) { // Returns true if the identifier of the given length pointed to by // "mangled_cur" is anonymous namespace. 
-static bool IdentifierIsAnonymousNamespace(State *state, int length) { +static bool IdentifierIsAnonymousNamespace(State *state, size_t length) { // Returns true if "anon_prefix" is a proper prefix of "mangled_cur". static const char anon_prefix[] = "_GLOBAL__N_"; - return (length > static_cast(sizeof(anon_prefix) - 1) && + return (length > (sizeof(anon_prefix) - 1) && StrPrefix(RemainingInput(state), anon_prefix)); } @@ -542,12 +544,13 @@ static bool ParseUnnamedTypeName(State *state); static bool ParseNumber(State *state, int *number_out); static bool ParseFloatNumber(State *state); static bool ParseSeqId(State *state); -static bool ParseIdentifier(State *state, int length); +static bool ParseIdentifier(State *state, size_t length); static bool ParseOperatorName(State *state, int *arity); static bool ParseSpecialName(State *state); static bool ParseCallOffset(State *state); static bool ParseNVOffset(State *state); static bool ParseVOffset(State *state); +static bool ParseAbiTags(State *state); static bool ParseCtorDtorName(State *state); static bool ParseDecltype(State *state); static bool ParseType(State *state); @@ -601,7 +604,7 @@ static bool ParseSubstitution(State *state, bool accept_std); // // Reference: // - Itanium C++ ABI -// +// // ::= _Z static bool ParseMangledName(State *state) { @@ -741,17 +744,42 @@ static bool ParsePrefix(State *state) { return true; } -// ::= -// ::= -// ::= -// ::= // GCC extension; see below. -// ::= +// ::= [] +// ::= [] +// ::= [] +// ::= [] +// ::= [] +// +// is a GCC extension; see below. static bool ParseUnqualifiedName(State *state) { ComplexityGuard guard(state); if (guard.IsTooComplex()) return false; - return (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) || - ParseSourceName(state) || ParseLocalSourceName(state) || - ParseUnnamedTypeName(state)); + if (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) || + ParseSourceName(state) || ParseLocalSourceName(state) || + ParseUnnamedTypeName(state)) { + return ParseAbiTags(state); + } + return false; +} + +// ::= [] +// ::= B +static bool ParseAbiTags(State *state) { + ComplexityGuard guard(state); + if (guard.IsTooComplex()) return false; + + while (ParseOneCharToken(state, 'B')) { + ParseState copy = state->parse_state; + MaybeAppend(state, "[abi:"); + + if (!ParseSourceName(state)) { + state->parse_state = copy; + return false; + } + MaybeAppend(state, "]"); + } + + return true; } // ::= @@ -760,7 +788,8 @@ static bool ParseSourceName(State *state) { if (guard.IsTooComplex()) return false; ParseState copy = state->parse_state; int length = -1; - if (ParseNumber(state, &length) && ParseIdentifier(state, length)) { + if (ParseNumber(state, &length) && + ParseIdentifier(state, static_cast(length))) { return true; } state->parse_state = copy; @@ -838,7 +867,7 @@ static bool ParseNumber(State *state, int *number_out) { uint64_t number = 0; for (; *p != '\0'; ++p) { if (IsDigit(*p)) { - number = number * 10 + (*p - '0'); + number = number * 10 + static_cast(*p - '0'); } else { break; } @@ -853,7 +882,7 @@ static bool ParseNumber(State *state, int *number_out) { state->parse_state.mangled_idx += p - RemainingInput(state); if (number_out != nullptr) { // Note: possibly truncate "number". 
- *number_out = number; + *number_out = static_cast(number); } return true; } @@ -897,10 +926,10 @@ static bool ParseSeqId(State *state) { } // ::= (of given length) -static bool ParseIdentifier(State *state, int length) { +static bool ParseIdentifier(State *state, size_t length) { ComplexityGuard guard(state); if (guard.IsTooComplex()) return false; - if (length < 0 || !AtLeastNumCharsRemaining(RemainingInput(state), length)) { + if (!AtLeastNumCharsRemaining(RemainingInput(state), length)) { return false; } if (IdentifierIsAnonymousNamespace(state, length)) { @@ -1947,7 +1976,7 @@ static bool Overflowed(const State *state) { } // The demangler entry point. -bool Demangle(const char *mangled, char *out, int out_size) { +bool Demangle(const char* mangled, char* out, size_t out_size) { State state; InitState(&state, mangled, out, out_size); return ParseTopLevelMangledName(&state) && !Overflowed(&state) && diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.h index c314d9bc23..e1f156989f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle.h @@ -62,7 +62,7 @@ namespace debugging_internal { // Demangle `mangled`. On success, return true and write the // demangled symbol name to `out`. Otherwise, return false. // `out` is modified even if demangling is unsuccessful. -bool Demangle(const char *mangled, char *out, int out_size); +bool Demangle(const char* mangled, char* out, size_t out_size); } // namespace debugging_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle_test.cc index 6b142902ca..8463a2b7d1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/demangle_test.cc @@ -102,6 +102,30 @@ TEST(Demangle, Clones) { EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp))); } +// Test the GNU abi_tag extension. +TEST(Demangle, AbiTags) { + char tmp[80]; + + // Mangled name generated via: + // struct [[gnu::abi_tag("abc")]] A{}; + // A a; + EXPECT_TRUE(Demangle("_Z1aB3abc", tmp, sizeof(tmp))); + EXPECT_STREQ("a[abi:abc]", tmp); + + // Mangled name generated via: + // struct B { + // B [[gnu::abi_tag("xyz")]] (){}; + // }; + // B b; + EXPECT_TRUE(Demangle("_ZN1BC2B3xyzEv", tmp, sizeof(tmp))); + EXPECT_STREQ("B::B[abi:xyz]()", tmp); + + // Mangled name generated via: + // [[gnu::abi_tag("foo", "bar")]] void C() {} + EXPECT_TRUE(Demangle("_Z1CB3barB3foov", tmp, sizeof(tmp))); + EXPECT_STREQ("C[abi:bar][abi:foo]()", tmp); +} + // Tests that verify that Demangle footprint is within some limit. // They are not to be run under sanitizers as the sanitizers increase // stack consumption by about 4x. 
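// Usage sketch for the abi_tag demangling exercised by the new tests above.
// Demangle() lives in an internal namespace, so this only illustrates the
// expected output, mirroring the test expectations rather than a supported
// public API.
#include <cstdio>
#include "absl/debugging/internal/demangle.h"

int main() {
  char out[80];
  // struct [[gnu::abi_tag("abc")]] A{}; A a;    =>  "a[abi:abc]"
  if (absl::debugging_internal::Demangle("_Z1aB3abc", out, sizeof(out))) {
    std::printf("%s\n", out);
  }
  // [[gnu::abi_tag("foo", "bar")]] void C() {}  =>  "C[abi:bar][abi:foo]()"
  if (absl::debugging_internal::Demangle("_Z1CB3barB3foov", out, sizeof(out))) {
    std::printf("%s\n", out);
  }
  return 0;
}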
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc index 24cc01302d..42dcd3cde9 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc @@ -22,6 +22,7 @@ #include #include #include +#include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" // From binutils/include/elf/common.h (this doesn't appear to be documented @@ -43,11 +44,11 @@ namespace debugging_internal { namespace { -#if __WORDSIZE == 32 +#if __SIZEOF_POINTER__ == 4 const int kElfClass = ELFCLASS32; int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); } int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); } -#elif __WORDSIZE == 64 +#elif __SIZEOF_POINTER__ == 8 const int kElfClass = ELFCLASS64; int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); } int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); } @@ -90,7 +91,7 @@ int ElfMemImage::GetNumSymbols() const { return 0; } // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash - return hash_[1]; + return static_cast(hash_[1]); } const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const { @@ -104,11 +105,9 @@ const ElfW(Versym) *ElfMemImage::GetVersym(int index) const { } const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const { - ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range"); - return GetTableElement(ehdr_, - ehdr_->e_phoff, - ehdr_->e_phentsize, - index); + ABSL_RAW_CHECK(index >= 0 && index < ehdr_->e_phnum, "index out of range"); + return GetTableElement(ehdr_, ehdr_->e_phoff, ehdr_->e_phentsize, + static_cast(index)); } const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const { @@ -158,7 +157,8 @@ void ElfMemImage::Init(const void *base) { hash_ = nullptr; strsize_ = 0; verdefnum_ = 0; - link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this. + // Sentinel: PT_LOAD .p_vaddr can't possibly be this. 
+ link_base_ = ~ElfW(Addr){0}; // NOLINT(readability/braces) if (!base) { return; } @@ -175,17 +175,17 @@ void ElfMemImage::Init(const void *base) { } switch (base_as_char[EI_DATA]) { case ELFDATA2LSB: { - if (__LITTLE_ENDIAN != __BYTE_ORDER) { - assert(false); - return; - } +#ifndef ABSL_IS_LITTLE_ENDIAN + assert(false); + return; +#endif break; } case ELFDATA2MSB: { - if (__BIG_ENDIAN != __BYTE_ORDER) { - assert(false); - return; - } +#ifndef ABSL_IS_BIG_ENDIAN + assert(false); + return; +#endif break; } default: { @@ -217,11 +217,11 @@ void ElfMemImage::Init(const void *base) { } ptrdiff_t relocation = base_as_char - reinterpret_cast(link_base_); - ElfW(Dyn) *dynamic_entry = - reinterpret_cast(dynamic_program_header->p_vaddr + - relocation); + ElfW(Dyn)* dynamic_entry = reinterpret_cast( + static_cast(dynamic_program_header->p_vaddr) + relocation); for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) { - const ElfW(Xword) value = dynamic_entry->d_un.d_val + relocation; + const auto value = + static_cast(dynamic_entry->d_un.d_val) + relocation; switch (dynamic_entry->d_tag) { case DT_HASH: hash_ = reinterpret_cast(value); @@ -239,10 +239,10 @@ void ElfMemImage::Init(const void *base) { verdef_ = reinterpret_cast(value); break; case DT_VERDEFNUM: - verdefnum_ = dynamic_entry->d_un.d_val; + verdefnum_ = static_cast(dynamic_entry->d_un.d_val); break; case DT_STRSZ: - strsize_ = dynamic_entry->d_un.d_val; + strsize_ = static_cast(dynamic_entry->d_un.d_val); break; default: // Unrecognized entries explicitly ignored. @@ -350,7 +350,11 @@ void ElfMemImage::SymbolIterator::Update(int increment) { const ElfW(Versym) *version_symbol = image->GetVersym(index_); ABSL_RAW_CHECK(symbol && version_symbol, ""); const char *const symbol_name = image->GetDynstr(symbol->st_name); +#if defined(__NetBSD__) + const int version_index = version_symbol->vs_vers & VERSYM_VERSION; +#else const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION; +#endif const ElfW(Verdef) *version_definition = nullptr; const char *version_name = ""; if (symbol->st_shndx == SHN_UNDEF) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.h index 46bfade350..113071a9d1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.h @@ -31,8 +31,9 @@ #error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set #endif -#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \ - !defined(__asmjs__) && !defined(__wasm__) +#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \ + !defined(__native_client__) && !defined(__asmjs__) && \ + !defined(__wasm__) && !defined(__HAIKU__) #define ABSL_HAVE_ELF_MEM_IMAGE 1 #endif @@ -40,6 +41,10 @@ #include // for ElfW +#if defined(__FreeBSD__) && !defined(ElfW) +#define ElfW(x) __ElfN(x) +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc index 589a3ef367..57863228d8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc @@ -20,7 +20,13 @@ #include #endif -#ifdef __APPLE__ +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_MMAP +#include +#endif + +#if defined(__linux__) || defined(__APPLE__) #include #endif @@ -37,10 +43,115 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { +namespace { +constexpr int kDefaultDumpStackFramesLimit = 64; +// The %p field width for printf() functions is two characters per byte, +// and two extra for the leading "0x". +constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*); + +ABSL_CONST_INIT SymbolizeUrlEmitter debug_stack_trace_hook = nullptr; + +// Async-signal safe mmap allocator. +void* Allocate(size_t num_bytes) { +#ifdef ABSL_HAVE_MMAP + void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + return p == MAP_FAILED ? nullptr : p; +#else + (void)num_bytes; + return nullptr; +#endif // ABSL_HAVE_MMAP +} + +void Deallocate(void* p, size_t size) { +#ifdef ABSL_HAVE_MMAP + ::munmap(p, size); +#else + (void)p; + (void)size; +#endif // ABSL_HAVE_MMAP +} + +// Print a program counter only. +void DumpPC(OutputWriter* writer, void* writer_arg, void* const pc, + const char* const prefix) { + char buf[100]; + snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth, pc); + writer(buf, writer_arg); +} + +// Print a program counter and the corresponding stack frame size. +void DumpPCAndFrameSize(OutputWriter* writer, void* writer_arg, void* const pc, + int framesize, const char* const prefix) { + char buf[100]; + if (framesize <= 0) { + snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix, + kPrintfPointerFieldWidth, pc); + } else { + snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix, + kPrintfPointerFieldWidth, pc, framesize); + } + writer(buf, writer_arg); +} + +// Print a program counter and the corresponding symbol. +void DumpPCAndSymbol(OutputWriter* writer, void* writer_arg, void* const pc, + const char* const prefix) { + char tmp[1024]; + const char* symbol = "(unknown)"; + // Symbolizes the previous address of pc because pc may be in the + // next function. The overrun happens when the function ends with + // a call to a function annotated noreturn (e.g. CHECK). + // If symbolization of pc-1 fails, also try pc on the off-chance + // that we crashed on the first instruction of a function (that + // actually happens very often for e.g. __restore_rt). + const uintptr_t prev_pc = reinterpret_cast(pc) - 1; + if (absl::Symbolize(reinterpret_cast(prev_pc), tmp, + sizeof(tmp)) || + absl::Symbolize(pc, tmp, sizeof(tmp))) { + symbol = tmp; + } + char buf[1024]; + snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix, kPrintfPointerFieldWidth, + pc, symbol); + writer(buf, writer_arg); +} + +// Print a program counter, its stack frame size, and its symbol name. +// Note that there is a separate symbolize_pc argument. Return addresses may be +// at the end of the function, and this allows the caller to back up from pc if +// appropriate. 
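// A stand-alone sketch of the async-signal-safe allocation strategy used by
// the new Allocate()/Deallocate() helpers in examine_stack.cc above: scratch
// memory comes from mmap()/munmap() rather than malloc(), which must not be
// called from a signal handler. Assumes a POSIX target with MAP_ANONYMOUS;
// the names are made up for the example.
#include <cstddef>
#include <sys/mman.h>

static void* SignalSafeAllocate(size_t num_bytes) {
  void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

static void SignalSafeDeallocate(void* p, size_t num_bytes) {
  if (p != nullptr) ::munmap(p, num_bytes);
}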
+void DumpPCAndFrameSizeAndSymbol(OutputWriter* writer, void* writer_arg, + void* const pc, void* const symbolize_pc, + int framesize, const char* const prefix) { + char tmp[1024]; + const char* symbol = "(unknown)"; + if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) { + symbol = tmp; + } + char buf[1024]; + if (framesize <= 0) { + snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix, + kPrintfPointerFieldWidth, pc, symbol); + } else { + snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix, + kPrintfPointerFieldWidth, pc, framesize, symbol); + } + writer(buf, writer_arg); +} + +} // namespace + +void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook) { + debug_stack_trace_hook = hook; +} + +SymbolizeUrlEmitter GetDebugStackTraceHook() { return debug_stack_trace_hook; } + // Returns the program counter from signal context, nullptr if // unknown. vuc is a ucontext_t*. We use void* to avoid the use of // ucontext_t on non-POSIX systems. -void* GetProgramCounter(void* vuc) { +void* GetProgramCounter(void* const vuc) { #ifdef __linux__ if (vuc != nullptr) { ucontext_t* context = reinterpret_cast(vuc); @@ -82,6 +193,8 @@ void* GetProgramCounter(void* vuc) { return reinterpret_cast(context->uc_mcontext.gregs[16]); #elif defined(__e2k__) return reinterpret_cast(context->uc_mcontext.cr0_hi); +#elif defined(__loongarch__) + return reinterpret_cast(context->uc_mcontext.__pc); #else #error "Undefined Architecture." #endif @@ -120,59 +233,17 @@ void* GetProgramCounter(void* vuc) { return nullptr; } -// The %p field width for printf() functions is two characters per byte, -// and two extra for the leading "0x". -static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*); - -// Print a program counter, its stack frame size, and its symbol name. -// Note that there is a separate symbolize_pc argument. Return addresses may be -// at the end of the function, and this allows the caller to back up from pc if -// appropriate. -static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*), - void* writerfn_arg, void* pc, - void* symbolize_pc, int framesize, - const char* const prefix) { - char tmp[1024]; - const char* symbol = "(unknown)"; - if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) { - symbol = tmp; - } - char buf[1024]; - if (framesize <= 0) { - snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix, - kPrintfPointerFieldWidth, pc, symbol); - } else { - snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix, - kPrintfPointerFieldWidth, pc, framesize, symbol); - } - writerfn(buf, writerfn_arg); -} - -// Print a program counter and the corresponding stack frame size. 
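// Sketch of how a SA_SIGINFO handler receives the ucontext that
// GetProgramCounter() above consumes. Assumes Linux/glibc on x86-64, where
// the interrupted program counter is uc_mcontext.gregs[REG_RIP] (index 16,
// as in the code above); other architectures use different mcontext fields.
// fprintf is not async-signal-safe and is used here for illustration only.
#include <csignal>
#include <cstdio>
#include <ucontext.h>

static void ToySignalHandler(int signo, siginfo_t* /*info*/, void* vuc) {
  const ucontext_t* uc = static_cast<const ucontext_t*>(vuc);
  void* pc = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RIP]);
  std::fprintf(stderr, "signal %d raised at pc=%p\n", signo, pc);
}

static void InstallToyHandler() {
  struct sigaction act = {};
  act.sa_sigaction = &ToySignalHandler;
  act.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &act, nullptr);
}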
-static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*), - void* writerfn_arg, void* pc, int framesize, - const char* const prefix) { - char buf[100]; - if (framesize <= 0) { - snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix, - kPrintfPointerFieldWidth, pc); - } else { - snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix, - kPrintfPointerFieldWidth, pc, framesize); - } - writerfn(buf, writerfn_arg); -} - -void DumpPCAndFrameSizesAndStackTrace( - void* pc, void* const stack[], int frame_sizes[], int depth, - int min_dropped_frames, bool symbolize_stacktrace, - void (*writerfn)(const char*, void*), void* writerfn_arg) { +void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], + int frame_sizes[], int depth, + int min_dropped_frames, + bool symbolize_stacktrace, + OutputWriter* writer, void* writer_arg) { if (pc != nullptr) { // We don't know the stack frame size for PC, use 0. if (symbolize_stacktrace) { - DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: "); + DumpPCAndFrameSizeAndSymbol(writer, writer_arg, pc, pc, 0, "PC: "); } else { - DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: "); + DumpPCAndFrameSize(writer, writer_arg, pc, 0, "PC: "); } } for (int i = 0; i < depth; i++) { @@ -182,22 +253,65 @@ void DumpPCAndFrameSizesAndStackTrace( // call to a function annotated noreturn (e.g. CHECK). Note that we don't // do this for pc above, as the adjustment is only correct for return // addresses. - DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i], + DumpPCAndFrameSizeAndSymbol(writer, writer_arg, stack[i], reinterpret_cast(stack[i]) - 1, frame_sizes[i], " "); } else { - DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i], - " "); + DumpPCAndFrameSize(writer, writer_arg, stack[i], frame_sizes[i], " "); } } if (min_dropped_frames > 0) { char buf[100]; snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n", min_dropped_frames); - writerfn(buf, writerfn_arg); + writer(buf, writer_arg); } } +// Dump current stack trace as directed by writer. +// Make sure this function is not inlined to avoid skipping too many top frames. +ABSL_ATTRIBUTE_NOINLINE +void DumpStackTrace(int min_dropped_frames, int max_num_frames, + bool symbolize_stacktrace, OutputWriter* writer, + void* writer_arg) { + // Print stack trace + void* stack_buf[kDefaultDumpStackFramesLimit]; + void** stack = stack_buf; + int num_stack = kDefaultDumpStackFramesLimit; + size_t allocated_bytes = 0; + + if (num_stack >= max_num_frames) { + // User requested fewer frames than we already have space for. + num_stack = max_num_frames; + } else { + const size_t needed_bytes = + static_cast(max_num_frames) * sizeof(stack[0]); + void* p = Allocate(needed_bytes); + if (p != nullptr) { // We got the space. 
+ num_stack = max_num_frames; + stack = reinterpret_cast(p); + allocated_bytes = needed_bytes; + } + } + + int depth = absl::GetStackTrace(stack, num_stack, min_dropped_frames + 1); + for (int i = 0; i < depth; i++) { + if (symbolize_stacktrace) { + DumpPCAndSymbol(writer, writer_arg, stack[static_cast(i)], + " "); + } else { + DumpPC(writer, writer_arg, stack[static_cast(i)], " "); + } + } + + auto hook = GetDebugStackTraceHook(); + if (hook != nullptr) { + (*hook)(stack, depth, writer, writer_arg); + } + + if (allocated_bytes != 0) Deallocate(stack, allocated_bytes); +} + } // namespace debugging_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.h index 393369131f..190af87f1c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/examine_stack.h @@ -23,17 +23,39 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { +// Type of function used for printing in stack trace dumping, etc. +// We avoid closures to keep things simple. +typedef void OutputWriter(const char*, void*); + +// RegisterDebugStackTraceHook() allows to register a single routine +// `hook` that is called each time DumpStackTrace() is called. +// `hook` may be called from a signal handler. +typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, + OutputWriter* writer, void* writer_arg); + +// Registration of SymbolizeUrlEmitter for use inside of a signal handler. +// This is inherently unsafe and must be signal safe code. +void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook); +SymbolizeUrlEmitter GetDebugStackTraceHook(); + // Returns the program counter from signal context, or nullptr if // unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of // ucontext_t on non-POSIX systems. -void* GetProgramCounter(void* vuc); +void* GetProgramCounter(void* const vuc); -// Uses `writerfn` to dump the program counter, stack trace, and stack +// Uses `writer` to dump the program counter, stack trace, and stack // frame sizes. -void DumpPCAndFrameSizesAndStackTrace( - void* pc, void* const stack[], int frame_sizes[], int depth, - int min_dropped_frames, bool symbolize_stacktrace, - void (*writerfn)(const char*, void*), void* writerfn_arg); +void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], + int frame_sizes[], int depth, + int min_dropped_frames, + bool symbolize_stacktrace, + OutputWriter* writer, void* writer_arg); + +// Dump current stack trace omitting the topmost `min_dropped_frames` stack +// frames. 
+void DumpStackTrace(int min_dropped_frames, int max_num_frames, + bool symbolize_stacktrace, OutputWriter* writer, + void* writer_arg); } // namespace debugging_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc index f4859d7c21..71cdaf0940 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc @@ -19,7 +19,7 @@ #include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems #include "absl/debugging/stacktrace.h" -static const uintptr_t kUnknownFrameSize = 0; +static const size_t kUnknownFrameSize = 0; #if defined(__linux__) // Returns the address of the VDSO __kernel_rt_sigreturn function, if present. @@ -65,11 +65,12 @@ static const unsigned char* GetKernelRtSigreturnAddress() { // Compute the size of a stack frame in [low..high). We assume that // low < high. Return size of kUnknownFrameSize. template -static inline uintptr_t ComputeStackFrameSize(const T* low, - const T* high) { +static inline size_t ComputeStackFrameSize(const T* low, + const T* high) { const char* low_char_ptr = reinterpret_cast(low); const char* high_char_ptr = reinterpret_cast(high); - return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize; + return low < high ? static_cast(high_char_ptr - low_char_ptr) + : kUnknownFrameSize; } // Given a pointer to a stack frame, locate and return the calling @@ -110,15 +111,15 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc) { } #endif - // aarch64 ABI requires stack pointer to be 16-byte-aligned. - if ((reinterpret_cast(new_frame_pointer) & 15) != 0) + // The frame pointer should be 8-byte aligned. + if ((reinterpret_cast(new_frame_pointer) & 7) != 0) return nullptr; // Check frame size. In strict mode, we assume frames to be under // 100,000 bytes. In non-strict mode, we relax the limit to 1MB. if (check_frame_size) { - const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000; - const uintptr_t frame_size = + const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000; + const size_t frame_size = ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); if (frame_size == kUnknownFrameSize || frame_size > max_size) return nullptr; @@ -165,7 +166,8 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, } else { result[n] = prev_return_address; if (IS_STACK_FRAMES) { - sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer); + sizes[n] = static_cast( + ComputeStackFrameSize(frame_pointer, next_frame_pointer)); } n++; } @@ -176,12 +178,17 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. 
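// Usage sketch for the DumpStackTrace() entry point declared in
// examine_stack.h above. The callback matches the OutputWriter typedef and
// simply forwards text to stderr. These are debugging_internal APIs, so this
// illustrates the calling convention rather than a supported public surface.
#include <cstdio>
#include "absl/debugging/internal/examine_stack.h"

static void StderrWriter(const char* text, void* /*writer_arg*/) {
  std::fputs(text, stderr);
}

void PrintCurrentStack() {
  // Omit no leading frames, print at most 64, and symbolize each entry.
  absl::debugging_internal::DumpStackTrace(/*min_dropped_frames=*/0,
                                           /*max_num_frames=*/64,
                                           /*symbolize_stacktrace=*/true,
                                           &StderrWriter, nullptr);
}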
const int kMaxUnwind = 200; - int j = 0; - for (; frame_pointer != nullptr && j < kMaxUnwind; j++) { + int num_dropped_frames = 0; + for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } frame_pointer = NextStackFrame(frame_pointer, ucp); } - *min_dropped_frames = j; + *min_dropped_frames = num_dropped_frames; } return n; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc index 2a1bf2e886..102a2a1251 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc @@ -112,11 +112,16 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. const int kMaxUnwind = 200; - int j = 0; - for (; sp != nullptr && j < kMaxUnwind; j++) { + int num_dropped_frames = 0; + for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } sp = NextStackFrame(sp); } - *min_dropped_frames = j; + *min_dropped_frames = num_dropped_frames; } return n; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_config.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_config.h index 29b26bdd65..3929b1b734 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_config.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_config.h @@ -35,9 +35,10 @@ // Thread local support required for UnwindImpl. #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_generic-inl.inc" -#endif +#endif // defined(ABSL_HAVE_THREAD_LOCAL) -#elif defined(__EMSCRIPTEN__) +// Emscripten stacktraces rely on JS. Do not use them in standalone mode. +#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_emscripten-inl.inc" @@ -55,7 +56,7 @@ // Note: When using glibc this may require -funwind-tables to function properly. #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_generic-inl.inc" -#endif +#endif // __has_include() #elif defined(__i386__) || defined(__x86_64__) #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_x86-inl.inc" @@ -73,9 +74,10 @@ // Note: When using glibc this may require -funwind-tables to function properly. #define ABSL_STACKTRACE_INL_HEADER \ "absl/debugging/internal/stacktrace_generic-inl.inc" -#endif -#endif -#endif +#endif // __has_include() +#endif // defined(__has_include) + +#endif // defined(__linux__) && !defined(__ANDROID__) // Fallback to the empty implementation. 
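// Minimal sketch of the frame-pointer walk that the stacktrace_*-inl.inc
// unwinders above are built around. It assumes the common layout in which the
// frame pointer addresses a two-word record: fp[0] holds the caller's frame
// pointer and fp[1] the return address (true for x86-64 and AArch64 when
// frame pointers are kept). Real unwinders add the frame-size and signal
// handling checks seen above; this toy version keeps only basic sanity checks.
#include <cstdint>

static int ToyGetStackTrace(void** result, int max_depth) {
  void** fp = static_cast<void**>(__builtin_frame_address(0));
  int n = 0;
  while (fp != nullptr && n < max_depth) {
    void** next_fp = static_cast<void**>(fp[0]);
    // The chain must stay pointer-aligned and move toward older (higher)
    // frames; otherwise stop walking rather than chase a bogus pointer.
    if ((reinterpret_cast<uintptr_t>(next_fp) & 7) != 0) break;
    if (next_fp <= fp) break;
    result[n++] = fp[1];  // Return address of the current frame.
    fp = next_fp;
  }
  return n;
}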
#if !defined(ABSL_STACKTRACE_INL_HEADER) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc index b2792a1f3a..5fa169a7ec 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc @@ -80,7 +80,7 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, if (IS_STACK_FRAMES) { // No implementation for finding out the stack frame sizes yet. - memset(sizes, 0, sizeof(*sizes) * result_count); + memset(sizes, 0, sizeof(*sizes) * static_cast(result_count)); } if (min_dropped_frames != nullptr) { if (size - skip_count - max_depth > 0) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc index cf8c05160c..085cef6702 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc @@ -231,11 +231,16 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. const int kMaxUnwind = 1000; - int j = 0; - for (; next_sp != nullptr && j < kMaxUnwind; j++) { + int num_dropped_frames = 0; + for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } next_sp = NextStackFrame(next_sp, ucp); } - *min_dropped_frames = j; + *min_dropped_frames = num_dropped_frames; } return n; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc index 8cbc78548c..20183fa321 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc @@ -30,56 +30,14 @@ #include #include #include +#include +#include #include "absl/base/attributes.h" -#include "absl/debugging/internal/address_is_readable.h" -#include "absl/debugging/internal/vdso_support.h" #include "absl/debugging/stacktrace.h" static const uintptr_t kUnknownFrameSize = 0; -#if defined(__linux__) -// Returns the address of the VDSO __kernel_rt_sigreturn function, if present. -static const unsigned char *GetKernelRtSigreturnAddress() { - constexpr uintptr_t kImpossibleAddress = 0; - ABSL_CONST_INIT static std::atomic memoized(kImpossibleAddress); - uintptr_t address = memoized.load(std::memory_order_relaxed); - if (address != kImpossibleAddress) { - return reinterpret_cast(address); - } - - address = reinterpret_cast(nullptr); - -#if ABSL_HAVE_VDSO_SUPPORT - absl::debugging_internal::VDSOSupport vdso; - if (vdso.IsPresent()) { - absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; - // Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10. 
- auto lookup = [&](int type) { - return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_4.15", type, - &symbol_info); - }; - if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) || - symbol_info.address == nullptr) { - // Unexpected: VDSO is present, yet the expected symbol is missing or - // null. - assert(false && "VDSO is present, but doesn't have expected symbol"); - } else { - if (reinterpret_cast(symbol_info.address) != - kImpossibleAddress) { - address = reinterpret_cast(symbol_info.address); - } else { - assert(false && "VDSO returned invalid address"); - } - } - } -#endif - - memoized.store(address, std::memory_order_relaxed); - return reinterpret_cast(address); -} -#endif // __linux__ - // Compute the size of a stack frame in [low..high). We assume that low < high. // Return size of kUnknownFrameSize. template @@ -96,7 +54,8 @@ static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) { template ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. -static void ** NextStackFrame(void **old_frame_pointer, const void *uc) { +static void ** NextStackFrame(void **old_frame_pointer, const void *uc, + const std::pair range) { // . // . // . @@ -114,55 +73,43 @@ static void ** NextStackFrame(void **old_frame_pointer, const void *uc) { // $sp ->| ... | // +----------------+ void **new_frame_pointer = reinterpret_cast(old_frame_pointer[-2]); - bool check_frame_size = true; - -#if defined(__linux__) - if (WITH_CONTEXT && uc != nullptr) { - // Check to see if next frame's return address is __kernel_rt_sigreturn. - if (old_frame_pointer[-1] == GetKernelRtSigreturnAddress()) { - const ucontext_t *ucv = static_cast(uc); - // old_frame_pointer is not suitable for unwinding, look at ucontext to - // discover frame pointer before signal. - // - // RISCV ELF psABI has the frame pointer at x8/fp/s0. - // -- RISCV psABI Table 18.2 - void **const pre_signal_frame_pointer = - reinterpret_cast(ucv->uc_mcontext.__gregs[8]); - - // Check the alleged frame pointer is actually readable. This is to - // prevent "double fault" in case we hit the first fault due to stack - // corruption. - if (!absl::debugging_internal::AddressIsReadable( - pre_signal_frame_pointer)) - return nullptr; - - // Alleged frame pointer is readable, use it for further unwinding. - new_frame_pointer = pre_signal_frame_pointer; - - // Skip frame size check if we return from a signal. We may be using an - // alterate stack for signals. - check_frame_size = false; - } - } -#endif + uintptr_t frame_pointer = reinterpret_cast(new_frame_pointer); // The RISCV ELF psABI mandates that the stack pointer is always 16-byte // aligned. - // FIXME(abdulras) this doesn't hold for ILP32E which only mandates a 4-byte + // TODO(#1236) this doesn't hold for ILP32E which only mandates a 4-byte // alignment. - if ((reinterpret_cast(new_frame_pointer) & 15) != 0) + if (frame_pointer & 15) return nullptr; + // If the new frame pointer matches the signal context, avoid terminating + // early to deal with alternate signal stacks. + if (WITH_CONTEXT) + if (const ucontext_t *ucv = static_cast(uc)) + // RISCV ELF psABI has the frame pointer at x8/fp/s0. + // -- RISCV psABI Table 18.2 + if (ucv->uc_mcontext.__gregs[8] == frame_pointer) + return new_frame_pointer; + // Check frame size. In strict mode, we assume frames to be under 100,000 // bytes. In non-strict mode, we relax the limit to 1MB. 
- if (check_frame_size) { - const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000; - const uintptr_t frame_size = - ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); - if (frame_size == kUnknownFrameSize || frame_size > max_size) + const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000; + const uintptr_t frame_size = + ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); + if (frame_size == kUnknownFrameSize) { + if (STRICT_UNWINDING) + return nullptr; + + // In non-strict mode permit non-contiguous stacks (e.g. alternate signal + // frame handling). + if (reinterpret_cast(new_frame_pointer) < range.first || + reinterpret_cast(new_frame_pointer) > range.second) return nullptr; } + if (frame_size > max_size) + return nullptr; + return new_frame_pointer; } @@ -171,55 +118,65 @@ ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, const void *ucp, int *min_dropped_frames) { + // The `frame_pointer` that is computed here points to the top of the frame. + // The two words preceding the address are the return address and the previous + // frame pointer. #if defined(__GNUC__) void **frame_pointer = reinterpret_cast(__builtin_frame_address(0)); #else #error reading stack pointer not yet supported on this platform #endif - skip_count++; // Skip the frame for this function. + std::pair stack = { + // assume that the first page is not the stack. + static_cast(sysconf(_SC_PAGESIZE)), + std::numeric_limits::max() - sizeof(void *) + }; + int n = 0; - - // The `frame_pointer` that is computed here points to the top of the frame. - // The two words preceding the address are the return address and the previous - // frame pointer. To find a PC value associated with the current frame, we - // need to go down a level in the call chain. So we remember the return - // address of the last frame seen. This does not work for the first stack - // frame, which belongs to `UnwindImp()` but we skip the frame for - // `UnwindImp()` anyway. - void *prev_return_address = nullptr; - + void *return_address = nullptr; while (frame_pointer && n < max_depth) { - // The absl::GetStackFrames routine si called when we are in some + return_address = frame_pointer[-1]; + + // The absl::GetStackFrames routine is called when we are in some // informational context (the failure signal handler for example). Use the // non-strict unwinding rules to produce a stack trace that is as complete // as possible (even if it contains a few bogus entries in some rare cases). void **next_frame_pointer = - NextStackFrame(frame_pointer, ucp); + NextStackFrame(frame_pointer, ucp, + stack); if (skip_count > 0) { skip_count--; } else { - result[n] = prev_return_address; + result[n] = return_address; if (IS_STACK_FRAMES) { sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer); } n++; } - prev_return_address = frame_pointer[-1]; + frame_pointer = next_frame_pointer; } + if (min_dropped_frames != nullptr) { // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. 
const int kMaxUnwind = 200; - int j = 0; - for (; frame_pointer != nullptr && j < kMaxUnwind; j++) { + int num_dropped_frames = 0; + for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } frame_pointer = - NextStackFrame(frame_pointer, ucp); + NextStackFrame(frame_pointer, ucp, + stack); } - *min_dropped_frames = j; + *min_dropped_frames = num_dropped_frames; } + return n; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc index 1c666c8b56..ef2b973ec3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc @@ -63,11 +63,12 @@ static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = template static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, const void*, int* min_dropped_frames) { - int n = 0; - if (!RtlCaptureStackBackTrace_fn) { - // can't find a stacktrace with no function to call + USHORT n = 0; + if (!RtlCaptureStackBackTrace_fn || skip_count < 0 || max_depth < 0) { + // can't get a stacktrace with no function/invalid args } else { - n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0); + n = RtlCaptureStackBackTrace_fn(static_cast(skip_count) + 2, + static_cast(max_depth), result, 0); } if (IS_STACK_FRAMES) { // No implementation for finding out the stack frame sizes yet. diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc index 70f79dfcb8..2f8bf428fb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc @@ -27,15 +27,15 @@ #include #include +#include +#include "absl/base/attributes.h" #include "absl/base/macros.h" #include "absl/base/port.h" #include "absl/debugging/internal/address_is_readable.h" #include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems #include "absl/debugging/stacktrace.h" -#include "absl/base/internal/raw_logging.h" - using absl::debugging_internal::AddressIsReadable; #if defined(__linux__) && defined(__i386__) @@ -139,13 +139,14 @@ static uintptr_t GetFP(const void *vuc) { // TODO(bcmills): -momit-leaf-frame-pointer is currently the default // behavior when building with clang. Talk to the C++ toolchain team about // fixing that. - if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp; + if (bp >= sp && bp - sp <= kMaxFrameBytes) + return static_cast(bp); // If bp isn't a plausible frame pointer, return the stack pointer instead. // If we're lucky, it points to the start of a stack frame; otherwise, we'll // get one frame of garbage in the stack trace and fail the sanity check on // the next iteration. - return sp; + return static_cast(sp); } #endif return 0; @@ -158,7 +159,8 @@ static uintptr_t GetFP(const void *vuc) { template ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. 
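// Sketch of the Windows capture path adjusted above: RtlCaptureStackBackTrace
// (exposed as CaptureStackBackTrace) takes unsigned arguments and returns a
// USHORT count, which is why the patch rejects negative skip_count/max_depth
// before converting. Assumes a Windows target; the helper name is made up.
#include <windows.h>

static int ToyCaptureBacktrace(void** frames, int max_depth, int skip_count) {
  if (max_depth < 0 || skip_count < 0) return 0;
  // +1 skips this helper's own frame.
  const USHORT captured = CaptureStackBackTrace(
      static_cast<ULONG>(skip_count) + 1, static_cast<ULONG>(max_depth),
      frames, nullptr);
  return static_cast<int>(captured);
}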
-static void **NextStackFrame(void **old_fp, const void *uc) { +static void **NextStackFrame(void **old_fp, const void *uc, + size_t stack_low, size_t stack_high) { void **new_fp = (void **)*old_fp; #if defined(__linux__) && defined(__i386__) @@ -257,6 +259,18 @@ static void **NextStackFrame(void **old_fp, const void *uc) { // at a greater address that the current one. if (new_fp_u <= old_fp_u) return nullptr; if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr; + + if (stack_low < old_fp_u && old_fp_u <= stack_high) { + // Old BP was in the expected stack region... + if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) { + // ... but new BP is outside of expected stack region. + // It is most likely bogus. + return nullptr; + } + } else { + // We may be here if we are executing in a co-routine with a + // separate stack. We can't do safety checks in this case. + } } else { if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below // In the non-strict mode, allow discontiguous stack frames. @@ -296,20 +310,27 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, int n = 0; void **fp = reinterpret_cast(__builtin_frame_address(0)); + // Assume that the first page is not stack. + size_t stack_low = static_cast(getpagesize()); + size_t stack_high = std::numeric_limits::max() - sizeof(void *); + while (fp && n < max_depth) { if (*(fp + 1) == reinterpret_cast(0)) { // In 64-bit code, we often see a frame that // points to itself and has a return address of 0. break; } - void **next_fp = NextStackFrame(fp, ucp); + void **next_fp = NextStackFrame( + fp, ucp, stack_low, stack_high); if (skip_count > 0) { skip_count--; } else { result[n] = *(fp + 1); if (IS_STACK_FRAMES) { if (next_fp > fp) { - sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp; + sizes[n] = static_cast( + reinterpret_cast(next_fp) - + reinterpret_cast(fp)); } else { // A frame-size of 0 is used to indicate unknown frame size. sizes[n] = 0; @@ -323,11 +344,17 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, // Implementation detail: we clamp the max of frames we are willing to // count, so as not to spend too much time in the loop below. const int kMaxUnwind = 1000; - int j = 0; - for (; fp != nullptr && j < kMaxUnwind; j++) { - fp = NextStackFrame(fp, ucp); + int num_dropped_frames = 0; + for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + fp = NextStackFrame(fp, ucp, stack_low, + stack_high); } - *min_dropped_frames = j; + *min_dropped_frames = num_dropped_frames; } return n; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc index 6be16d9072..8a588eaffe 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc @@ -20,12 +20,25 @@ #ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h +#if !defined(__has_include) +#define __has_include(header) 0 +#endif + #include #include +#if __has_include() +#include +#elif __has_include() #include +#endif #include -#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval. 
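// Sketch of the auxiliary-vector lookup that vdso_support.cc performs below:
// on glibc 2.16+, getauxval(AT_SYSINFO_EHDR) returns the address at which the
// kernel mapped the vDSO ELF image (0 if there is none). Assumes Linux/glibc;
// the helper name is made up for the example.
#include <elf.h>       // AT_SYSINFO_EHDR
#include <sys/auxv.h>  // getauxval

static const void* ToyFindVdsoBase() {
  const unsigned long base = getauxval(AT_SYSINFO_EHDR);
  return base == 0 ? nullptr : reinterpret_cast<const void*>(base);
}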
+#if !defined(__UCLIBC__) && defined(__GLIBC__) && \ + (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16)) +#define ABSL_HAVE_GETAUXVAL +#endif + +#ifdef ABSL_HAVE_GETAUXVAL #include #endif @@ -37,6 +50,17 @@ #define AT_SYSINFO_EHDR 33 // for crosstoolv10 #endif +#if defined(__NetBSD__) +using Elf32_auxv_t = Aux32Info; +using Elf64_auxv_t = Aux64Info; +#endif +#if defined(__FreeBSD__) +#if defined(__ELF_WORD_SIZE) && __ELF_WORD_SIZE == 64 +using Elf64_auxv_t = Elf64_Auxinfo; +#endif +using Elf32_auxv_t = Elf32_Auxinfo; +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace debugging_internal { @@ -45,7 +69,9 @@ ABSL_CONST_INIT std::atomic VDSOSupport::vdso_base_( debugging_internal::ElfMemImage::kInvalidBase); -std::atomic VDSOSupport::getcpu_fn_(&InitAndGetCPU); +ABSL_CONST_INIT std::atomic VDSOSupport::getcpu_fn_( + &InitAndGetCPU); + VDSOSupport::VDSOSupport() // If vdso_base_ is still set to kInvalidBase, we got here // before VDSOSupport::Init has been called. Call it now. @@ -65,7 +91,7 @@ VDSOSupport::VDSOSupport() // the operation should be idempotent. const void *VDSOSupport::Init() { const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase; -#if __GLIBC_PREREQ(2, 16) +#ifdef ABSL_HAVE_GETAUXVAL if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) { errno = 0; const void *const sysinfo_ehdr = @@ -74,7 +100,7 @@ const void *VDSOSupport::Init() { vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed); } } -#endif // __GLIBC_PREREQ(2, 16) +#endif // ABSL_HAVE_GETAUXVAL if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) { int fd = open("/proc/self/auxv", O_RDONLY); if (fd == -1) { @@ -86,8 +112,13 @@ const void *VDSOSupport::Init() { ElfW(auxv_t) aux; while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) { if (aux.a_type == AT_SYSINFO_EHDR) { +#if defined(__NetBSD__) + vdso_base_.store(reinterpret_cast(aux.a_v), + std::memory_order_relaxed); +#else vdso_base_.store(reinterpret_cast(aux.a_un.a_val), std::memory_order_relaxed); +#endif break; } } @@ -162,8 +193,9 @@ long VDSOSupport::InitAndGetCPU(unsigned *cpu, // NOLINT(runtime/int) ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int GetCPU() { unsigned cpu; - int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr); - return ret_code == 0 ? cpu : ret_code; + long ret_code = // NOLINT(runtime/int) + (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr); + return ret_code == 0 ? static_cast(cpu) : static_cast(ret_code); } } // namespace debugging_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.cc index 764ca0ad00..195e82bf16 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.cc @@ -11,29 +11,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - +// // Wrappers around lsan_interface functions. -// When lsan is not linked in, these functions are not available, -// therefore Abseil code which depends on these functions is conditioned on the -// definition of LEAK_SANITIZER. -#include "absl/base/attributes.h" +// +// These are always-available run-time functions manipulating the LeakSanitizer, +// even when the lsan_interface (and LeakSanitizer) is not available. 
When +// LeakSanitizer is not linked in, these functions become no-op stubs. + #include "absl/debugging/leak_check.h" -#ifndef LEAK_SANITIZER +#include "absl/base/attributes.h" +#include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -bool HaveLeakSanitizer() { return false; } -bool LeakCheckerIsActive() { return false; } -void DoIgnoreLeak(const void*) { } -void RegisterLivePointers(const void*, size_t) { } -void UnRegisterLivePointers(const void*, size_t) { } -LeakCheckDisabler::LeakCheckDisabler() { } -LeakCheckDisabler::~LeakCheckDisabler() { } -ABSL_NAMESPACE_END -} // namespace absl - -#else +#if defined(ABSL_HAVE_LEAK_SANITIZER) #include @@ -66,4 +56,18 @@ LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); } ABSL_NAMESPACE_END } // namespace absl -#endif // LEAK_SANITIZER +#else // defined(ABSL_HAVE_LEAK_SANITIZER) + +namespace absl { +ABSL_NAMESPACE_BEGIN +bool HaveLeakSanitizer() { return false; } +bool LeakCheckerIsActive() { return false; } +void DoIgnoreLeak(const void*) { } +void RegisterLivePointers(const void*, size_t) { } +void UnRegisterLivePointers(const void*, size_t) { } +LeakCheckDisabler::LeakCheckDisabler() { } +LeakCheckDisabler::~LeakCheckDisabler() { } +ABSL_NAMESPACE_END +} // namespace absl + +#endif // defined(ABSL_HAVE_LEAK_SANITIZER) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.h index 5fc2b052e4..eff162f67f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check.h @@ -24,7 +24,24 @@ // Note: this leak checking API is not yet supported in MSVC. // Leak checking is enabled by default in all ASan builds. // -// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// https://clang.llvm.org/docs/LeakSanitizer.html +// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer +// is enabled. To use the mode, simply pass `-fsanitize=address` to both the +// compiler and linker. An example Bazel command could be +// +// $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ... +// +// GCC and Clang auto support a standalone LeakSanitizer mode (a mode which does +// not also use AddressSanitizer). To use the mode, simply pass +// `-fsanitize=leak` to both the compiler and linker. Since GCC does not +// currently provide a way of detecting this mode at compile-time, GCC users +// must also pass -DLEAK_SANIITIZER to the compiler. An example Bazel command +// could be +// +// $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak +// --linkopt=-fsanitize=leak ... 
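// Usage sketch for the public leak-check helpers documented above. With a
// LeakSanitizer build (-fsanitize=address or -fsanitize=leak, per the notes
// above) the calls take effect; otherwise, per the leak_check.cc change, they
// compile to no-op stubs, so the same code builds either way.
#include <string>
#include "absl/debugging/leak_check.h"

void LeakCheckExamples() {
  // Exempt a single allocation from leak reports.
  std::string* kept = absl::IgnoreLeak(new std::string("intentionally kept"));
  (void)kept;

  // Exempt everything allocated while the disabler is in scope.
  {
    absl::LeakCheckDisabler disabler;
    new std::string("also exempt from leak reports");
  }
}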
// // ----------------------------------------------------------------------------- #ifndef ABSL_DEBUGGING_LEAK_CHECK_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_test.cc index 9fcfc8e50b..6a42e31bad 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_test.cc @@ -15,27 +15,24 @@ #include #include "gtest/gtest.h" +#include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/debugging/leak_check.h" namespace { -TEST(LeakCheckTest, DetectLeakSanitizer) { -#ifdef ABSL_EXPECT_LEAK_SANITIZER - EXPECT_TRUE(absl::HaveLeakSanitizer()); - EXPECT_TRUE(absl::LeakCheckerIsActive()); -#else - EXPECT_FALSE(absl::HaveLeakSanitizer()); - EXPECT_FALSE(absl::LeakCheckerIsActive()); -#endif -} - TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) { + if (!absl::LeakCheckerIsActive()) { + GTEST_SKIP() << "LeakChecker is not active"; + } auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string")); ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str()); } TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) { + if (!absl::LeakCheckerIsActive()) { + GTEST_SKIP() << "LeakChecker is not active"; + } absl::LeakCheckDisabler disabler; auto foo = new std::string("some string leaked while checks are disabled"); ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str()); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/stacktrace_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/stacktrace_benchmark.cc new file mode 100644 index 0000000000..9360bafe34 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/stacktrace_benchmark.cc @@ -0,0 +1,55 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/debugging/stacktrace.h" +#include "benchmark/benchmark.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { + +static constexpr int kMaxStackDepth = 100; +static constexpr int kCacheSize = (1 << 16); +void* pcs[kMaxStackDepth]; + +ABSL_ATTRIBUTE_NOINLINE void func(benchmark::State& state, int x, int depth) { + if (x <= 0) { + // Touch a significant amount of memory so that the stack is likely to be + // not cached in the L1 cache. 
+ state.PauseTiming(); + int* arr = new int[kCacheSize]; + for (int i = 0; i < kCacheSize; ++i) benchmark::DoNotOptimize(arr[i] = 100); + delete[] arr; + state.ResumeTiming(); + benchmark::DoNotOptimize(absl::GetStackTrace(pcs, depth, 0)); + return; + } + ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); + func(state, --x, depth); +} + +void BM_GetStackTrace(benchmark::State& state) { + int depth = state.range(0); + for (auto s : state) { + func(state, depth, depth); + } +} + +BENCHMARK(BM_GetStackTrace)->DenseRange(10, kMaxStackDepth, 10); +} // namespace +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize.cc index f1abdfda59..638d3954ae 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize.cc @@ -23,6 +23,11 @@ #endif #endif +// Emscripten symbolization relies on JS. Do not use them in standalone mode. +#if defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) +#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM +#endif + #if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) #include "absl/debugging/symbolize_elf.inc" #elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32) @@ -31,7 +36,7 @@ #include "absl/debugging/symbolize_win32.inc" #elif defined(__APPLE__) #include "absl/debugging/symbolize_darwin.inc" -#elif defined(__EMSCRIPTEN__) +#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM) #include "absl/debugging/symbolize_emscripten.inc" #else #include "absl/debugging/symbolize_unimplemented.inc" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_darwin.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_darwin.inc index 443ce9efc4..cf63d1919b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_darwin.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_darwin.inc @@ -83,13 +83,14 @@ bool Symbolize(const void* pc, char* out, int out_size) { memmove(out, tmp_buf, len + 1); } } else { - strncpy(out, symbol.c_str(), out_size); + strncpy(out, symbol.c_str(), static_cast(out_size)); } if (out[out_size - 1] != '\0') { // strncpy() does not '\0' terminate when it truncates. static constexpr char kEllipsis[] = "..."; - int ellipsis_size = std::min(sizeof(kEllipsis) - 1, out_size - 1); + size_t ellipsis_size = + std::min(sizeof(kEllipsis) - 1, static_cast(out_size) - 1); memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size); out[out_size - 1] = '\0'; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc index 87dbd078b9..ffb4eecfb9 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc @@ -77,6 +77,10 @@ #include "absl/debugging/internal/vdso_support.h" #include "absl/strings/string_view.h" +#if defined(__FreeBSD__) && !defined(ElfW) +#define ElfW(x) __ElfN(x) +#endif + namespace absl { ABSL_NAMESPACE_BEGIN @@ -201,7 +205,8 @@ struct ObjFile { // PT_LOAD program header describing executable code. // Normally we expect just one, but SWIFT binaries have two. 
- std::array phdr; + // CUDA binaries have 3 (see cr/473913254 description). + std::array phdr; }; // Build 4-way associative cache for symbols. Within each cache line, symbols @@ -248,21 +253,21 @@ class AddrMap { public: AddrMap() : size_(0), allocated_(0), obj_(nullptr) {} ~AddrMap() { base_internal::LowLevelAlloc::Free(obj_); } - int Size() const { return size_; } - ObjFile *At(int i) { return &obj_[i]; } + size_t Size() const { return size_; } + ObjFile *At(size_t i) { return &obj_[i]; } ObjFile *Add(); void Clear(); private: - int size_; // count of valid elements (<= allocated_) - int allocated_; // count of allocated elements - ObjFile *obj_; // array of allocated_ elements + size_t size_; // count of valid elements (<= allocated_) + size_t allocated_; // count of allocated elements + ObjFile *obj_; // array of allocated_ elements AddrMap(const AddrMap &) = delete; AddrMap &operator=(const AddrMap &) = delete; }; void AddrMap::Clear() { - for (int i = 0; i != size_; i++) { + for (size_t i = 0; i != size_; i++) { At(i)->~ObjFile(); } size_ = 0; @@ -270,7 +275,7 @@ void AddrMap::Clear() { ObjFile *AddrMap::Add() { if (size_ == allocated_) { - int new_allocated = allocated_ * 2 + 50; + size_t new_allocated = allocated_ * 2 + 50; ObjFile *new_obj_ = static_cast(base_internal::LowLevelAlloc::AllocWithArena( new_allocated * sizeof(*new_obj_), SigSafeArena())); @@ -296,7 +301,7 @@ class Symbolizer { private: char *CopyString(const char *s) { - int len = strlen(s); + size_t len = strlen(s); char *dst = static_cast( base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); ABSL_RAW_CHECK(dst != nullptr, "out of memory"); @@ -317,8 +322,9 @@ class Symbolizer { FindSymbolResult GetSymbolFromObjectFile(const ObjFile &obj, const void *const pc, const ptrdiff_t relocation, - char *out, int out_size, - char *tmp_buf, int tmp_buf_size); + char *out, size_t out_size, + char *tmp_buf, size_t tmp_buf_size); + const char *GetUncachedSymbol(const void *pc); enum { SYMBOL_BUF_SIZE = 3072, @@ -348,11 +354,11 @@ static std::atomic g_cached_symbolizer; } // namespace -static int SymbolizerSize() { +static size_t SymbolizerSize() { #if defined(__wasm__) || defined(__asmjs__) - int pagesize = getpagesize(); + auto pagesize = static_cast(getpagesize()); #else - int pagesize = sysconf(_SC_PAGESIZE); + auto pagesize = static_cast(sysconf(_SC_PAGESIZE)); #endif return ((sizeof(Symbolizer) - 1) / pagesize + 1) * pagesize; } @@ -424,7 +430,7 @@ static ssize_t ReadPersistent(int fd, void *buf, size_t count) { if (len == 0) { // Reached EOF. break; } - num_bytes += len; + num_bytes += static_cast(len); } SAFE_ASSERT(num_bytes <= count); return static_cast(num_bytes); @@ -437,8 +443,8 @@ static ssize_t ReadFromOffset(const int fd, void *buf, const size_t count, const off_t offset) { off_t off = lseek(fd, offset, SEEK_SET); if (off == (off_t)-1) { - ABSL_RAW_LOG(WARNING, "lseek(%d, %ju, SEEK_SET) failed: errno=%d", fd, - static_cast(offset), errno); + ABSL_RAW_LOG(WARNING, "lseek(%d, %jd, SEEK_SET) failed: errno=%d", fd, + static_cast(offset), errno); return -1; } return ReadPersistent(fd, buf, count); @@ -473,29 +479,37 @@ static int FileGetElfType(const int fd) { // inlined. 
static ABSL_ATTRIBUTE_NOINLINE bool GetSectionHeaderByType( const int fd, ElfW(Half) sh_num, const off_t sh_offset, ElfW(Word) type, - ElfW(Shdr) * out, char *tmp_buf, int tmp_buf_size) { + ElfW(Shdr) * out, char *tmp_buf, size_t tmp_buf_size) { ElfW(Shdr) *buf = reinterpret_cast(tmp_buf); - const int buf_entries = tmp_buf_size / sizeof(buf[0]); - const int buf_bytes = buf_entries * sizeof(buf[0]); + const size_t buf_entries = tmp_buf_size / sizeof(buf[0]); + const size_t buf_bytes = buf_entries * sizeof(buf[0]); - for (int i = 0; i < sh_num;) { - const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]); - const ssize_t num_bytes_to_read = + for (size_t i = 0; static_cast(i) < sh_num;) { + const size_t num_bytes_left = + (static_cast(sh_num) - i) * sizeof(buf[0]); + const size_t num_bytes_to_read = (buf_bytes > num_bytes_left) ? num_bytes_left : buf_bytes; - const off_t offset = sh_offset + i * sizeof(buf[0]); + const off_t offset = sh_offset + static_cast(i * sizeof(buf[0])); const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read, offset); - if (len % sizeof(buf[0]) != 0) { + if (len < 0) { ABSL_RAW_LOG( WARNING, - "Reading %zd bytes from offset %ju returned %zd which is not a " + "Reading %zu bytes from offset %ju returned %zd which is negative.", + num_bytes_to_read, static_cast(offset), len); + return false; + } + if (static_cast(len) % sizeof(buf[0]) != 0) { + ABSL_RAW_LOG( + WARNING, + "Reading %zu bytes from offset %jd returned %zd which is not a " "multiple of %zu.", - num_bytes_to_read, static_cast(offset), len, + num_bytes_to_read, static_cast(offset), len, sizeof(buf[0])); return false; } - const ssize_t num_headers_in_buf = len / sizeof(buf[0]); + const size_t num_headers_in_buf = static_cast(len) / sizeof(buf[0]); SAFE_ASSERT(num_headers_in_buf <= buf_entries); - for (int j = 0; j < num_headers_in_buf; ++j) { + for (size_t j = 0; j < num_headers_in_buf; ++j) { if (buf[j].sh_type == type) { *out = buf[j]; return true; @@ -519,8 +533,8 @@ bool ForEachSection(int fd, } ElfW(Shdr) shstrtab; - off_t shstrtab_offset = - (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx); + off_t shstrtab_offset = static_cast(elf_header.e_shoff) + + elf_header.e_shentsize * elf_header.e_shstrndx; if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) { return false; } @@ -528,22 +542,23 @@ bool ForEachSection(int fd, for (int i = 0; i < elf_header.e_shnum; ++i) { ElfW(Shdr) out; off_t section_header_offset = - (elf_header.e_shoff + elf_header.e_shentsize * i); + static_cast(elf_header.e_shoff) + elf_header.e_shentsize * i; if (!ReadFromOffsetExact(fd, &out, sizeof(out), section_header_offset)) { return false; } - off_t name_offset = shstrtab.sh_offset + out.sh_name; + off_t name_offset = static_cast(shstrtab.sh_offset) + out.sh_name; char header_name[kMaxSectionNameLen]; ssize_t n_read = ReadFromOffset(fd, &header_name, kMaxSectionNameLen, name_offset); - if (n_read == -1) { + if (n_read < 0) { return false; } else if (n_read > kMaxSectionNameLen) { // Long read? 
return false; } - absl::string_view name(header_name, strnlen(header_name, n_read)); + absl::string_view name(header_name, + strnlen(header_name, static_cast(n_read))); if (!callback(name, out)) { break; } @@ -570,19 +585,19 @@ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len, } ElfW(Shdr) shstrtab; - off_t shstrtab_offset = - (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx); + off_t shstrtab_offset = static_cast(elf_header.e_shoff) + + elf_header.e_shentsize * elf_header.e_shstrndx; if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) { return false; } for (int i = 0; i < elf_header.e_shnum; ++i) { off_t section_header_offset = - (elf_header.e_shoff + elf_header.e_shentsize * i); + static_cast(elf_header.e_shoff) + elf_header.e_shentsize * i; if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) { return false; } - off_t name_offset = shstrtab.sh_offset + out->sh_name; + off_t name_offset = static_cast(shstrtab.sh_offset) + out->sh_name; ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset); if (n_read < 0) { return false; @@ -640,10 +655,10 @@ static bool InSection(const void *address, const ElfW(Shdr) * section) { } static const char *ComputeOffset(const char *base, ptrdiff_t offset) { - // Note: cast to uintptr_t to avoid undefined behavior when base evaluates to + // Note: cast to intptr_t to avoid undefined behavior when base evaluates to // zero and offset is non-zero. - return reinterpret_cast( - reinterpret_cast(base) + offset); + return reinterpret_cast(reinterpret_cast(base) + + offset); } // Read a symbol table and look for the symbol containing the @@ -656,18 +671,18 @@ static const char *ComputeOffset(const char *base, ptrdiff_t offset) { // To keep stack consumption low, we would like this function to not get // inlined. static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( - const void *const pc, const int fd, char *out, int out_size, + const void *const pc, const int fd, char *out, size_t out_size, ptrdiff_t relocation, const ElfW(Shdr) * strtab, const ElfW(Shdr) * symtab, - const ElfW(Shdr) * opd, char *tmp_buf, int tmp_buf_size) { + const ElfW(Shdr) * opd, char *tmp_buf, size_t tmp_buf_size) { if (symtab == nullptr) { return SYMBOL_NOT_FOUND; } // Read multiple symbols at once to save read() calls. 
ElfW(Sym) *buf = reinterpret_cast(tmp_buf); - const int buf_entries = tmp_buf_size / sizeof(buf[0]); + const size_t buf_entries = tmp_buf_size / sizeof(buf[0]); - const int num_symbols = symtab->sh_size / symtab->sh_entsize; + const size_t num_symbols = symtab->sh_size / symtab->sh_entsize; // On platforms using an .opd section (PowerPC & IA64), a function symbol // has the address of a function descriptor, which contains the real @@ -682,16 +697,19 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( ElfW(Sym) best_match; SafeMemZero(&best_match, sizeof(best_match)); bool found_match = false; - for (int i = 0; i < num_symbols;) { - off_t offset = symtab->sh_offset + i * symtab->sh_entsize; - const int num_remaining_symbols = num_symbols - i; - const int entries_in_chunk = std::min(num_remaining_symbols, buf_entries); - const int bytes_in_chunk = entries_in_chunk * sizeof(buf[0]); + for (size_t i = 0; i < num_symbols;) { + off_t offset = + static_cast(symtab->sh_offset + i * symtab->sh_entsize); + const size_t num_remaining_symbols = num_symbols - i; + const size_t entries_in_chunk = + std::min(num_remaining_symbols, buf_entries); + const size_t bytes_in_chunk = entries_in_chunk * sizeof(buf[0]); const ssize_t len = ReadFromOffset(fd, buf, bytes_in_chunk, offset); - SAFE_ASSERT(len % sizeof(buf[0]) == 0); - const ssize_t num_symbols_in_buf = len / sizeof(buf[0]); + SAFE_ASSERT(len >= 0); + SAFE_ASSERT(static_cast(len) % sizeof(buf[0]) == 0); + const size_t num_symbols_in_buf = static_cast(len) / sizeof(buf[0]); SAFE_ASSERT(num_symbols_in_buf <= entries_in_chunk); - for (int j = 0; j < num_symbols_in_buf; ++j) { + for (size_t j = 0; j < num_symbols_in_buf; ++j) { const ElfW(Sym) &symbol = buf[j]; // For a DSO, a symbol address is relocated by the loading address. @@ -708,7 +726,7 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( // about what encoding is being used; we just want the real start address // of the function. start_address = reinterpret_cast( - reinterpret_cast(start_address) & ~1); + reinterpret_cast(start_address) & ~1u); #endif if (deref_function_descriptor_pointer && @@ -721,7 +739,8 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( // If pc is inside the .opd section, it points to a function descriptor. const size_t size = pc_in_opd ? kFunctionDescriptorSize : symbol.st_size; - const void *const end_address = ComputeOffset(start_address, size); + const void *const end_address = + ComputeOffset(start_address, static_cast(size)); if (symbol.st_value != 0 && // Skip null value symbols. symbol.st_shndx != 0 && // Skip undefined symbols. #ifdef STT_TLS @@ -739,16 +758,18 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( } if (found_match) { - const size_t off = strtab->sh_offset + best_match.st_name; + const off_t off = + static_cast(strtab->sh_offset) + best_match.st_name; const ssize_t n_read = ReadFromOffset(fd, out, out_size, off); if (n_read <= 0) { // This should never happen. ABSL_RAW_LOG(WARNING, - "Unable to read from fd %d at offset %zu: n_read = %zd", fd, - off, n_read); + "Unable to read from fd %d at offset %lld: n_read = %zd", fd, + static_cast(off), n_read); return SYMBOL_NOT_FOUND; } - ABSL_RAW_CHECK(n_read <= out_size, "ReadFromOffset read too much data."); + ABSL_RAW_CHECK(static_cast(n_read) <= out_size, + "ReadFromOffset read too much data."); // strtab->sh_offset points into .strtab-like section that contains // NUL-terminated strings: '\0foo\0barbaz\0...". 
@@ -756,7 +777,7 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( // sh_offset+st_name points to the start of symbol name, but we don't know // how long the symbol is, so we try to read as much as we have space for, // and usually over-read (i.e. there is a NUL somewhere before n_read). - if (memchr(out, '\0', n_read) == nullptr) { + if (memchr(out, '\0', static_cast(n_read)) == nullptr) { // Either out_size was too small (n_read == out_size and no NUL), or // we tried to read past the EOF (n_read < out_size) and .strtab is // corrupt (missing terminating NUL; should never happen for valid ELF). @@ -774,7 +795,7 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( // See FindSymbol() comment for description of return value. FindSymbolResult Symbolizer::GetSymbolFromObjectFile( const ObjFile &obj, const void *const pc, const ptrdiff_t relocation, - char *out, int out_size, char *tmp_buf, int tmp_buf_size) { + char *out, size_t out_size, char *tmp_buf, size_t tmp_buf_size) { ElfW(Shdr) symtab; ElfW(Shdr) strtab; ElfW(Shdr) opd; @@ -797,13 +818,15 @@ FindSymbolResult Symbolizer::GetSymbolFromObjectFile( // Consult a regular symbol table, then fall back to the dynamic symbol table. for (const auto symbol_table_type : {SHT_SYMTAB, SHT_DYNSYM}) { if (!GetSectionHeaderByType(obj.fd, obj.elf_header.e_shnum, - obj.elf_header.e_shoff, symbol_table_type, + static_cast(obj.elf_header.e_shoff), + static_cast(symbol_table_type), &symtab, tmp_buf, tmp_buf_size)) { continue; } if (!ReadFromOffsetExact( obj.fd, &strtab, sizeof(strtab), - obj.elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) { + static_cast(obj.elf_header.e_shoff + + symtab.sh_link * sizeof(symtab)))) { continue; } const FindSymbolResult rc = @@ -828,7 +851,7 @@ class FileDescriptor { ~FileDescriptor() { if (fd_ >= 0) { - NO_INTR(close(fd_)); + close(fd_); } } @@ -845,7 +868,7 @@ class FileDescriptor { // and snprintf(). class LineReader { public: - explicit LineReader(int fd, char *buf, int buf_len) + explicit LineReader(int fd, char *buf, size_t buf_len) : fd_(fd), buf_len_(buf_len), buf_(buf), @@ -873,12 +896,12 @@ class LineReader { bol_ = eol_ + 1; // Advance to the next line in the buffer. SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_". if (!HasCompleteLine()) { - const int incomplete_line_length = eod_ - bol_; + const auto incomplete_line_length = static_cast(eod_ - bol_); // Move the trailing incomplete line to the beginning. memmove(buf_, bol_, incomplete_line_length); // Read text from file and append it. char *const append_pos = buf_ + incomplete_line_length; - const int capacity_left = buf_len_ - incomplete_line_length; + const size_t capacity_left = buf_len_ - incomplete_line_length; const ssize_t num_bytes = ReadPersistent(fd_, append_pos, capacity_left); if (num_bytes <= 0) { // EOF or error. @@ -901,7 +924,8 @@ class LineReader { private: char *FindLineFeed() const { - return reinterpret_cast(memchr(bol_, '\n', eod_ - bol_)); + return reinterpret_cast( + memchr(bol_, '\n', static_cast(eod_ - bol_))); } bool BufferIsEmpty() const { return buf_ == eod_; } @@ -911,7 +935,7 @@ class LineReader { } const int fd_; - const int buf_len_; + const size_t buf_len_; char *const buf_; char *bol_; char *eol_; @@ -929,7 +953,8 @@ static const char *GetHex(const char *start, const char *end, int ch = *p; if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) { - hex = (hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9); + hex = (hex << 4) | + static_cast(ch < 'A' ? 
ch - '0' : (ch & 0xF) + 9); } else { // Encountered the first non-hex character. break; } @@ -961,7 +986,7 @@ static bool ShouldUseMapping(const char *const flags) { static ABSL_ATTRIBUTE_NOINLINE bool ReadAddrMap( bool (*callback)(const char *filename, const void *const start_addr, const void *const end_addr, uint64_t offset, void *arg), - void *arg, void *tmp_buf, int tmp_buf_size) { + void *arg, void *tmp_buf, size_t tmp_buf_size) { // Use /proc/self/task//maps instead of /proc/self/maps. The latter // requires kernel to stop all threads, and is significantly slower when there // are 1000s of threads. @@ -1076,10 +1101,10 @@ ObjFile *Symbolizer::FindObjFile(const void *const addr, size_t len) { } } - int lo = 0; - int hi = addr_map_.Size(); + size_t lo = 0; + size_t hi = addr_map_.Size(); while (lo < hi) { - int mid = (lo + hi) / 2; + size_t mid = (lo + hi) / 2; if (addr < addr_map_.At(mid)->end_addr) { hi = mid; } else { @@ -1101,11 +1126,11 @@ ObjFile *Symbolizer::FindObjFile(const void *const addr, size_t len) { } void Symbolizer::ClearAddrMap() { - for (int i = 0; i != addr_map_.Size(); i++) { + for (size_t i = 0; i != addr_map_.Size(); i++) { ObjFile *o = addr_map_.At(i); base_internal::LowLevelAlloc::Free(o->filename); if (o->fd >= 0) { - NO_INTR(close(o->fd)); + close(o->fd); } } addr_map_.Clear(); @@ -1121,7 +1146,7 @@ bool Symbolizer::RegisterObjFile(const char *filename, // Files are supposed to be added in the increasing address order. Make // sure that's the case. - int addr_map_size = impl->addr_map_.Size(); + size_t addr_map_size = impl->addr_map_.Size(); if (addr_map_size != 0) { ObjFile *old = impl->addr_map_.At(addr_map_size - 1); if (old->end_addr > end_addr) { @@ -1141,6 +1166,14 @@ bool Symbolizer::RegisterObjFile(const char *filename, reinterpret_cast(old->end_addr), old->filename); } return true; + } else if (old->end_addr == start_addr && + reinterpret_cast(old->start_addr) - old->offset == + reinterpret_cast(start_addr) - offset && + strcmp(old->filename, filename) == 0) { + // Two contiguous map entries that span a contiguous region of the file, + // perhaps because some part of the file was mlock()ed. Combine them. + old->end_addr = end_addr; + return true; } } ObjFile *obj = impl->addr_map_.Add(); @@ -1157,12 +1190,12 @@ bool Symbolizer::RegisterObjFile(const char *filename, // where the input symbol is demangled in-place. // To keep stack consumption low, we would like this function to not // get inlined. -static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size, +static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, size_t out_size, char *tmp_buf, - int tmp_buf_size) { + size_t tmp_buf_size) { if (Demangle(out, tmp_buf, tmp_buf_size)) { // Demangling succeeded. Copy to out if the space allows. - int len = strlen(tmp_buf); + size_t len = strlen(tmp_buf); if (len + 1 <= out_size) { // +1 for '\0'. 
SAFE_ASSERT(len < tmp_buf_size); memmove(out, tmp_buf, len + 1); @@ -1205,7 +1238,8 @@ const char *Symbolizer::InsertSymbolInCache(const void *const pc, SymbolCacheLine *line = GetCacheLine(pc); uint32_t max_age = 0; - int oldest_index = -1; + size_t oldest_index = 0; + bool found_oldest_index = false; for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) { if (line->pc[i] == nullptr) { AgeSymbols(line); @@ -1217,11 +1251,12 @@ const char *Symbolizer::InsertSymbolInCache(const void *const pc, if (line->age[i] >= max_age) { max_age = line->age[i]; oldest_index = i; + found_oldest_index = true; } } AgeSymbols(line); - ABSL_RAW_CHECK(oldest_index >= 0, "Corrupt cache"); + ABSL_RAW_CHECK(found_oldest_index, "Corrupt cache"); base_internal::LowLevelAlloc::Free(line->name[oldest_index]); line->pc[oldest_index] = pc; line->name[oldest_index] = CopyString(name); @@ -1290,7 +1325,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { } const int phnum = obj->elf_header.e_phnum; const int phentsize = obj->elf_header.e_phentsize; - size_t phoff = obj->elf_header.e_phoff; + auto phoff = static_cast(obj->elf_header.e_phoff); size_t num_executable_load_segments = 0; for (int j = 0; j < phnum; j++) { ElfW(Phdr) phdr; @@ -1308,8 +1343,9 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { if (num_executable_load_segments < obj->phdr.size()) { memcpy(&obj->phdr[num_executable_load_segments++], &phdr, sizeof(phdr)); } else { - ABSL_RAW_LOG(WARNING, "%s: too many executable LOAD segments", - obj->filename); + ABSL_RAW_LOG( + WARNING, "%s: too many executable LOAD segments: %zu >= %zu", + obj->filename, num_executable_load_segments, obj->phdr.size()); break; } } @@ -1329,13 +1365,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { // they are called here as well. // To keep stack consumption low, we would like this function to not // get inlined. -const char *Symbolizer::GetSymbol(const void *const pc) { - const char *entry = FindSymbolInCache(pc); - if (entry != nullptr) { - return entry; - } - symbol_buf_[0] = '\0'; - +const char *Symbolizer::GetUncachedSymbol(const void *pc) { ObjFile *const obj = FindObjFile(pc, 1); ptrdiff_t relocation = 0; int fd = -1; @@ -1347,7 +1377,7 @@ const char *Symbolizer::GetSymbol(const void *const pc) { // // For obj->offset > 0, adjust the relocation since a mapping at offset // X in the file will have a start address of [true relocation]+X. - relocation = start_addr - obj->offset; + relocation = static_cast(start_addr - obj->offset); // Note: some binaries have multiple "rx" LOAD segments. We must // find the right one. @@ -1423,6 +1453,42 @@ const char *Symbolizer::GetSymbol(const void *const pc) { return InsertSymbolInCache(pc, symbol_buf_); } +const char *Symbolizer::GetSymbol(const void *pc) { + const char *entry = FindSymbolInCache(pc); + if (entry != nullptr) { + return entry; + } + symbol_buf_[0] = '\0'; + +#ifdef __hppa__ + { + // In some contexts (e.g., return addresses), PA-RISC uses the lowest two + // bits of the address to indicate the privilege level. Clear those bits + // before trying to symbolize. + const auto pc_bits = reinterpret_cast(pc); + const auto address = pc_bits & ~0x3; + entry = GetUncachedSymbol(reinterpret_cast(address)); + if (entry != nullptr) { + return entry; + } + + // In some contexts, PA-RISC also uses bit 1 of the address to indicate that + // this is a cross-DSO function pointer. 
Such function pointers actually + // point to a procedure label, a struct whose first 32-bit (pointer) element + // actually points to the function text. With no symbol found for this + // address so far, try interpreting it as a cross-DSO function pointer and + // see how that goes. + if (pc_bits & 0x2) { + return GetUncachedSymbol(*reinterpret_cast(address)); + } + + return nullptr; + } +#else + return GetUncachedSymbol(pc); +#endif +} + bool RemoveAllSymbolDecorators(void) { if (!g_decorators_mu.TryLock()) { // Someone else is using decorators. Get out. @@ -1486,7 +1552,7 @@ bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset ret = false; } else { // TODO(ckennelly): Move this into a string copy routine. - int len = strlen(filename); + size_t len = strlen(filename); char *dst = static_cast( base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); ABSL_RAW_CHECK(dst != nullptr, "out of memory"); @@ -1542,16 +1608,17 @@ bool Symbolize(const void *pc, char *out, int out_size) { const char *name = s->GetSymbol(pc); bool ok = false; if (name != nullptr && out_size > 0) { - strncpy(out, name, out_size); + strncpy(out, name, static_cast(out_size)); ok = true; - if (out[out_size - 1] != '\0') { + if (out[static_cast(out_size) - 1] != '\0') { // strncpy() does not '\0' terminate when it truncates. Do so, with // trailing ellipsis. static constexpr char kEllipsis[] = "..."; - int ellipsis_size = - std::min(implicit_cast(strlen(kEllipsis)), out_size - 1); - memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size); - out[out_size - 1] = '\0'; + size_t ellipsis_size = + std::min(strlen(kEllipsis), static_cast(out_size) - 1); + memcpy(out + static_cast(out_size) - ellipsis_size - 1, kEllipsis, + ellipsis_size); + out[static_cast(out_size) - 1] = '\0'; } } debugging_internal::FreeSymbolizer(s); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_test.cc index c710a3da81..3165c6ede1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_test.cc @@ -392,12 +392,14 @@ TEST(Symbolize, InstallAndRemoveSymbolDecorators) { DummySymbolDecorator, &c_message), 0); - char *address = reinterpret_cast(1); - EXPECT_STREQ("abc", TrySymbolize(address++)); + // Use addresses 4 and 8 here to ensure that we always use valid addresses + // even on systems that require instructions to be 32-bit aligned. + char *address = reinterpret_cast(4); + EXPECT_STREQ("abc", TrySymbolize(address)); EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_b)); - EXPECT_STREQ("ac", TrySymbolize(address++)); + EXPECT_STREQ("ac", TrySymbolize(address + 4)); // Cleanup: remove all remaining decorators so other stack traces don't // get mystery "ac" decoration. @@ -481,7 +483,8 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() { } } -#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) +#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \ + ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP)) // Test that we correctly identify bounds of Thumb functions on ARM. // // Thumb functions have the lowest-order bit set in their addresses in the ELF @@ -500,6 +503,10 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() { // bit in the Thumb function's entry point. 
It will correctly compute the end of // the Thumb function, it will find no overlap between the Thumb and ARM // functions, and it will return the name of the ARM function. +// +// Unfortunately we cannot perform this test on armv6 or lower systems that use +// the hard float ABI because gcc refuses to compile thumb functions on such +// systems with a "sorry, unimplemented: Thumb-1 hard-float VFP ABI" error. __attribute__((target("thumb"))) int ArmThumbOverlapThumb(int x) { return x * x * x; @@ -519,7 +526,8 @@ void ABSL_ATTRIBUTE_NOINLINE TestArmThumbOverlap() { #endif } -#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) +#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && ((__ARM_ARCH >= 7) + // || !defined(__ARM_PCS_VFP)) #elif defined(_WIN32) #if !defined(ABSL_CONSUME_DLL) @@ -594,7 +602,8 @@ int main(int argc, char **argv) { TestWithPCInsideInlineFunction(); TestWithPCInsideNonInlineFunction(); TestWithReturnAddress(); -#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) +#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \ + ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP)) TestArmThumbOverlap(); #endif #endif diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_win32.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_win32.inc index c3df46f606..53a099a181 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_win32.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/symbolize_win32.inc @@ -65,14 +65,15 @@ bool Symbolize(const void* pc, char* out, int out_size) { if (!SymFromAddr(process, reinterpret_cast(pc), nullptr, symbol)) { return false; } - strncpy(out, symbol->Name, out_size); - if (out[out_size - 1] != '\0') { + const size_t out_size_t = static_cast(out_size); + strncpy(out, symbol->Name, out_size_t); + if (out[out_size_t - 1] != '\0') { // strncpy() does not '\0' terminate when it truncates. 
static constexpr char kEllipsis[] = "..."; - int ellipsis_size = - std::min(sizeof(kEllipsis) - 1, out_size - 1); - memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size); - out[out_size - 1] = '\0'; + size_t ellipsis_size = + std::min(sizeof(kEllipsis) - 1, out_size_t - 1); + memcpy(out + out_size_t - ellipsis_size - 1, kEllipsis, ellipsis_size); + out[out_size_t - 1] = '\0'; } return true; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/CMakeLists.txt index 956f70f868..3e9d5adff8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/CMakeLists.txt @@ -87,6 +87,7 @@ absl_cc_library( absl::config absl::core_headers absl::log_severity + absl::optional absl::strings absl::str_format ) @@ -105,6 +106,7 @@ absl_cc_library( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::config + absl::dynamic_annotations absl::fast_type_id ) @@ -202,6 +204,7 @@ absl_cc_library( HDRS "declare.h" "flag.h" + "internal/flag_msvc.inc" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS @@ -463,5 +466,5 @@ absl_cc_test( absl::flags_reflection absl::flags_usage absl::strings - GTest::gtest + GTest::gmock ) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/config.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/config.h index 5ab1f311dc..14c4235bb3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/config.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/config.h @@ -45,14 +45,6 @@ #define ABSL_FLAGS_STRIP_HELP ABSL_FLAGS_STRIP_NAMES #endif -// ABSL_FLAGS_INTERNAL_HAS_RTTI macro is used for selecting if we can use RTTI -// for flag type identification. -#ifdef ABSL_FLAGS_INTERNAL_HAS_RTTI -#error ABSL_FLAGS_INTERNAL_HAS_RTTI cannot be directly set -#elif !defined(__GNUC__) || defined(__GXX_RTTI) -#define ABSL_FLAGS_INTERNAL_HAS_RTTI 1 -#endif // !defined(__GNUC__) || defined(__GXX_RTTI) - // These macros represent the "source of truth" for the list of supported // built-in types. #define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/declare.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/declare.h index b9794d8b85..d1437bb9f6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/declare.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/declare.h @@ -60,6 +60,14 @@ ABSL_NAMESPACE_END // The ABSL_DECLARE_FLAG(type, name) macro expands to: // // extern absl::Flag FLAGS_name; -#define ABSL_DECLARE_FLAG(type, name) extern ::absl::Flag FLAGS_##name +#define ABSL_DECLARE_FLAG(type, name) ABSL_DECLARE_FLAG_INTERNAL(type, name) + +// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its +// arguments. Clients must use ABSL_DECLARE_FLAG instead. 
+#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ + extern absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ {} \ + /* second redeclaration is to allow applying attributes */ \ + extern absl::Flag FLAGS_##name #endif // ABSL_FLAGS_DECLARE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag.h index 14209e7ba7..b7f94be7c5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag.h @@ -67,105 +67,15 @@ ABSL_NAMESPACE_BEGIN // ABSL_FLAG(int, count, 0, "Count of items to process"); // // No public methods of `absl::Flag` are part of the Abseil Flags API. +// +// For type support of Abseil Flags, see the marshalling.h header file, which +// discusses supported standard types, optional flags, and additional Abseil +// type support. #if !defined(_MSC_VER) || defined(__clang__) template using Flag = flags_internal::Flag; #else -// MSVC debug builds do not implement initialization with constexpr constructors -// correctly. To work around this we add a level of indirection, so that the -// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias -// to that class) and dynamically allocates an instance when necessary. We also -// forward all calls to internal::Flag methods via trampoline methods. In this -// setup the `absl::Flag` class does not have constructor and virtual methods, -// all the data members are public and thus MSVC is able to initialize it at -// link time. To deal with multiple threads accessing the flag for the first -// time concurrently we use an atomic boolean indicating if flag object is -// initialized. We also employ the double-checked locking pattern where the -// second level of protection is a global Mutex, so if two threads attempt to -// construct the flag concurrently only one wins. -// This solution is based on a recomendation here: -// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454 - -namespace flags_internal { -absl::Mutex* GetGlobalConstructionGuard(); -} // namespace flags_internal - -template -class Flag { - public: - // No constructor and destructor to ensure this is an aggregate type. - // Visual Studio 2015 still requires the constructor for class to be - // constexpr initializable. -#if _MSC_VER <= 1900 - constexpr Flag(const char* name, const char* filename, - const flags_internal::HelpGenFunc help_gen, - const flags_internal::FlagDfltGenFunc default_value_gen) - : name_(name), - filename_(filename), - help_gen_(help_gen), - default_value_gen_(default_value_gen), - inited_(false), - impl_(nullptr) {} -#endif - - flags_internal::Flag& GetImpl() const { - if (!inited_.load(std::memory_order_acquire)) { - absl::MutexLock l(flags_internal::GetGlobalConstructionGuard()); - - if (inited_.load(std::memory_order_acquire)) { - return *impl_; - } - - impl_ = new flags_internal::Flag( - name_, filename_, - {flags_internal::FlagHelpMsg(help_gen_), - flags_internal::FlagHelpKind::kGenFunc}, - {flags_internal::FlagDefaultSrc(default_value_gen_), - flags_internal::FlagDefaultKind::kGenFunc}); - inited_.store(true, std::memory_order_release); - } - - return *impl_; - } - - // Public methods of `absl::Flag` are NOT part of the Abseil Flags API. 
- // See https://abseil.io/docs/cpp/guides/flags - bool IsRetired() const { return GetImpl().IsRetired(); } - absl::string_view Name() const { return GetImpl().Name(); } - std::string Help() const { return GetImpl().Help(); } - bool IsModified() const { return GetImpl().IsModified(); } - bool IsSpecifiedOnCommandLine() const { - return GetImpl().IsSpecifiedOnCommandLine(); - } - std::string Filename() const { return GetImpl().Filename(); } - std::string DefaultValue() const { return GetImpl().DefaultValue(); } - std::string CurrentValue() const { return GetImpl().CurrentValue(); } - template - inline bool IsOfType() const { - return GetImpl().template IsOfType(); - } - T Get() const { - return flags_internal::FlagImplPeer::InvokeGet(GetImpl()); - } - void Set(const T& v) { - flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v); - } - void InvokeCallback() { GetImpl().InvokeCallback(); } - - const CommandLineFlag& Reflect() const { - return flags_internal::FlagImplPeer::InvokeReflect(GetImpl()); - } - - // The data members are logically private, but they need to be public for - // this to be an aggregate type. - const char* name_; - const char* filename_; - const flags_internal::HelpGenFunc help_gen_; - const flags_internal::FlagDfltGenFunc default_value_gen_; - - mutable std::atomic inited_; - mutable flags_internal::Flag* impl_; -}; +#include "absl/flags/internal/flag_msvc.inc" #endif // GetFlag() @@ -335,8 +245,8 @@ ABSL_NAMESPACE_END /* default value argument. That keeps temporaries alive */ \ /* long enough for NonConst to work correctly. */ \ static constexpr absl::string_view Value( \ - absl::string_view v = ABSL_FLAG_IMPL_FLAGHELP(txt)) { \ - return v; \ + absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt)) { \ + return absl_flag_help; \ } \ static std::string NonConst() { return std::string(Value()); } \ }; \ @@ -348,8 +258,8 @@ ABSL_NAMESPACE_END #define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ struct AbslFlagDefaultGenFor##name { \ Type value = absl::flags_internal::InitDefaultValue(default_value); \ - static void Gen(void* p) { \ - new (p) Type(AbslFlagDefaultGenFor##name{}.value); \ + static void Gen(void* absl_flag_default_loc) { \ + new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \ } \ }; @@ -359,6 +269,7 @@ ABSL_NAMESPACE_END // global name for FLAGS_no symbol, thus preventing the possibility // of defining two flags with names foo and nofoo. #define ABSL_FLAG_IMPL(Type, name, default_value, help) \ + extern ::absl::Flag FLAGS_##name; \ namespace absl /* block flags in namespaces */ {} \ ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag_test.cc index 6e974a5b5e..845b4ebac6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/flag_test.cc @@ -854,7 +854,9 @@ ABSL_RETIRED_FLAG(bool, old_bool_flag, true, "old descr"); ABSL_RETIRED_FLAG(int, old_int_flag, (int)std::sqrt(10), "old descr"); ABSL_RETIRED_FLAG(std::string, old_str_flag, "", absl::StrCat("old ", "descr")); -bool initializaion_order_fiasco_test = [] { +namespace { + +bool initialization_order_fiasco_test ABSL_ATTRIBUTE_UNUSED = [] { // Iterate over all the flags during static initialization. 
// This should not trigger ASan's initialization-order-fiasco. auto* handle1 = absl::FindCommandLineFlag("flag_on_separate_file"); @@ -865,8 +867,6 @@ bool initializaion_order_fiasco_test = [] { return true; }(); -namespace { - TEST_F(FlagTest, TestRetiredFlagRegistration) { auto* handle = absl::FindCommandLineFlag("old_bool_flag"); EXPECT_TRUE(handle->IsOfType()); @@ -977,3 +977,190 @@ TEST_F(FlagTest, TesTypeWrappingEnum) { value = absl::GetFlag(FLAGS_test_enum_wrapper_flag); EXPECT_EQ(value.e, B); } + +// This is a compile test to ensure macros are expanded within ABSL_FLAG and +// ABSL_DECLARE_FLAG. +#define FLAG_NAME_MACRO(name) prefix_ ## name +ABSL_DECLARE_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag)); +ABSL_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag), 0, + "Testing macro expansion within ABSL_FLAG"); + +TEST_F(FlagTest, MacroWithinAbslFlag) { + EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 0); + absl::SetFlag(&FLAGS_prefix_test_macro_named_flag, 1); + EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 1); +} + +// -------------------------------------------------------------------- + +#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 5 +#define ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG +#endif + +#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG +ABSL_FLAG(absl::optional, optional_bool, absl::nullopt, "help"); +#endif +ABSL_FLAG(absl::optional, optional_int, {}, "help"); +ABSL_FLAG(absl::optional, optional_double, 9.3, "help"); +ABSL_FLAG(absl::optional, optional_string, absl::nullopt, "help"); +ABSL_FLAG(absl::optional, optional_duration, absl::nullopt, + "help"); +ABSL_FLAG(absl::optional>, optional_optional_int, + absl::nullopt, "help"); +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +ABSL_FLAG(std::optional, std_optional_int64, std::nullopt, "help"); +#endif + +namespace { + +#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG +TEST_F(FlagTest, TestOptionalBool) { + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt); + + absl::SetFlag(&FLAGS_optional_bool, false); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), false); + + absl::SetFlag(&FLAGS_optional_bool, true); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), true); + + absl::SetFlag(&FLAGS_optional_bool, absl::nullopt); + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt); +} + +// -------------------------------------------------------------------- +#endif + +TEST_F(FlagTest, TestOptionalInt) { + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt); + + absl::SetFlag(&FLAGS_optional_int, 0); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 0); + + absl::SetFlag(&FLAGS_optional_int, 10); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 10); + + absl::SetFlag(&FLAGS_optional_int, absl::nullopt); + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt); +} + +// -------------------------------------------------------------------- + +TEST_F(FlagTest, TestOptionalDouble) { + 
EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value()); + EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 9.3); + + absl::SetFlag(&FLAGS_optional_double, 0.0); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), 0.0); + + absl::SetFlag(&FLAGS_optional_double, 1.234); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value()); + EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 1.234); + + absl::SetFlag(&FLAGS_optional_double, absl::nullopt); + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_double).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), absl::nullopt); +} + +// -------------------------------------------------------------------- + +TEST_F(FlagTest, TestOptionalString) { + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt); + + // Setting optional string to "" leads to undefined behavior. + + absl::SetFlag(&FLAGS_optional_string, " "); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), " "); + + absl::SetFlag(&FLAGS_optional_string, "QWERTY"); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), "QWERTY"); + + absl::SetFlag(&FLAGS_optional_string, absl::nullopt); + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt); +} + +// -------------------------------------------------------------------- + +TEST_F(FlagTest, TestOptionalDuration) { + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt); + + absl::SetFlag(&FLAGS_optional_duration, absl::ZeroDuration()); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Seconds(0)); + + absl::SetFlag(&FLAGS_optional_duration, absl::Hours(3)); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Hours(3)); + + absl::SetFlag(&FLAGS_optional_duration, absl::nullopt); + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt); +} + +// -------------------------------------------------------------------- + +TEST_F(FlagTest, TestOptionalOptional) { + EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt); + + absl::optional nullint{absl::nullopt}; + + absl::SetFlag(&FLAGS_optional_optional_int, nullint); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); + EXPECT_NE(absl::GetFlag(FLAGS_optional_optional_int), nullint); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), + absl::optional>{nullint}); + + absl::SetFlag(&FLAGS_optional_optional_int, 0); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0); + + absl::SetFlag(&FLAGS_optional_optional_int, absl::optional{0}); + EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::optional{0}); + + absl::SetFlag(&FLAGS_optional_optional_int, absl::nullopt); + 
EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt); +} + +// -------------------------------------------------------------------- + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) + +TEST_F(FlagTest, TestStdOptional) { + EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt); + + absl::SetFlag(&FLAGS_std_optional_int64, 0); + EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0); + + absl::SetFlag(&FLAGS_std_optional_int64, 0xFFFFFFFFFF16); + EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0xFFFFFFFFFF16); + + absl::SetFlag(&FLAGS_std_optional_int64, std::nullopt); + EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value()); + EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt); +} + +// -------------------------------------------------------------------- + +#endif + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.cc index 1515022d11..cc656f9d13 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.cc @@ -30,6 +30,7 @@ #include "absl/base/call_once.h" #include "absl/base/casts.h" #include "absl/base/config.h" +#include "absl/base/dynamic_annotations.h" #include "absl/base/optimization.h" #include "absl/flags/config.h" #include "absl/flags/internal/commandlineflag.h" @@ -160,6 +161,8 @@ void FlagImpl::Init() { std::memcpy(buf.data() + Sizeof(op_), &initialized, sizeof(initialized)); } + // Type can contain valid uninitialized bits, e.g. padding. + ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf.data(), buf.size()); OneWordValue().store(absl::bit_cast(buf), std::memory_order_release); break; @@ -205,7 +208,7 @@ void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id, if (lhs_runtime_type_id == rhs_runtime_type_id) return; -#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) +#ifdef ABSL_INTERNAL_HAS_RTTI if (*lhs_runtime_type_id == *rhs_runtime_type_id) return; #endif @@ -403,7 +406,7 @@ template StorageT* FlagImpl::OffsetValue() const { char* p = reinterpret_cast(const_cast(this)); // The offset is deduced via Flag value type specific op_. - size_t offset = flags_internal::ValueOffset(op_); + ptrdiff_t offset = flags_internal::ValueOffset(op_); return reinterpret_cast(p + offset); } @@ -483,7 +486,7 @@ bool FlagImpl::ReadOneBool() const { } void FlagImpl::ReadSequenceLockedData(void* dst) const { - int size = Sizeof(op_); + size_t size = Sizeof(op_); // Attempt to read using the sequence lock. if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) { return; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.h index 8636fadcdb..6154638c41 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag.h @@ -163,7 +163,7 @@ inline ptrdiff_t ValueOffset(FlagOpFn op) { // Returns an address of RTTI's typeid(T). 
template inline const std::type_info* GenRuntimeTypeId() { -#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) +#ifdef ABSL_INTERNAL_HAS_RTTI return &typeid(T); #else return nullptr; @@ -290,7 +290,7 @@ constexpr T InitDefaultValue(EmptyBraces) { template ::value, int>::type = - (GenT{}, 0)> + ((void)GenT{}, 0)> constexpr FlagDefaultArg DefaultArg(int) { return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord}; } @@ -303,7 +303,9 @@ constexpr FlagDefaultArg DefaultArg(char) { /////////////////////////////////////////////////////////////////////////////// // Flag current value auxiliary structs. -constexpr int64_t UninitializedFlagValue() { return 0xababababababababll; } +constexpr int64_t UninitializedFlagValue() { + return static_cast(0xababababababababll); +} template using FlagUseValueAndInitBitStorage = std::integral_constant< @@ -755,8 +757,8 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { case FlagOp::kValueOffset: { // Round sizeof(FlagImp) to a multiple of alignof(FlagValue) to get the // offset of the data. - ptrdiff_t round_to = alignof(FlagValue); - ptrdiff_t offset = + size_t round_to = alignof(FlagValue); + size_t offset = (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; return reinterpret_cast(offset); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag_msvc.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag_msvc.inc new file mode 100644 index 0000000000..c31bd27fd8 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/flag_msvc.inc @@ -0,0 +1,116 @@ +// +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Do not include this file directly. +// Include absl/flags/flag.h instead. + +// MSVC debug builds do not implement initialization with constexpr constructors +// correctly. To work around this we add a level of indirection, so that the +// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias +// to that class) and dynamically allocates an instance when necessary. We also +// forward all calls to internal::Flag methods via trampoline methods. In this +// setup the `absl::Flag` class does not have constructor and virtual methods, +// all the data members are public and thus MSVC is able to initialize it at +// link time. To deal with multiple threads accessing the flag for the first +// time concurrently we use an atomic boolean indicating if flag object is +// initialized. We also employ the double-checked locking pattern where the +// second level of protection is a global Mutex, so if two threads attempt to +// construct the flag concurrently only one wins. 
+// +// This solution is based on a recomendation here: +// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454 + +namespace flags_internal { +absl::Mutex* GetGlobalConstructionGuard(); +} // namespace flags_internal + +// Public methods of `absl::Flag` are NOT part of the Abseil Flags API. +// See https://abseil.io/docs/cpp/guides/flags +template +class Flag { + public: + // No constructor and destructor to ensure this is an aggregate type. + // Visual Studio 2015 still requires the constructor for class to be + // constexpr initializable. +#if _MSC_VER <= 1900 + constexpr Flag(const char* name, const char* filename, + const flags_internal::HelpGenFunc help_gen, + const flags_internal::FlagDfltGenFunc default_value_gen) + : name_(name), + filename_(filename), + help_gen_(help_gen), + default_value_gen_(default_value_gen), + inited_(false), + impl_(nullptr) {} +#endif + + flags_internal::Flag& GetImpl() const { + if (!inited_.load(std::memory_order_acquire)) { + absl::MutexLock l(flags_internal::GetGlobalConstructionGuard()); + + if (inited_.load(std::memory_order_acquire)) { + return *impl_; + } + + impl_ = new flags_internal::Flag( + name_, filename_, + {flags_internal::FlagHelpMsg(help_gen_), + flags_internal::FlagHelpKind::kGenFunc}, + {flags_internal::FlagDefaultSrc(default_value_gen_), + flags_internal::FlagDefaultKind::kGenFunc}); + inited_.store(true, std::memory_order_release); + } + + return *impl_; + } + + // Public methods of `absl::Flag` are NOT part of the Abseil Flags API. + // See https://abseil.io/docs/cpp/guides/flags + bool IsRetired() const { return GetImpl().IsRetired(); } + absl::string_view Name() const { return GetImpl().Name(); } + std::string Help() const { return GetImpl().Help(); } + bool IsModified() const { return GetImpl().IsModified(); } + bool IsSpecifiedOnCommandLine() const { + return GetImpl().IsSpecifiedOnCommandLine(); + } + std::string Filename() const { return GetImpl().Filename(); } + std::string DefaultValue() const { return GetImpl().DefaultValue(); } + std::string CurrentValue() const { return GetImpl().CurrentValue(); } + template + inline bool IsOfType() const { + return GetImpl().template IsOfType(); + } + T Get() const { + return flags_internal::FlagImplPeer::InvokeGet(GetImpl()); + } + void Set(const T& v) { + flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v); + } + void InvokeCallback() { GetImpl().InvokeCallback(); } + + const CommandLineFlag& Reflect() const { + return flags_internal::FlagImplPeer::InvokeReflect(GetImpl()); + } + + // The data members are logically private, but they need to be public for + // this to be an aggregate type. 
+ const char* name_; + const char* filename_; + const flags_internal::HelpGenFunc help_gen_; + const flags_internal::FlagDfltGenFunc default_value_gen_; + + mutable std::atomic inited_; + mutable flags_internal::Flag* impl_; +}; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage.cc index 949709e883..5efc7b07a3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage.cc @@ -17,7 +17,9 @@ #include +#include #include +#include #include #include #include @@ -33,6 +35,7 @@ #include "absl/flags/internal/program_name.h" #include "absl/flags/internal/registry.h" #include "absl/flags/usage_config.h" +#include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" @@ -148,8 +151,7 @@ class FlagHelpPrettyPrinter { } // Write the token, ending the string first if necessary/possible. - if (!new_line && - (line_len_ + static_cast(token.size()) >= max_line_len_)) { + if (!new_line && (line_len_ + token.size() >= max_line_len_)) { EndLine(); new_line = true; } @@ -344,7 +346,7 @@ void FlagHelp(std::ostream& out, const CommandLineFlag& flag, void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format, absl::string_view program_usage_message) { flags_internal::FlagKindFilter filter_cb = [&](absl::string_view filename) { - return filter.empty() || filename.find(filter) != absl::string_view::npos; + return filter.empty() || absl::StrContains(filename, filter); }; flags_internal::FlagsHelpImpl(out, filter_cb, format, program_usage_message); } @@ -466,7 +468,7 @@ void SetFlagsHelpFormat(HelpFormat format) { // function. bool DeduceUsageFlags(absl::string_view name, absl::string_view value) { if (absl::ConsumePrefix(&name, "help")) { - if (name == "") { + if (name.empty()) { if (value.empty()) { SetFlagsHelpMode(HelpMode::kImportant); } else { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage_test.cc index 044d71c87d..209a7be9b6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/internal/usage_test.cc @@ -20,6 +20,7 @@ #include #include +#include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/flags/flag.h" #include "absl/flags/internal/parse.h" @@ -47,8 +48,10 @@ struct UDT { UDT(const UDT&) = default; UDT& operator=(const UDT&) = default; }; -bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; } -std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; } +static bool AbslParseFlag(absl::string_view, UDT*, std::string*) { + return true; +} +static std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; } ABSL_FLAG(UDT, usage_reporting_test_flag_05, {}, "usage_reporting_test_flag_05 help message"); @@ -103,14 +106,19 @@ class UsageReportingTest : public testing::Test { using UsageReportingDeathTest = UsageReportingTest; TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) { +#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL + // Check for kTestUsageMessage set in main() below. 
EXPECT_EQ(absl::ProgramUsageMessage(), kTestUsageMessage); +#else + // Check for part of the usage message set by GoogleTest. + EXPECT_THAT(absl::ProgramUsageMessage(), + ::testing::HasSubstr( + "This program contains tests written using Google Test")); +#endif -#ifndef _WIN32 - // TODO(rogeeff): figure out why this does not work on Windows. EXPECT_DEATH_IF_SUPPORTED( absl::SetProgramUsageMessage("custom usage message"), - ".*SetProgramUsageMessage\\(\\) called twice.*"); -#endif + ::testing::HasSubstr("SetProgramUsageMessage() called twice")); } // -------------------------------------------------------------------- @@ -487,8 +495,10 @@ path. int main(int argc, char* argv[]) { (void)absl::GetFlag(FLAGS_undefok); // Force linking of parse.cc flags::SetProgramInvocationName("usage_test"); +#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL + // GoogleTest calls absl::SetProgramUsageMessage() already. absl::SetProgramUsageMessage(kTestUsageMessage); +#endif ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling.h index 7cbc136d57..325e75e516 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling.h @@ -33,6 +33,7 @@ // * `double` // * `std::string` // * `std::vector` +// * `std::optional` // * `absl::LogSeverity` (provided natively for layering reasons) // // Note that support for integral types is implemented using overloads for @@ -65,6 +66,42 @@ // below.) // // ----------------------------------------------------------------------------- +// Optional Flags +// ----------------------------------------------------------------------------- +// +// The Abseil flags library supports flags of type `std::optional` where +// `T` is a type of one of the supported flags. We refer to this flag type as +// an "optional flag." An optional flag is either "valueless", holding no value +// of type `T` (indicating that the flag has not been set) or a value of type +// `T`. The valueless state in C++ code is represented by a value of +// `std::nullopt` for the optional flag. +// +// Using `std::nullopt` as an optional flag's default value allows you to check +// whether such a flag was ever specified on the command line: +// +// if (absl::GetFlag(FLAGS_foo).has_value()) { +// // flag was set on command line +// } else { +// // flag was not passed on command line +// } +// +// Using an optional flag in this manner avoids common workarounds for +// indicating such an unset flag (such as using sentinel values to indicate this +// state). +// +// An optional flag also allows a developer to pass a flag in an "unset" +// valueless state on the command line, allowing the flag to later be set in +// binary logic. An optional flag's valueless state is indicated by the special +// notation of passing the value as an empty string through the syntax `--flag=` +// or `--flag ""`. +// +// $ binary_with_optional --flag_in_unset_state= +// $ binary_with_optional --flag_in_unset_state "" +// +// Note: as a result of the above syntax requirements, an optional flag cannot +// be set to a `T` of any value which unparses to the empty string. 
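To make the documentation above concrete, here is a minimal sketch of declaring and reading an optional flag; the flag name `port` and its help text are hypothetical and are not flags added by this patch.

    // Hypothetical optional flag; it stays valueless unless --port=N is passed.
    #include <iostream>
    #include "absl/flags/flag.h"
    #include "absl/types/optional.h"

    ABSL_FLAG(absl::optional<int>, port, absl::nullopt,
              "Port to listen on; leave unset to pick one automatically.");

    void ReportPort() {
      absl::optional<int> port = absl::GetFlag(FLAGS_port);
      if (port.has_value()) {
        std::cout << "port explicitly set to " << *port << "\n";
      } else {
        // Reached when the flag was omitted, or passed as --port= / --port "".
        std::cout << "port not specified\n";
      }
    }

Passing `--port=` on the command line resets the flag to its valueless state, exactly as described in the syntax note above.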
+// +// ----------------------------------------------------------------------------- // Adding Type Support for Abseil Flags // ----------------------------------------------------------------------------- // @@ -162,14 +199,27 @@ #ifndef ABSL_FLAGS_MARSHALLING_H_ #define ABSL_FLAGS_MARSHALLING_H_ +#include "absl/base/config.h" + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +#include +#endif #include #include -#include "absl/base/config.h" #include "absl/strings/string_view.h" +#include "absl/types/optional.h" namespace absl { ABSL_NAMESPACE_BEGIN + +// Forward declaration to be used inside composable flag parse/unparse +// implementations +template +inline bool ParseFlag(absl::string_view input, T* dst, std::string* error); +template +inline std::string UnparseFlag(const T& v); + namespace flags_internal { // Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types. @@ -188,6 +238,36 @@ bool AbslParseFlag(absl::string_view, double*, std::string*); bool AbslParseFlag(absl::string_view, std::string*, std::string*); bool AbslParseFlag(absl::string_view, std::vector*, std::string*); +template +bool AbslParseFlag(absl::string_view text, absl::optional* f, + std::string* err) { + if (text.empty()) { + *f = absl::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) return false; + + *f = std::move(value); + return true; +} + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +template +bool AbslParseFlag(absl::string_view text, std::optional* f, + std::string* err) { + if (text.empty()) { + *f = std::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) return false; + + *f = std::move(value); + return true; +} +#endif + template bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) { // Comment on next line provides a good compiler error message if T @@ -201,6 +281,18 @@ bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) { std::string AbslUnparseFlag(absl::string_view v); std::string AbslUnparseFlag(const std::vector&); +template +std::string AbslUnparseFlag(const absl::optional& f) { + return f.has_value() ? absl::UnparseFlag(*f) : ""; +} + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +template +std::string AbslUnparseFlag(const std::optional& f) { + return f.has_value() ? 
absl::UnparseFlag(*f) : ""; +} +#endif + template std::string Unparse(const T& v) { // Comment on next line provides a good compiler error message if T does not diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling_test.cc index 4a64ce11a1..7b6d2ad5cf 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/marshalling_test.cc @@ -659,6 +659,88 @@ TEST(MarshallingTest, TestVectorOfStringParsing) { // -------------------------------------------------------------------- +TEST(MarshallingTest, TestOptionalBoolParsing) { + std::string err; + absl::optional value; + + EXPECT_TRUE(absl::ParseFlag("", &value, &err)); + EXPECT_FALSE(value.has_value()); + + EXPECT_TRUE(absl::ParseFlag("true", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_TRUE(*value); + + EXPECT_TRUE(absl::ParseFlag("false", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_FALSE(*value); + + EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err)); +} + +// -------------------------------------------------------------------- + +TEST(MarshallingTest, TestOptionalIntParsing) { + std::string err; + absl::optional value; + + EXPECT_TRUE(absl::ParseFlag("", &value, &err)); + EXPECT_FALSE(value.has_value()); + + EXPECT_TRUE(absl::ParseFlag("10", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 10); + + EXPECT_TRUE(absl::ParseFlag("0x1F", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 31); + + EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err)); +} + +// -------------------------------------------------------------------- + +TEST(MarshallingTest, TestOptionalDoubleParsing) { + std::string err; + absl::optional value; + + EXPECT_TRUE(absl::ParseFlag("", &value, &err)); + EXPECT_FALSE(value.has_value()); + + EXPECT_TRUE(absl::ParseFlag("1.11", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 1.11); + + EXPECT_TRUE(absl::ParseFlag("-0.12", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, -0.12); + + EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err)); +} + +// -------------------------------------------------------------------- + +TEST(MarshallingTest, TestOptionalStringParsing) { + std::string err; + absl::optional value; + + EXPECT_TRUE(absl::ParseFlag("", &value, &err)); + EXPECT_FALSE(value.has_value()); + + EXPECT_TRUE(absl::ParseFlag(" ", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, " "); + + EXPECT_TRUE(absl::ParseFlag("aqswde", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, "aqswde"); + + EXPECT_TRUE(absl::ParseFlag("nullopt", &value, &err)); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, "nullopt"); +} + +// -------------------------------------------------------------------- + TEST(MarshallingTest, TestBoolUnparsing) { EXPECT_EQ(absl::UnparseFlag(true), "true"); EXPECT_EQ(absl::UnparseFlag(false), "false"); @@ -808,6 +890,90 @@ TEST(MarshallingTest, TestStringUnparsing) { // -------------------------------------------------------------------- +TEST(MarshallingTest, TestOptionalBoolUnparsing) { + absl::optional value; + + EXPECT_EQ(absl::UnparseFlag(value), ""); + value = true; + EXPECT_EQ(absl::UnparseFlag(value), "true"); + value = false; + EXPECT_EQ(absl::UnparseFlag(value), "false"); + value = absl::nullopt; + 
EXPECT_EQ(absl::UnparseFlag(value), ""); +} + +// -------------------------------------------------------------------- + +TEST(MarshallingTest, TestOptionalIntUnparsing) { + absl::optional value; + + EXPECT_EQ(absl::UnparseFlag(value), ""); + value = 0; + EXPECT_EQ(absl::UnparseFlag(value), "0"); + value = -12; + EXPECT_EQ(absl::UnparseFlag(value), "-12"); + value = absl::nullopt; + EXPECT_EQ(absl::UnparseFlag(value), ""); +} + +// -------------------------------------------------------------------- + +TEST(MarshallingTest, TestOptionalDoubleUnparsing) { + absl::optional value; + + EXPECT_EQ(absl::UnparseFlag(value), ""); + value = 1.; + EXPECT_EQ(absl::UnparseFlag(value), "1"); + value = -1.23; + EXPECT_EQ(absl::UnparseFlag(value), "-1.23"); + value = absl::nullopt; + EXPECT_EQ(absl::UnparseFlag(value), ""); +} + +// -------------------------------------------------------------------- + +TEST(MarshallingTest, TestOptionalStringUnparsing) { + absl::optional strvalue; + EXPECT_EQ(absl::UnparseFlag(strvalue), ""); + + strvalue = "asdfg"; + EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg"); + + strvalue = " "; + EXPECT_EQ(absl::UnparseFlag(strvalue), " "); + + strvalue = ""; // It is UB to set an optional string flag to "" + EXPECT_EQ(absl::UnparseFlag(strvalue), ""); +} + +// -------------------------------------------------------------------- + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) + +TEST(MarshallingTest, TestStdOptionalUnparsing) { + std::optional strvalue; + EXPECT_EQ(absl::UnparseFlag(strvalue), ""); + + strvalue = "asdfg"; + EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg"); + + strvalue = " "; + EXPECT_EQ(absl::UnparseFlag(strvalue), " "); + + strvalue = ""; // It is UB to set an optional string flag to "" + EXPECT_EQ(absl::UnparseFlag(strvalue), ""); + + std::optional intvalue; + EXPECT_EQ(absl::UnparseFlag(intvalue), ""); + + intvalue = 10; + EXPECT_EQ(absl::UnparseFlag(intvalue), "10"); +} + +// -------------------------------------------------------------------- + +#endif + template void TestRoundtrip(T v) { T new_v; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/parse.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/parse.cc index dd1a6796ca..2851c0f788 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/parse.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/flags/parse.cc @@ -159,14 +159,14 @@ class ArgsList { // Returns success status: true if parsing successful, false otherwise. bool ReadFromFlagfile(const std::string& flag_file_name); - int Size() const { return args_.size() - next_arg_; } - int FrontIndex() const { return next_arg_; } + size_t Size() const { return args_.size() - next_arg_; } + size_t FrontIndex() const { return next_arg_; } absl::string_view Front() const { return args_[next_arg_]; } void PopFront() { next_arg_++; } private: std::vector args_; - int next_arg_; + size_t next_arg_; }; bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) { @@ -626,7 +626,7 @@ std::vector ParseCommandLineImpl(int argc, char* argv[], std::vector output_args; std::vector positional_args; - output_args.reserve(argc); + output_args.reserve(static_cast(argc)); // This is the list of undefined flags. The element of the list is the pair // consisting of boolean indicating if flag came from command line (vs from @@ -795,8 +795,8 @@ std::vector ParseCommandLineImpl(int argc, char* argv[], // All the remaining arguments are positional. 
if (!input_args.empty()) { - for (int arg_index = input_args.back().FrontIndex(); arg_index < argc; - ++arg_index) { + for (size_t arg_index = input_args.back().FrontIndex(); + arg_index < static_cast(argc); ++arg_index) { output_args.push_back(argv[arg_index]); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/CMakeLists.txt index 338ddc6c6c..c0f6eaaa2d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/CMakeLists.txt @@ -14,6 +14,42 @@ # limitations under the License. # +absl_cc_library( + NAME + any_invocable + SRCS + "internal/any_invocable.h" + HDRS + "any_invocable.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::config + absl::core_headers + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + any_invocable_test + SRCS + "any_invocable_test.cc" + "internal/any_invocable.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::any_invocable + absl::base_internal + absl::config + absl::core_headers + absl::type_traits + absl::utility + GTest::gmock_main +) + absl_cc_library( NAME bind_front diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/any_invocable.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/any_invocable.h new file mode 100644 index 0000000000..3e783c871d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/any_invocable.h @@ -0,0 +1,316 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: any_invocable.h +// ----------------------------------------------------------------------------- +// +// This header file defines an `absl::AnyInvocable` type that assumes ownership +// and wraps an object of an invocable type. (Invocable types adhere to the +// concept specified in https://en.cppreference.com/w/cpp/concepts/invocable.) +// +// In general, prefer `absl::AnyInvocable` when you need a type-erased +// function parameter that needs to take ownership of the type. +// +// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function` +// abstraction, but has a slightly different API and is not designed to be a +// drop-in replacement or C++11-compatible backfill of that type. +// +// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original +// implementation. 
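One consequence of the move-only design noted in the header comment above, worth spelling out before the class definition: an `absl::AnyInvocable` can take ownership of callables that are themselves move-only, which `std::function` cannot store. A small sketch, with illustrative names (`RunTask`, `Demo`) that are not part of this header:

    // Sketch: AnyInvocable taking ownership of a move-only lambda.
    #include <memory>
    #include <utility>
    #include "absl/functional/any_invocable.h"

    int RunTask(absl::AnyInvocable<int()> task) { return task(); }

    int Demo() {
      auto p = std::make_unique<int>(7);
      // The lambda move-captures a unique_ptr, so it is move-only;
      // std::function<int()> could not hold it, but AnyInvocable can.
      absl::AnyInvocable<int()> task = [p = std::move(p)] { return *p; };
      return RunTask(std::move(task));
    }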
+ +#ifndef ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ +#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/functional/internal/any_invocable.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::AnyInvocable +// +// `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that +// assumes ownership of an invocable object. Unlike `std::function`, an +// `absl::AnyInvocable` is more type-safe and provides the following additional +// benefits: +// +// * Properly adheres to const correctness of the underlying type +// * Is move-only so avoids concurrency problems with copied invocables and +// unnecessary copies in general. +// * Supports reference qualifiers allowing it to perform unique actions (noted +// below). +// +// `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation +// may wrap any invocable object with a compatible function signature, e.g. +// having arguments and return types convertible to types matching the +// `absl::AnyInvocable` signature, and also matching any stated reference +// qualifiers, as long as that type is moveable. It therefore provides broad +// type erasure for functional objects. +// +// An `absl::AnyInvocable` is typically used as a type-erased function parameter +// for accepting various functional objects: +// +// // Define a function taking an AnyInvocable parameter. +// void my_func(absl::AnyInvocable f) { +// ... +// }; +// +// // That function can accept any invocable type: +// +// // Accept a function reference. We don't need to move a reference. +// int func1() { return 0; }; +// my_func(func1); +// +// // Accept a lambda. We use std::move here because otherwise my_func would +// // copy the lambda. +// auto lambda = []() { return 0; }; +// my_func(std::move(lambda)); +// +// // Accept a function pointer. We don't need to move a function pointer. +// func2 = &func1; +// my_func(func2); +// +// // Accept an std::function by moving it. Note that the lambda is copyable +// // (satisfying std::function requirements) and moveable (satisfying +// // absl::AnyInvocable requirements). +// std::function func6 = []() { return 0; }; +// my_func(std::move(func6)); +// +// `AnyInvocable` also properly respects `const` qualifiers, reference +// qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as +// part of the user-specified function type (e.g. +// `AnyInvocable`). These qualifiers will be applied to +// the `AnyInvocable` object's `operator()`, and the underlying invocable must +// be compatible with those qualifiers. +// +// Comparison of const and non-const function types: +// +// // Store a closure inside of `func` with the function type `int()`. +// // Note that we have made `func` itself `const`. +// const AnyInvocable func = [](){ return 0; }; +// +// func(); // Compile-error: the passed type `int()` isn't `const`. +// +// // Store a closure inside of `const_func` with the function type +// // `int() const`. +// // Note that we have also made `const_func` itself `const`. +// const AnyInvocable const_func = [](){ return 0; }; +// +// const_func(); // Fine: `int() const` is `const`. +// +// In the above example, the call `func()` would have compiled if +// `std::function` were used even though the types are not const compatible. +// This is a bug, and using `absl::AnyInvocable` properly detects that bug. 
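To make the `std::function` comparison in the preceding paragraph concrete, here is a small sketch; `ConstContrast` and `counter` are illustrative names only:

    // The const hole in std::function, and how AnyInvocable closes it.
    #include <functional>
    #include "absl/functional/any_invocable.h"

    void ConstContrast() {
      int start = 0;
      auto counter = [start]() mutable { return ++start; };  // non-const operator()

      const std::function<int()> f = counter;
      f();  // Compiles: std::function::operator() is const even though the
            // wrapped callable mutates its own state on every call.

      // const absl::AnyInvocable<int()> g = counter;
      // g();  // Would not compile: `int()` (non-const) cannot be invoked
      //       // through a const AnyInvocable, which is the detection the
      //       // comment above refers to.
    }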
+// +// In addition to affecting the signature of `operator()`, the `const` and +// reference qualifiers of the function type also appropriately constrain which +// kinds of invocable objects you are allowed to place into the `AnyInvocable` +// instance. If you specify a function type that is const-qualified, then +// anything that you attempt to put into the `AnyInvocable` must be callable on +// a `const` instance of that type. +// +// Constraint example: +// +// // Fine because the lambda is callable when `const`. +// AnyInvocable func = [=](){ return 0; }; +// +// // This is a compile-error because the lambda isn't callable when `const`. +// AnyInvocable error = [=]() mutable { return 0; }; +// +// An `&&` qualifier can be used to express that an `absl::AnyInvocable` +// instance should be invoked at most once: +// +// // Invokes `continuation` with the logical result of an operation when +// // that operation completes (common in asynchronous code). +// void CallOnCompletion(AnyInvocable continuation) { +// int result_of_foo = foo(); +// +// // `std::move` is required because the `operator()` of `continuation` is +// // rvalue-reference qualified. +// std::move(continuation)(result_of_foo); +// } +// +// Attempting to call `absl::AnyInvocable` multiple times in such a case +// results in undefined behavior. +template +class AnyInvocable : private internal_any_invocable::Impl { + private: + static_assert( + std::is_function::value, + "The template argument of AnyInvocable must be a function type."); + + using Impl = internal_any_invocable::Impl; + + public: + // The return type of Sig + using result_type = typename Impl::result_type; + + // Constructors + + // Constructs the `AnyInvocable` in an empty state. + AnyInvocable() noexcept = default; + AnyInvocable(std::nullptr_t) noexcept {} // NOLINT + + // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move. + // Note that `f` is not guaranteed to be empty after move-construction, + // although it may be. + AnyInvocable(AnyInvocable&& /*f*/) noexcept = default; + + // Constructs an `AnyInvocable` from an invocable object. + // + // Upon construction, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template ::value>> + AnyInvocable(F&& f) // NOLINT + : Impl(internal_any_invocable::ConversionConstruct(), + std::forward(f)) {} + + // Constructs an `AnyInvocable` that holds an invocable object of type `T`, + // which is constructed in-place from the given arguments. + // + // Example: + // + // AnyInvocable func( + // absl::in_place_type, arg1, arg2); + // + template ::value>> + explicit AnyInvocable(absl::in_place_type_t, Args&&... args) + : Impl(absl::in_place_type>, + std::forward(args)...) { + static_assert(std::is_same>::value, + "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Overload of the above constructor to support list-initialization. + template &, Args...>::value>> + explicit AnyInvocable(absl::in_place_type_t, + std::initializer_list ilist, Args&&... args) + : Impl(absl::in_place_type>, ilist, + std::forward(args)...) { + static_assert(std::is_same>::value, + "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Assignment Operators + + // Assigns an `AnyInvocable` through move-assignment. + // Note that `f` is not guaranteed to be empty after move-assignment + // although it may be. 
+ AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default; + + // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If + // not empty, destroys the target, putting `*this` into an empty state. + AnyInvocable& operator=(std::nullptr_t) noexcept { + this->Clear(); + return *this; + } + + // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance. + // + // Upon assignment, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template ::value>> + AnyInvocable& operator=(F&& f) { + *this = AnyInvocable(std::forward(f)); + return *this; + } + + // Assigns an `AnyInvocable` from a reference to an invocable object. + // Upon assignment, stores a reference to the invocable object in the + // `AnyInvocable` instance. + template < + class F, + typename = absl::enable_if_t< + internal_any_invocable::CanAssignReferenceWrapper::value>> + AnyInvocable& operator=(std::reference_wrapper f) noexcept { + *this = AnyInvocable(f); + return *this; + } + + // Destructor + + // If not empty, destroys the target. + ~AnyInvocable() = default; + + // absl::AnyInvocable::swap() + // + // Exchanges the targets of `*this` and `other`. + void swap(AnyInvocable& other) noexcept { std::swap(*this, other); } + + // abl::AnyInvocable::operator bool() + // + // Returns `true` if `*this` is not empty. + explicit operator bool() const noexcept { return this->HasValue(); } + + // Invokes the target object of `*this`. `*this` must not be empty. + // + // Note: The signature of this function call operator is the same as the + // template parameter `Sig`. + using Impl::operator(); + + // Equality operators + + // Returns `true` if `*this` is empty. + friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept { + return !f.HasValue(); + } + + // Returns `true` if `*this` is empty. + friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept { + return !f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept { + return f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept { + return f.HasValue(); + } + + // swap() + // + // Exchanges the targets of `f1` and `f2`. + friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept { f1.swap(f2); } + + private: + // Friending other instantiations is necessary for conversions. + template + friend class internal_any_invocable::CoreImpl; +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/any_invocable_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/any_invocable_test.cc new file mode 100644 index 0000000000..1ed854077a --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/any_invocable_test.cc @@ -0,0 +1,1719 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/functional/any_invocable.h" + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +static_assert(absl::internal_any_invocable::kStorageSize >= sizeof(void*), + "These tests assume that the small object storage is at least " + "the size of a pointer."); + +namespace { + +// Helper macro used to avoid spelling `noexcept` in language versions older +// than C++17, where it is not part of the type system, in order to avoid +// compilation failures and internal compiler errors. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex) +#else +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) +#endif + +// A dummy type we use when passing qualifiers to metafunctions +struct _ {}; + +template +struct Wrapper { + template ::value>> + Wrapper(U&&); // NOLINT +}; + +// This will cause a recursive trait instantiation if the SFINAE checks are +// not ordered correctly for constructibility. +static_assert(std::is_constructible>, + Wrapper>>::value, + ""); + +// A metafunction that takes the cv and l-value reference qualifiers that were +// associated with a function type (here passed via qualifiers of an object +// type), and . +template +struct QualifiersForThisImpl { + static_assert(std::is_object::value, ""); + using type = + absl::conditional_t::value, const This, This>&; +}; + +template +struct QualifiersForThisImpl + : QualifiersForThisImpl {}; + +template +struct QualifiersForThisImpl { + static_assert(std::is_object::value, ""); + using type = + absl::conditional_t::value, const This, This>&&; +}; + +template +using QualifiersForThis = + typename QualifiersForThisImpl::type; + +// A metafunction that takes the cv and l-value reference qualifier of T and +// applies them to U's function type qualifiers. +template +struct GiveQualifiersToFunImpl; + +template +struct GiveQualifiersToFunImpl { + using type = + absl::conditional_t::value, R(P...) const, R(P...)>; +}; + +template +struct GiveQualifiersToFunImpl { + using type = + absl::conditional_t::value, R(P...) const&, R(P...)&>; +}; + +template +struct GiveQualifiersToFunImpl { + using type = + absl::conditional_t::value, R(P...) const&&, R(P...) &&>; +}; + +// If noexcept is a part of the type system, then provide the noexcept forms. +#if defined(__cpp_noexcept_function_type) + +template +struct GiveQualifiersToFunImpl { + using type = absl::conditional_t::value, + R(P...) const noexcept, R(P...) noexcept>; +}; + +template +struct GiveQualifiersToFunImpl { + using type = + absl::conditional_t::value, R(P...) const & noexcept, + R(P...) & noexcept>; +}; + +template +struct GiveQualifiersToFunImpl { + using type = + absl::conditional_t::value, R(P...) const && noexcept, + R(P...) 
&& noexcept>; +}; + +#endif // defined(__cpp_noexcept_function_type) + +template +using GiveQualifiersToFun = typename GiveQualifiersToFunImpl::type; + +// This is used in template parameters to decide whether or not to use a type +// that fits in the small object optimization storage. +enum class ObjSize { small, large }; + +// A base type that is used with classes as a means to insert an +// appropriately-sized dummy datamember when Size is ObjSize::large so that the +// user's class type is guaranteed to not fit in small object storage. +template +struct TypeErasedPadding; + +template <> +struct TypeErasedPadding {}; + +template <> +struct TypeErasedPadding { + char dummy_data[absl::internal_any_invocable::kStorageSize + 1] = {}; +}; + +struct Int { + Int(int v) noexcept : value(v) {} // NOLINT +#ifndef _MSC_VER + Int(Int&&) noexcept { + // NOTE: Prior to C++17, this not being called requires optimizations to + // take place when performing the top-level invocation. In practice, + // most supported compilers perform this optimization prior to C++17. + std::abort(); + } +#else + Int(Int&& v) noexcept = default; +#endif + operator int() && noexcept { return value; } // NOLINT + + int MemberFunctionAdd(int const& b, int c) noexcept { // NOLINT + return value + b + c; + } + + int value; +}; + +enum class Movable { no, yes, nothrow, trivial }; + +enum class NothrowCall { no, yes }; + +enum class Destructible { nothrow, trivial }; + +enum class ObjAlign : std::size_t { + normal = absl::internal_any_invocable::kAlignment, + large = absl::internal_any_invocable::kAlignment * 2, +}; + +// A function-object template that has knobs for each property that can affect +// how the object is stored in AnyInvocable. +template +struct add; + +#define ABSL_INTERNALS_ADD(qual) \ + template \ + struct alignas(static_cast(Alignment)) \ + add : TypeErasedPadding { \ + explicit add(int state_init) : state(state_init) {} \ + explicit add(std::initializer_list state_init, int tail) \ + : state(std::accumulate(std::begin(state_init), std::end(state_init), \ + 0) + \ + tail) {} \ + add(add&& other) = default; /*NOLINT*/ \ + Int operator()(int a, int b, int c) qual \ + ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \ + return state + a + b + c; \ + } \ + int state; \ + }; \ + \ + template \ + struct alignas(static_cast(Alignment)) \ + add : TypeErasedPadding { \ + explicit add(int state_init) : state(state_init) {} \ + explicit add(std::initializer_list state_init, int tail) \ + : state(std::accumulate(std::begin(state_init), std::end(state_init), \ + 0) + \ + tail) {} \ + ~add() noexcept {} \ + add(add&& other) = default; /*NOLINT*/ \ + Int operator()(int a, int b, int c) qual \ + ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \ + return state + a + b + c; \ + } \ + int state; \ + } + +// Explicitly specify an empty argument. +// MSVC (at least up to _MSC_VER 1931, if not beyond) warns that +// ABSL_INTERNALS_ADD() is an undefined zero-arg overload. 
+#define ABSL_INTERNALS_NOARG +ABSL_INTERNALS_ADD(ABSL_INTERNALS_NOARG); +#undef ABSL_INTERNALS_NOARG + +ABSL_INTERNALS_ADD(const); +ABSL_INTERNALS_ADD(&); +ABSL_INTERNALS_ADD(const&); +ABSL_INTERNALS_ADD(&&); // NOLINT +ABSL_INTERNALS_ADD(const&&); // NOLINT + +#undef ABSL_INTERNALS_ADD + +template +struct add : private add { + using Base = add; + + explicit add(int state_init) : Base(state_init) {} + + explicit add(std::initializer_list state_init, int tail) + : Base(state_init, tail) {} + + add(add&&) = delete; + + using Base::operator(); + using Base::state; +}; + +template +struct add : private add { + using Base = add; + + explicit add(int state_init) : Base(state_init) {} + + explicit add(std::initializer_list state_init, int tail) + : Base(state_init, tail) {} + + add(add&& other) noexcept(false) : Base(other.state) {} // NOLINT + + using Base::operator(); + using Base::state; +}; + +template +struct add : private add { + using Base = add; + + explicit add(int state_init) : Base(state_init) {} + + explicit add(std::initializer_list state_init, int tail) + : Base(state_init, tail) {} + + add(add&& other) noexcept : Base(other.state) {} + + using Base::operator(); + using Base::state; +}; + +// Actual non-member functions rather than function objects +Int add_function(Int&& a, int b, int c) noexcept { return a.value + b + c; } + +Int mult_function(Int&& a, int b, int c) noexcept { return a.value * b * c; } + +Int square_function(Int const&& a) noexcept { return a.value * a.value; } + +template +using AnyInvocable = absl::AnyInvocable; + +// Instantiations of this template contains all of the compile-time parameters +// for a given instantiation of the AnyInvocable test suite. +template +struct TestParams { + static constexpr Movable kMovability = Movability; + static constexpr Destructible kDestructibility = Destructibility; + using Qualifiers = Qual; + static constexpr NothrowCall kCallExceptionSpec = CallExceptionSpec; + static constexpr bool kIsNoexcept = kCallExceptionSpec == NothrowCall::yes; + static constexpr bool kIsRvalueQualified = + std::is_rvalue_reference::value; + static constexpr ObjSize kSize = Size; + static constexpr ObjAlign kAlignment = Alignment; + + // These types are used when testing with member object pointer Invocables + using UnqualifiedUnaryFunType = int(Int const&&) + ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes); + using UnaryFunType = GiveQualifiersToFun; + using MemObjPtrType = int(Int::*); + using UnaryAnyInvType = AnyInvocable; + using UnaryThisParamType = QualifiersForThis; + + template + static UnaryThisParamType ToUnaryThisParam(T&& fun) { + return static_cast(fun); + } + + // This function type intentionally uses 3 "kinds" of parameter types. + // - A user-defined type + // - A reference type + // - A scalar type + // + // These were chosen because internal forwarding takes place on parameters + // differently depending based on type properties (scalars are forwarded by + // value). + using ResultType = Int; + using AnyInvocableFunTypeNotNoexcept = Int(Int, const int&, int); + using UnqualifiedFunType = + typename std::conditional::type; + using FunType = GiveQualifiersToFun; + using MemFunPtrType = + typename std::conditional::type; + using AnyInvType = AnyInvocable; + using AddType = add; + using ThisParamType = QualifiersForThis; + + template + static ThisParamType ToThisParam(T&& fun) { + return static_cast(fun); + } + + // These typedefs are used when testing void return type covariance. 
+ using UnqualifiedVoidFunType = + typename std::conditional::type; + using VoidFunType = GiveQualifiersToFun; + using VoidAnyInvType = AnyInvocable; + using VoidThisParamType = QualifiersForThis; + + template + static VoidThisParamType ToVoidThisParam(T&& fun) { + return static_cast(fun); + } + + using CompatibleAnyInvocableFunType = + absl::conditional_t::value, + GiveQualifiersToFun, + GiveQualifiersToFun>; + + using CompatibleAnyInvType = AnyInvocable; + + using IncompatibleInvocable = + absl::conditional_t::value, + GiveQualifiersToFun<_&, UnqualifiedFunType>(_::*), + GiveQualifiersToFun<_&&, UnqualifiedFunType>(_::*)>; +}; + +// Given a member-pointer type, this metafunction yields the target type of the +// pointer, not including the class-type. It is used to verify that the function +// call operator of AnyInvocable has the proper signature, corresponding to the +// function type that the user provided. +template +struct MemberTypeOfImpl; + +template +struct MemberTypeOfImpl { + using type = T; +}; + +template +using MemberTypeOf = typename MemberTypeOfImpl::type; + +template +struct IsMemberSwappableImpl : std::false_type { + static constexpr bool kIsNothrow = false; +}; + +template +struct IsMemberSwappableImpl< + T, absl::void_t().swap(std::declval()))>> + : std::true_type { + static constexpr bool kIsNothrow = + noexcept(std::declval().swap(std::declval())); +}; + +template +using IsMemberSwappable = IsMemberSwappableImpl; + +template +using IsNothrowMemberSwappable = + std::integral_constant::kIsNothrow>; + +template +class AnyInvTestBasic : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestBasic); + +TYPED_TEST_P(AnyInvTestBasic, DefaultConstruction) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun; + + EXPECT_FALSE(static_cast(fun)); + + EXPECT_TRUE(std::is_nothrow_default_constructible::value); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionNullptr) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun = nullptr; + + EXPECT_FALSE(static_cast(fun)); + + EXPECT_TRUE( + (std::is_nothrow_constructible::value)); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionNullFunctionPtr) { + using AnyInvType = typename TypeParam::AnyInvType; + using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; + + UnqualifiedFunType* const null_fun_ptr = nullptr; + AnyInvType fun = null_fun_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberFunctionPtr) { + using AnyInvType = typename TypeParam::AnyInvType; + using MemFunPtrType = typename TypeParam::MemFunPtrType; + + const MemFunPtrType null_mem_fun_ptr = nullptr; + AnyInvType fun = null_mem_fun_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberObjectPtr) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + using MemObjPtrType = typename TypeParam::MemObjPtrType; + + const MemObjPtrType null_mem_obj_ptr = nullptr; + UnaryAnyInvType fun = null_mem_obj_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberFunctionPtr) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun = &Int::MemberFunctionAdd; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberObjectPtr) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + + UnaryAnyInvType fun = &Int::value; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(13, 
TypeParam::ToUnaryThisParam(fun)(13)); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionFunctionReferenceDecay) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun = add_function; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableEmpty) { + using AnyInvType = typename TypeParam::AnyInvType; + using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; + + CompatibleAnyInvType other; + AnyInvType fun = std::move(other); + + EXPECT_FALSE(static_cast(other)); // NOLINT + EXPECT_EQ(other, nullptr); // NOLINT + EXPECT_EQ(nullptr, other); // NOLINT + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableNonempty) { + using AnyInvType = typename TypeParam::AnyInvType; + using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; + + CompatibleAnyInvType other = &add_function; + AnyInvType fun = std::move(other); + + EXPECT_FALSE(static_cast(other)); // NOLINT + EXPECT_EQ(other, nullptr); // NOLINT + EXPECT_EQ(nullptr, other); // NOLINT + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestBasic, ConversionToBool) { + using AnyInvType = typename TypeParam::AnyInvType; + + { + AnyInvType fun; + + // This tests contextually-convertible-to-bool. + EXPECT_FALSE(fun ? true : false); // NOLINT + + // Make sure that the conversion is not implicit. + EXPECT_TRUE( + (std::is_nothrow_constructible::value)); + EXPECT_FALSE((std::is_convertible::value)); + } + + { + AnyInvType fun = &add_function; + + // This tests contextually-convertible-to-bool. + EXPECT_TRUE(fun ? true : false); // NOLINT + } +} + +TYPED_TEST_P(AnyInvTestBasic, Invocation) { + using AnyInvType = typename TypeParam::AnyInvType; + + using FunType = typename TypeParam::FunType; + using AnyInvCallType = MemberTypeOf; + + // Make sure the function call operator of AnyInvocable always has the + // type that was specified via the template argument. + EXPECT_TRUE((std::is_same::value)); + + AnyInvType fun = &add_function; + + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceConstruction) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun(absl::in_place_type, 5); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceConstructionInitializerList) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun(absl::in_place_type, {1, 2, 3, 4}, 5); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(39, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstruction) { + using AnyInvType = typename TypeParam::AnyInvType; + using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; + + AnyInvType fun(absl::in_place_type, nullptr); + + // In-place construction does not lead to empty. + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstructionValueInit) { + using AnyInvType = typename TypeParam::AnyInvType; + using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; + + AnyInvType fun(absl::in_place_type); + + // In-place construction does not lead to empty. 
+ EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstruction) { + using AnyInvType = typename TypeParam::AnyInvType; + using MemFunPtrType = typename TypeParam::MemFunPtrType; + + AnyInvType fun(absl::in_place_type, nullptr); + + // In-place construction does not lead to empty. + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstructionValueInit) { + using AnyInvType = typename TypeParam::AnyInvType; + using MemFunPtrType = typename TypeParam::MemFunPtrType; + + AnyInvType fun(absl::in_place_type); + + // In-place construction does not lead to empty. + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstruction) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + using MemObjPtrType = typename TypeParam::MemObjPtrType; + + UnaryAnyInvType fun(absl::in_place_type, nullptr); + + // In-place construction does not lead to empty. + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstructionValueInit) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + using MemObjPtrType = typename TypeParam::MemObjPtrType; + + UnaryAnyInvType fun(absl::in_place_type); + + // In-place construction does not lead to empty. + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, InPlaceVoidCovarianceConstruction) { + using VoidAnyInvType = typename TypeParam::VoidAnyInvType; + using AddType = typename TypeParam::AddType; + + VoidAnyInvType fun(absl::in_place_type, 5); + + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromEmpty) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType source_fun; + AnyInvType fun(std::move(source_fun)); + + EXPECT_FALSE(static_cast(fun)); + + EXPECT_TRUE(std::is_nothrow_move_constructible::value); +} + +TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromNonEmpty) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType source_fun(absl::in_place_type, 5); + AnyInvType fun(std::move(source_fun)); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); + + EXPECT_TRUE(std::is_nothrow_move_constructible::value); +} + +TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrEmpty) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun; + + EXPECT_TRUE(fun == nullptr); + EXPECT_TRUE(nullptr == fun); + + EXPECT_FALSE(fun != nullptr); + EXPECT_FALSE(nullptr != fun); +} + +TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrNonempty) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun(absl::in_place_type, 5); + + EXPECT_FALSE(fun == nullptr); + EXPECT_FALSE(nullptr == fun); + + EXPECT_TRUE(fun != nullptr); + EXPECT_TRUE(nullptr != fun); +} + +TYPED_TEST_P(AnyInvTestBasic, ResultType) { + using AnyInvType = typename TypeParam::AnyInvType; + using ExpectedResultType = typename TypeParam::ResultType; + + EXPECT_TRUE((std::is_same::value)); +} + +template +class AnyInvTestCombinatoric : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestCombinatoric); + +TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType source_fun; + AnyInvType fun; + + fun = std::move(source_fun); + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyLhsNonemptyRhs) { 
+ using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType source_fun(absl::in_place_type, 5); + AnyInvType fun; + + fun = std::move(source_fun); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyEmptyLhsRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType source_fun; + AnyInvType fun(absl::in_place_type, 5); + + fun = std::move(source_fun); + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyLhsNonemptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType source_fun(absl::in_place_type, 5); + AnyInvType fun(absl::in_place_type, 20); + + fun = std::move(source_fun); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignEmpty) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType source_fun; + source_fun = std::move(source_fun); + + // This space intentionally left blank. +} + +TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignNonempty) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType source_fun(absl::in_place_type, 5); + source_fun = std::move(source_fun); + + // This space intentionally left blank. +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun; + fun = nullptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; + + UnqualifiedFunType* const null_fun_ptr = nullptr; + AnyInvType fun; + fun = null_fun_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using MemFunPtrType = typename TypeParam::MemFunPtrType; + + const MemFunPtrType null_mem_fun_ptr = nullptr; + AnyInvType fun; + fun = null_mem_fun_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrEmptyLhs) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + using MemObjPtrType = typename TypeParam::MemObjPtrType; + + const MemObjPtrType null_mem_obj_ptr = nullptr; + UnaryAnyInvType fun; + fun = null_mem_obj_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun; + fun = &Int::MemberFunctionAdd; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrEmptyLhs) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + + UnaryAnyInvType fun; + fun = &Int::value; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun; + fun = add_function; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 
9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, + AssignCompatibleAnyInvocableEmptyLhsEmptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; + + CompatibleAnyInvType other; + AnyInvType fun; + fun = std::move(other); + + EXPECT_FALSE(static_cast(other)); // NOLINT + EXPECT_EQ(other, nullptr); // NOLINT + EXPECT_EQ(nullptr, other); // NOLINT + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, + AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; + + CompatibleAnyInvType other = &add_function; + AnyInvType fun; + fun = std::move(other); + + EXPECT_FALSE(static_cast(other)); // NOLINT + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun = &mult_function; + fun = nullptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType; + + UnqualifiedFunType* const null_fun_ptr = nullptr; + AnyInvType fun = &mult_function; + fun = null_fun_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using MemFunPtrType = typename TypeParam::MemFunPtrType; + + const MemFunPtrType null_mem_fun_ptr = nullptr; + AnyInvType fun = &mult_function; + fun = null_mem_fun_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrNonemptyLhs) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + using MemObjPtrType = typename TypeParam::MemObjPtrType; + + const MemObjPtrType null_mem_obj_ptr = nullptr; + UnaryAnyInvType fun = &square_function; + fun = null_mem_obj_ptr; + + EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun = &mult_function; + fun = &Int::MemberFunctionAdd; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrNonemptyLhs) { + using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType; + + UnaryAnyInvType fun = &square_function; + fun = &Int::value; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + AnyInvType fun = &mult_function; + fun = add_function; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, + AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; + + CompatibleAnyInvType other; + AnyInvType fun = &mult_function; + fun = std::move(other); + + EXPECT_FALSE(static_cast(other)); // NOLINT + EXPECT_EQ(other, nullptr); // NOLINT + EXPECT_EQ(nullptr, other); // NOLINT + + 
EXPECT_FALSE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, + AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType; + + CompatibleAnyInvType other = &add_function; + AnyInvType fun = &mult_function; + fun = std::move(other); + + EXPECT_FALSE(static_cast(other)); // NOLINT + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsEmptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + + // Swap idiom + { + AnyInvType fun; + AnyInvType other; + + using std::swap; + swap(fun, other); + + EXPECT_FALSE(static_cast(fun)); + EXPECT_FALSE(static_cast(other)); + + EXPECT_TRUE( + absl::type_traits_internal::IsNothrowSwappable::value); + } + + // Member swap + { + AnyInvType fun; + AnyInvType other; + + fun.swap(other); + + EXPECT_FALSE(static_cast(fun)); + EXPECT_FALSE(static_cast(other)); + + EXPECT_TRUE(IsNothrowMemberSwappable::value); + } +} + +TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsNonemptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + // Swap idiom + { + AnyInvType fun; + AnyInvType other(absl::in_place_type, 5); + + using std::swap; + swap(fun, other); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_FALSE(static_cast(other)); + + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); + + EXPECT_TRUE( + absl::type_traits_internal::IsNothrowSwappable::value); + } + + // Member swap + { + AnyInvType fun; + AnyInvType other(absl::in_place_type, 5); + + fun.swap(other); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_FALSE(static_cast(other)); + + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); + + EXPECT_TRUE(IsNothrowMemberSwappable::value); + } +} + +TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsEmptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + // Swap idiom + { + AnyInvType fun(absl::in_place_type, 5); + AnyInvType other; + + using std::swap; + swap(fun, other); + + EXPECT_FALSE(static_cast(fun)); + EXPECT_TRUE(static_cast(other)); + + EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); + + EXPECT_TRUE( + absl::type_traits_internal::IsNothrowSwappable::value); + } + + // Member swap + { + AnyInvType fun(absl::in_place_type, 5); + AnyInvType other; + + fun.swap(other); + + EXPECT_FALSE(static_cast(fun)); + EXPECT_TRUE(static_cast(other)); + + EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); + + EXPECT_TRUE(IsNothrowMemberSwappable::value); + } +} + +TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsNonemptyRhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + // Swap idiom + { + AnyInvType fun(absl::in_place_type, 5); + AnyInvType other(absl::in_place_type, 6); + + using std::swap; + swap(fun, other); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_TRUE(static_cast(other)); + + EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value); + EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); + + EXPECT_TRUE( + absl::type_traits_internal::IsNothrowSwappable::value); + } + + // Member swap + { + AnyInvType fun(absl::in_place_type, 5); + AnyInvType other(absl::in_place_type, 6); + + fun.swap(other); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_TRUE(static_cast(other)); + + EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 
9).value); + EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value); + + EXPECT_TRUE(IsNothrowMemberSwappable::value); + } +} + +template +class AnyInvTestMovable : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestMovable); + +TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionUserDefinedType) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun(AddType(5)); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionVoidCovariance) { + using VoidAnyInvType = typename TypeParam::VoidAnyInvType; + using AddType = typename TypeParam::AddType; + + VoidAnyInvType fun(AddType(5)); + + EXPECT_TRUE(static_cast(fun)); +} + +TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun; + fun = AddType(5); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun = &add_function; + fun = AddType(5); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); +} + +TYPED_TEST_P(AnyInvTestMovable, ConversionAssignVoidCovariance) { + using VoidAnyInvType = typename TypeParam::VoidAnyInvType; + using AddType = typename TypeParam::AddType; + + VoidAnyInvType fun; + fun = AddType(5); + + EXPECT_TRUE(static_cast(fun)); +} + +template +class AnyInvTestNoexceptFalse : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse); + +TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionConstructionConstraints) { + using AnyInvType = typename TypeParam::AnyInvType; + + EXPECT_TRUE((std::is_constructible< + AnyInvType, + typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); + EXPECT_FALSE(( + std::is_constructible::value)); +} + +TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionAssignConstraints) { + using AnyInvType = typename TypeParam::AnyInvType; + + EXPECT_TRUE((std::is_assignable< + AnyInvType&, + typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); + EXPECT_FALSE( + (std::is_assignable::value)); +} + +template +class AnyInvTestNoexceptTrue : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue); + +TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionConstructionConstraints) { +#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L + GTEST_SKIP() << "Noexcept was not part of the type system before C++17."; +#else + using AnyInvType = typename TypeParam::AnyInvType; + + EXPECT_FALSE((std::is_constructible< + AnyInvType, + typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); + EXPECT_FALSE(( + std::is_constructible::value)); +#endif +} + +TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionAssignConstraints) { +#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L + GTEST_SKIP() << "Noexcept was not part of the type system before C++17."; +#else + using AnyInvType = typename TypeParam::AnyInvType; + + EXPECT_FALSE((std::is_assignable< + AnyInvType&, + typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value)); + EXPECT_FALSE( + (std::is_assignable::value)); +#endif +} + +template +class AnyInvTestNonRvalue : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestNonRvalue); + 
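The move, swap, and conversion tests above all exercise the value semantics of absl::AnyInvocable: it accepts move-only callables, supports absl::in_place_type construction, and is itself move-only. As a standalone illustration of those semantics (not part of the patch; names here are hypothetical), a minimal sketch might look like:

  #include <memory>
  #include <utility>
  #include "absl/functional/any_invocable.h"

  int Demo() {
    auto p = std::make_unique<int>(7);
    // A lambda capturing a unique_ptr is move-only, so it cannot be stored in
    // std::function, but absl::AnyInvocable accepts it.
    absl::AnyInvocable<int()> f = [q = std::move(p)]() { return *q; };

    // AnyInvocable itself is move-only; the moved-from object is left in a
    // valid but unspecified state.
    absl::AnyInvocable<int()> g = std::move(f);
    return g();  // 7
  }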
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionConstructionReferenceWrapper) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AddType add(4); + AnyInvType fun = std::ref(add); + add.state = 5; + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); +} + +TYPED_TEST_P(AnyInvTestNonRvalue, NonMoveableResultType) { +#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L + GTEST_SKIP() << "Copy/move elision was not standard before C++17"; +#else + // Define a result type that cannot be copy- or move-constructed. + struct Result { + int x; + + explicit Result(const int x_in) : x(x_in) {} + Result(Result&&) = delete; + }; + + static_assert(!std::is_move_constructible::value, ""); + static_assert(!std::is_copy_constructible::value, ""); + + // Assumption check: it should nevertheless be possible to use functors that + // return a Result struct according to the language rules. + const auto return_17 = []() noexcept { return Result(17); }; + EXPECT_EQ(17, return_17().x); + + // Just like plain functors, it should work fine to use an AnyInvocable that + // returns the non-moveable type. + using UnqualifiedFun = + absl::conditional_t; + + using Fun = + GiveQualifiersToFun; + + AnyInvocable any_inv(return_17); + EXPECT_EQ(17, any_inv().x); +#endif +} + +TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperEmptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AddType add(4); + AnyInvType fun; + fun = std::ref(add); + add.state = 5; + EXPECT_TRUE( + (std::is_nothrow_assignable>::value)); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); +} + +TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperNonemptyLhs) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AddType add(4); + AnyInvType fun = &mult_function; + fun = std::ref(add); + add.state = 5; + EXPECT_TRUE( + (std::is_nothrow_assignable>::value)); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value); + + EXPECT_TRUE(static_cast(fun)); + EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value); +} + +template +class AnyInvTestRvalue : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(AnyInvTestRvalue); + +TYPED_TEST_P(AnyInvTestRvalue, ConversionConstructionReferenceWrapper) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + EXPECT_FALSE(( + std::is_convertible, AnyInvType>::value)); +} + +TYPED_TEST_P(AnyInvTestRvalue, NonMoveableResultType) { +#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L + GTEST_SKIP() << "Copy/move elision was not standard before C++17"; +#else + // Define a result type that cannot be copy- or move-constructed. + struct Result { + int x; + + explicit Result(const int x_in) : x(x_in) {} + Result(Result&&) = delete; + }; + + static_assert(!std::is_move_constructible::value, ""); + static_assert(!std::is_copy_constructible::value, ""); + + // Assumption check: it should nevertheless be possible to use functors that + // return a Result struct according to the language rules. 
+ const auto return_17 = []() noexcept { return Result(17); }; + EXPECT_EQ(17, return_17().x); + + // Just like plain functors, it should work fine to use an AnyInvocable that + // returns the non-moveable type. + using UnqualifiedFun = + absl::conditional_t; + + using Fun = + GiveQualifiersToFun; + + EXPECT_EQ(17, AnyInvocable(return_17)().x); +#endif +} + +TYPED_TEST_P(AnyInvTestRvalue, ConversionAssignReferenceWrapper) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + EXPECT_FALSE(( + std::is_assignable>::value)); +} + +TYPED_TEST_P(AnyInvTestRvalue, NonConstCrashesOnSecondCall) { + using AnyInvType = typename TypeParam::AnyInvType; + using AddType = typename TypeParam::AddType; + + AnyInvType fun(absl::in_place_type, 5); + + EXPECT_TRUE(static_cast(fun)); + std::move(fun)(7, 8, 9); + + // Ensure we're still valid + EXPECT_TRUE(static_cast(fun)); // NOLINT(bugprone-use-after-move) + +#if !defined(NDEBUG) || ABSL_OPTION_HARDENED == 1 + EXPECT_DEATH_IF_SUPPORTED(std::move(fun)(7, 8, 9), ""); +#endif +} + +// Ensure that any qualifiers (in particular &&-qualifiers) do not affect +// when the destructor is actually run. +TYPED_TEST_P(AnyInvTestRvalue, QualifierIndependentObjectLifetime) { + using AnyInvType = typename TypeParam::AnyInvType; + + auto refs = std::make_shared(); + { + AnyInvType fun([refs](auto&&...) noexcept { return 0; }); + EXPECT_FALSE(refs.unique()); + + std::move(fun)(7, 8, 9); + + // Ensure destructor hasn't run even if rref-qualified + EXPECT_FALSE(refs.unique()); + } + EXPECT_TRUE(refs.unique()); +} + +// NOTE: This test suite originally attempted to enumerate all possible +// combinations of type properties but the build-time started getting too large. +// Instead, it is now assumed that certain parameters are orthogonal and so +// some combinations are elided. + +// A metafunction to form a TypeList of all cv and non-rvalue ref combinations, +// coupled with all of the other explicitly specified parameters. +template +using NonRvalueQualifiedTestParams = ::testing::Types< // + TestParams, // + TestParams, // + TestParams, // + TestParams>; + +// A metafunction to form a TypeList of const and non-const rvalue ref +// qualifiers, coupled with all of the other explicitly specified parameters. +template +using RvalueQualifiedTestParams = ::testing::Types< + TestParams, // + TestParams // + >; + +// All qualifier combinations and a noexcept function type +using TestParameterListNonRvalueQualifiersNothrowCall = + NonRvalueQualifiedTestParams; +using TestParameterListRvalueQualifiersNothrowCall = + RvalueQualifiedTestParams; + +// All qualifier combinations and a non-noexcept function type +using TestParameterListNonRvalueQualifiersCallMayThrow = + NonRvalueQualifiedTestParams; +using TestParameterListRvalueQualifiersCallMayThrow = + RvalueQualifiedTestParams; + +// Lists of various cases that should lead to remote storage +using TestParameterListRemoteMovable = ::testing::Types< + // "Normal" aligned types that are large and have trivial destructors + TestParams, // + TestParams, // + TestParams, // + TestParams, // + + // Same as above but with non-trivial destructors + TestParams, // + TestParams, // + TestParams, // + TestParams // + +// Dynamic memory allocation for over-aligned data was introduced in C++17. +// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0035r4.html +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + // Types that must use remote storage because of a large alignment. 
+ , + TestParams, // + TestParams, // + TestParams, // + TestParams // +#endif + >; +using TestParameterListRemoteNonMovable = ::testing::Types< + // "Normal" aligned types that are large and have trivial destructors + TestParams, // + TestParams, // + // Same as above but with non-trivial destructors + TestParams, // + TestParams // + >; + +// Parameters that lead to local storage +using TestParameterListLocal = ::testing::Types< + // Types that meet the requirements and have trivial destructors + TestParams, // + TestParams, // + + // Same as above but with non-trivial destructors + TestParams, // + TestParams // + >; + +// All of the tests that are run for every possible combination of types. +REGISTER_TYPED_TEST_SUITE_P( + AnyInvTestBasic, DefaultConstruction, ConstructionNullptr, + ConstructionNullFunctionPtr, ConstructionNullMemberFunctionPtr, + ConstructionNullMemberObjectPtr, ConstructionMemberFunctionPtr, + ConstructionMemberObjectPtr, ConstructionFunctionReferenceDecay, + ConstructionCompatibleAnyInvocableEmpty, + ConstructionCompatibleAnyInvocableNonempty, InPlaceConstruction, + ConversionToBool, Invocation, InPlaceConstructionInitializerList, + InPlaceNullFunPtrConstruction, InPlaceNullFunPtrConstructionValueInit, + InPlaceNullMemFunPtrConstruction, InPlaceNullMemFunPtrConstructionValueInit, + InPlaceNullMemObjPtrConstruction, InPlaceNullMemObjPtrConstructionValueInit, + InPlaceVoidCovarianceConstruction, MoveConstructionFromEmpty, + MoveConstructionFromNonEmpty, ComparisonWithNullptrEmpty, + ComparisonWithNullptrNonempty, ResultType); + +INSTANTIATE_TYPED_TEST_SUITE_P( + NonRvalueCallMayThrow, AnyInvTestBasic, + TestParameterListNonRvalueQualifiersCallMayThrow); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestBasic, + TestParameterListRvalueQualifiersCallMayThrow); + +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestBasic, + TestParameterListRemoteMovable); +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestBasic, + TestParameterListRemoteNonMovable); + +INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestBasic, TestParameterListLocal); + +INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestBasic, + TestParameterListNonRvalueQualifiersNothrowCall); +INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestBasic, + TestParameterListRvalueQualifiersNothrowCall); + +// Tests for functions that take two operands. 
+REGISTER_TYPED_TEST_SUITE_P( + AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs, + MoveAssignEmptyLhsNonemptyRhs, MoveAssignNonemptyEmptyLhsRhs, + MoveAssignNonemptyLhsNonemptyRhs, SelfMoveAssignEmpty, + SelfMoveAssignNonempty, AssignNullptrEmptyLhs, + AssignNullFunctionPtrEmptyLhs, AssignNullMemberFunctionPtrEmptyLhs, + AssignNullMemberObjectPtrEmptyLhs, AssignMemberFunctionPtrEmptyLhs, + AssignMemberObjectPtrEmptyLhs, AssignFunctionReferenceDecayEmptyLhs, + AssignCompatibleAnyInvocableEmptyLhsEmptyRhs, + AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs, AssignNullptrNonemptyLhs, + AssignNullFunctionPtrNonemptyLhs, AssignNullMemberFunctionPtrNonemptyLhs, + AssignNullMemberObjectPtrNonemptyLhs, AssignMemberFunctionPtrNonemptyLhs, + AssignMemberObjectPtrNonemptyLhs, AssignFunctionReferenceDecayNonemptyLhs, + AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs, + AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs, SwapEmptyLhsEmptyRhs, + SwapEmptyLhsNonemptyRhs, SwapNonemptyLhsEmptyRhs, + SwapNonemptyLhsNonemptyRhs); + +INSTANTIATE_TYPED_TEST_SUITE_P( + NonRvalueCallMayThrow, AnyInvTestCombinatoric, + TestParameterListNonRvalueQualifiersCallMayThrow); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestCombinatoric, + TestParameterListRvalueQualifiersCallMayThrow); + +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestCombinatoric, + TestParameterListRemoteMovable); +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestCombinatoric, + TestParameterListRemoteNonMovable); + +INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestCombinatoric, + TestParameterListLocal); + +INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestCombinatoric, + TestParameterListNonRvalueQualifiersNothrowCall); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestCombinatoric, + TestParameterListRvalueQualifiersNothrowCall); + +REGISTER_TYPED_TEST_SUITE_P(AnyInvTestMovable, + ConversionConstructionUserDefinedType, + ConversionConstructionVoidCovariance, + ConversionAssignUserDefinedTypeEmptyLhs, + ConversionAssignUserDefinedTypeNonemptyLhs, + ConversionAssignVoidCovariance); + +INSTANTIATE_TYPED_TEST_SUITE_P( + NonRvalueCallMayThrow, AnyInvTestMovable, + TestParameterListNonRvalueQualifiersCallMayThrow); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestMovable, + TestParameterListRvalueQualifiersCallMayThrow); + +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestMovable, + TestParameterListRemoteMovable); + +INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestMovable, + TestParameterListLocal); + +INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestMovable, + TestParameterListNonRvalueQualifiersNothrowCall); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestMovable, + TestParameterListRvalueQualifiersNothrowCall); + +REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse, + ConversionConstructionConstraints, + ConversionAssignConstraints); + +INSTANTIATE_TYPED_TEST_SUITE_P( + NonRvalueCallMayThrow, AnyInvTestNoexceptFalse, + TestParameterListNonRvalueQualifiersCallMayThrow); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestNoexceptFalse, + TestParameterListRvalueQualifiersCallMayThrow); + +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNoexceptFalse, + TestParameterListRemoteMovable); +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNoexceptFalse, + TestParameterListRemoteNonMovable); + +INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNoexceptFalse, + TestParameterListLocal); + 
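The registrations and instantiations above follow googletest's typed-test-suite pattern: every TYPED_TEST_P must be named in REGISTER_TYPED_TEST_SUITE_P, and each INSTANTIATE_TYPED_TEST_SUITE_P stamps the whole suite out once per entry in a ::testing::Types list. A minimal sketch of that machinery, independent of this patch and using hypothetical names, is:

  #include "gtest/gtest.h"

  template <typename T>
  class SmallSuite : public ::testing::Test {};
  TYPED_TEST_SUITE_P(SmallSuite);

  TYPED_TEST_P(SmallSuite, DefaultIsZero) { EXPECT_EQ(TypeParam{}, TypeParam{0}); }

  // Every TYPED_TEST_P above must be listed before instantiation.
  REGISTER_TYPED_TEST_SUITE_P(SmallSuite, DefaultIsZero);

  // One instantiation per type list; the suite runs once per listed type.
  using SmallTypes = ::testing::Types<int, long, unsigned>;
  INSTANTIATE_TYPED_TEST_SUITE_P(Integral, SmallSuite, SmallTypes);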
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue, + ConversionConstructionConstraints, + ConversionAssignConstraints); + +INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNoexceptTrue, + TestParameterListNonRvalueQualifiersNothrowCall); +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestNoexceptTrue, + TestParameterListRvalueQualifiersNothrowCall); + +REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNonRvalue, + ConversionConstructionReferenceWrapper, + NonMoveableResultType, + ConversionAssignReferenceWrapperEmptyLhs, + ConversionAssignReferenceWrapperNonemptyLhs); + +INSTANTIATE_TYPED_TEST_SUITE_P( + NonRvalueCallMayThrow, AnyInvTestNonRvalue, + TestParameterListNonRvalueQualifiersCallMayThrow); + +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNonRvalue, + TestParameterListRemoteMovable); +INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNonRvalue, + TestParameterListRemoteNonMovable); + +INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNonRvalue, + TestParameterListLocal); + +INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNonRvalue, + TestParameterListNonRvalueQualifiersNothrowCall); + +REGISTER_TYPED_TEST_SUITE_P(AnyInvTestRvalue, + ConversionConstructionReferenceWrapper, + NonMoveableResultType, + ConversionAssignReferenceWrapper, + NonConstCrashesOnSecondCall, + QualifierIndependentObjectLifetime); + +INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestRvalue, + TestParameterListRvalueQualifiersCallMayThrow); + +INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestRvalue, + TestParameterListRvalueQualifiersNothrowCall); + +// Minimal SFINAE testing for platforms where we can't run the tests, but we can +// build binaries for. +static_assert( + std::is_convertible>::value, ""); +static_assert(!std::is_convertible>::value, + ""); + +#undef ABSL_INTERNAL_NOEXCEPT_SPEC + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/bind_front.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/bind_front.h index 5b47970e35..f9075bd1d5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/bind_front.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/bind_front.h @@ -30,6 +30,10 @@ #ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_ #define ABSL_FUNCTIONAL_BIND_FRONT_H_ +#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +#include // For std::bind_front. +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + #include "absl/functional/internal/front_binder.h" #include "absl/utility/utility.h" @@ -46,7 +50,8 @@ ABSL_NAMESPACE_BEGIN // specified. More importantly, it provides more reliable correctness guarantees // than `std::bind()`; while `std::bind()` will silently ignore passing more // parameters than expected, for example, `absl::bind_front()` will report such -// mis-uses as errors. +// mis-uses as errors. In C++20, `absl::bind_front` is replaced by +// `std::bind_front`. // // absl::bind_front(a...) can be seen as storing the results of // std::make_tuple(a...). @@ -170,6 +175,9 @@ ABSL_NAMESPACE_BEGIN // // Doesn't copy "hi". // absl::bind_front(Print, absl::string_view(hi))("Chuk"); // +#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +using std::bind_front; +#else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L template constexpr functional_internal::bind_front_t bind_front( F&& func, BoundArgs&&... 
args) { @@ -177,6 +185,7 @@ constexpr functional_internal::bind_front_t bind_front( absl::in_place, absl::forward(func), absl::forward(args)...); } +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref.h index 824e3cea9d..f9779607fb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref.h @@ -69,7 +69,8 @@ class FunctionRef; // An `absl::FunctionRef` is a lightweight wrapper to any invokable object with // a compatible signature. Generally, an `absl::FunctionRef` should only be used // as an argument type and should be preferred as an argument over a const -// reference to a `std::function`. +// reference to a `std::function`. `absl::FunctionRef` itself does not allocate, +// although the wrapped invokable may. // // Example: // diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_test.cc index 3aa5974587..412027cd2b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_test.cc @@ -14,6 +14,7 @@ #include "absl/functional/function_ref.h" +#include #include #include "gmock/gmock.h" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_type_benchmark.cc similarity index 78% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_benchmark.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_type_benchmark.cc index 045305bfef..03dc31d8cd 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_ref_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/function_type_benchmark.cc @@ -1,4 +1,4 @@ -// Copyright 2019 The Abseil Authors. +// Copyright 2022 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/functional/function_ref.h" - +#include #include +#include #include "benchmark/benchmark.h" #include "absl/base/attributes.h" +#include "absl/functional/any_invocable.h" +#include "absl/functional/function_ref.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -61,6 +63,12 @@ void BM_TrivialFunctionRef(benchmark::State& state) { } BENCHMARK(BM_TrivialFunctionRef); +void BM_TrivialAnyInvocable(benchmark::State& state) { + ConstructAndCallFunctionBenchmark>(state, + TrivialFunctor{}); +} +BENCHMARK(BM_TrivialAnyInvocable); + void BM_LargeStdFunction(benchmark::State& state) { ConstructAndCallFunctionBenchmark>(state, LargeFunctor{}); @@ -72,6 +80,13 @@ void BM_LargeFunctionRef(benchmark::State& state) { } BENCHMARK(BM_LargeFunctionRef); + +void BM_LargeAnyInvocable(benchmark::State& state) { + ConstructAndCallFunctionBenchmark>(state, + LargeFunctor{}); +} +BENCHMARK(BM_LargeAnyInvocable); + void BM_FunPtrStdFunction(benchmark::State& state) { ConstructAndCallFunctionBenchmark>(state, FreeFunction); } @@ -82,6 +97,11 @@ void BM_FunPtrFunctionRef(benchmark::State& state) { } BENCHMARK(BM_FunPtrFunctionRef); +void BM_FunPtrAnyInvocable(benchmark::State& state) { + ConstructAndCallFunctionBenchmark>(state, FreeFunction); +} +BENCHMARK(BM_FunPtrAnyInvocable); + // Doesn't include construction or copy overhead in the loop. template void CallFunctionBenchmark(benchmark::State& state, const Callable& c, @@ -113,6 +133,12 @@ void BM_TrivialArgsFunctionRef(benchmark::State& state) { } BENCHMARK(BM_TrivialArgsFunctionRef); +void BM_TrivialArgsAnyInvocable(benchmark::State& state) { + CallFunctionBenchmark>( + state, FunctorWithTrivialArgs{}, 1, 2, 3); +} +BENCHMARK(BM_TrivialArgsAnyInvocable); + struct FunctorWithNonTrivialArgs { void operator()(std::string a, std::string b, std::string c) const { benchmark::DoNotOptimize(&a); @@ -137,6 +163,14 @@ void BM_NonTrivialArgsFunctionRef(benchmark::State& state) { } BENCHMARK(BM_NonTrivialArgsFunctionRef); +void BM_NonTrivialArgsAnyInvocable(benchmark::State& state) { + std::string a, b, c; + CallFunctionBenchmark< + AnyInvocable>( + state, FunctorWithNonTrivialArgs{}, a, b, c); +} +BENCHMARK(BM_NonTrivialArgsAnyInvocable); + } // namespace ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/internal/any_invocable.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/internal/any_invocable.h new file mode 100644 index 0000000000..8fce4bf614 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/functional/internal/any_invocable.h @@ -0,0 +1,877 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Implementation details for `absl::AnyInvocable` + +#ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ +#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ + +//////////////////////////////////////////////////////////////////////////////// +// // +// This implementation of the proposed `any_invocable` uses an approach that // +// chooses between local storage and remote storage for the contained target // +// object based on the target object's size, alignment requirements, and // +// whether or not it has a nothrow move constructor. Additional optimizations // +// are performed when the object is a trivially copyable type [basic.types]. // +// // +// There are three datamembers per `AnyInvocable` instance // +// // +// 1) A union containing either // +// - A pointer to the target object referred to via a void*, or // +// - the target object, emplaced into a raw char buffer // +// // +// 2) A function pointer to a "manager" function operation that takes a // +// discriminator and logically branches to either perform a move operation // +// or destroy operation based on that discriminator. // +// // +// 3) A function pointer to an "invoker" function operation that invokes the // +// target object, directly returning the result. // +// // +// When in the logically empty state, the manager function is an empty // +// function and the invoker function is one that would be undefined-behavior // +// to call. // +// // +// An additional optimization is performed when converting from one // +// AnyInvocable to another where only the noexcept specification and/or the // +// cv/ref qualifiers of the function type differ. In these cases, the // +// conversion works by "moving the guts", similar to if they were the same // +// exact type, as opposed to having to perform an additional layer of // +// wrapping through remote storage. // +// // +//////////////////////////////////////////////////////////////////////////////// + +// IWYU pragma: private, include "absl/functional/any_invocable.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/invoke.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Helper macro used to prevent spelling `noexcept` in language versions older +// than C++17, where it is not part of the type system, in order to avoid +// compilation failures and internal compiler errors. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex) +#else +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) +#endif + +// Defined in functional/any_invocable.h +template +class AnyInvocable; + +namespace internal_any_invocable { + +// Constants relating to the small-object-storage for AnyInvocable +enum StorageProperty : std::size_t { + kAlignment = alignof(std::max_align_t), // The alignment of the storage + kStorageSize = sizeof(void*) * 2 // The size of the storage +}; + +//////////////////////////////////////////////////////////////////////////////// +// +// A metafunction for checking if a type is an AnyInvocable instantiation. +// This is used during conversion operations. 
+template +struct IsAnyInvocable : std::false_type {}; + +template +struct IsAnyInvocable> : std::true_type {}; +// +//////////////////////////////////////////////////////////////////////////////// + +// A type trait that tells us whether or not a target function type should be +// stored locally in the small object optimization storage +template +using IsStoredLocally = std::integral_constant< + bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment && + kAlignment % alignof(T) == 0 && + std::is_nothrow_move_constructible::value>; + +// An implementation of std::remove_cvref_t of C++20. +template +using RemoveCVRef = + typename std::remove_cv::type>::type; + +//////////////////////////////////////////////////////////////////////////////// +// +// An implementation of the C++ standard INVOKE pseudo-macro, operation is +// equivalent to std::invoke except that it forces an implicit conversion to the +// specified return type. If "R" is void, the function is executed and the +// return value is simply ignored. +template ::value>> +void InvokeR(F&& f, P&&... args) { + absl::base_internal::invoke(std::forward(f), std::forward
<P>
(args)...); +} + +template <class ReturnType, class F, class... P, + absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0> +ReturnType InvokeR(F&& f, P&&... args) { + return absl::base_internal::invoke(std::forward<F>(f), + std::forward<P>
(args)...); +} + +// +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +/// +// A metafunction that takes a "T" corresponding to a parameter type of the +// user's specified function type, and yields the parameter type to use for the +// type-erased invoker. In order to prevent observable moves, this must be +// either a reference or, if the type is trivial, the original parameter type +// itself. Since the parameter type may be incomplete at the point that this +// metafunction is used, we can only do this optimization for scalar types +// rather than for any trivial type. +template +T ForwardImpl(std::true_type); + +template +T&& ForwardImpl(std::false_type); + +// NOTE: We deliberately use an intermediate struct instead of a direct alias, +// as a workaround for b/206991861 on MSVC versions < 1924. +template +struct ForwardedParameter { + using type = decltype(( + ForwardImpl)(std::integral_constant::value>())); +}; + +template +using ForwardedParameterType = typename ForwardedParameter::type; +// +//////////////////////////////////////////////////////////////////////////////// + +// A discriminator when calling the "manager" function that describes operation +// type-erased operation should be invoked. +// +// "relocate_from_to" specifies that the manager should perform a move. +// +// "dispose" specifies that the manager should perform a destroy. +enum class FunctionToCall : bool { relocate_from_to, dispose }; + +// The portion of `AnyInvocable` state that contains either a pointer to the +// target object or the object itself in local storage +union TypeErasedState { + struct { + // A pointer to the type-erased object when remotely stored + void* target; + // The size of the object for `RemoteManagerTrivial` + std::size_t size; + } remote; + + // Local-storage for the type-erased object when small and trivial enough + alignas(kAlignment) char storage[kStorageSize]; +}; + +// A typed accessor for the object in `TypeErasedState` storage +template +T& ObjectInLocalStorage(TypeErasedState* const state) { + // We launder here because the storage may be reused with the same type. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + return *std::launder(reinterpret_cast(&state->storage)); +#elif ABSL_HAVE_BUILTIN(__builtin_launder) + return *__builtin_launder(reinterpret_cast(&state->storage)); +#else + + // When `std::launder` or equivalent are not available, we rely on undefined + // behavior, which works as intended on Abseil's officially supported + // platforms as of Q2 2022. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic push +#endif + return *reinterpret_cast(&state->storage); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + +#endif +} + +// The type for functions issuing lifetime-related operations: move and dispose +// A pointer to such a function is contained in each `AnyInvocable` instance. +// NOTE: When specifying `FunctionToCall::`dispose, the same state must be +// passed as both "from" and "to". +using ManagerType = void(FunctionToCall /*operation*/, + TypeErasedState* /*from*/, TypeErasedState* /*to*/) + ABSL_INTERNAL_NOEXCEPT_SPEC(true); + +// The type for functions issuing the actual invocation of the object +// A pointer to such a function is contained in each AnyInvocable instance. 
+template +using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType
<P>
...) + ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept); + +// The manager that is used when AnyInvocable is empty +inline void EmptyManager(FunctionToCall /*operation*/, + TypeErasedState* /*from*/, + TypeErasedState* /*to*/) noexcept {} + +// The manager that is used when a target function is in local storage and is +// a trivially copyable type. +inline void LocalManagerTrivial(FunctionToCall /*operation*/, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + // This single statement without branching handles both possible operations. + // + // For FunctionToCall::dispose, "from" and "to" point to the same state, and + // so this assignment logically would do nothing. + // + // Note: Correctness here relies on http://wg21.link/p0593, which has only + // become standard in C++20, though implementations do not break it in + // practice for earlier versions of C++. + // + // The correct way to do this without that paper is to first placement-new a + // default-constructed T in "to->storage" prior to the memmove, but doing so + // requires a different function to be created for each T that is stored + // locally, which can cause unnecessary bloat and be less cache friendly. + *to = *from; + + // Note: Because the type is trivially copyable, the destructor does not need + // to be called ("trivially copyable" requires a trivial destructor). +} + +// The manager that is used when a target function is in local storage and is +// not a trivially copyable type. +template +void LocalManagerNontrivial(FunctionToCall operation, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + static_assert(IsStoredLocally::value, + "Local storage must only be used for supported types."); + static_assert(!std::is_trivially_copyable::value, + "Locally stored types must be trivially copyable."); + + T& from_object = (ObjectInLocalStorage)(from); + + switch (operation) { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + ::new (static_cast(&to->storage)) T(std::move(from_object)); + ABSL_FALLTHROUGH_INTENDED; + case FunctionToCall::dispose: + from_object.~T(); // Must not throw. // NOLINT + return; + } + ABSL_INTERNAL_UNREACHABLE; +} + +// The invoker that is used when a target function is in local storage +// Note: QualTRef here is the target function type along with cv and reference +// qualifiers that must be used when calling the function. +template +ReturnType LocalInvoker( + TypeErasedState* const state, + ForwardedParameterType
<P>
... args) noexcept(SigIsNoexcept) { + using RawT = RemoveCVRef; + static_assert( + IsStoredLocally::value, + "Target object must be in local storage in order to be invoked from it."); + + auto& f = (ObjectInLocalStorage)(state); + return (InvokeR)(static_cast(f), + static_cast>(args)...); +} + +// The manager that is used when a target function is in remote storage and it +// has a trivial destructor +inline void RemoteManagerTrivial(FunctionToCall operation, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + switch (operation) { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote = from->remote; + return; + case FunctionToCall::dispose: +#if defined(__cpp_sized_deallocation) + ::operator delete(from->remote.target, from->remote.size); +#else // __cpp_sized_deallocation + ::operator delete(from->remote.target); +#endif // __cpp_sized_deallocation + return; + } + ABSL_INTERNAL_UNREACHABLE; +} + +// The manager that is used when a target function is in remote storage and the +// destructor of the type is not trivial +template +void RemoteManagerNontrivial(FunctionToCall operation, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + static_assert(!IsStoredLocally::value, + "Remote storage must only be used for types that do not " + "qualify for local storage."); + + switch (operation) { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote.target = from->remote.target; + return; + case FunctionToCall::dispose: + ::delete static_cast(from->remote.target); // Must not throw. + return; + } + ABSL_INTERNAL_UNREACHABLE; +} + +// The invoker that is used when a target function is in remote storage +template +ReturnType RemoteInvoker( + TypeErasedState* const state, + ForwardedParameterType
<P>
... args) noexcept(SigIsNoexcept) { + using RawT = RemoveCVRef; + static_assert(!IsStoredLocally::value, + "Target object must be in remote storage in order to be " + "invoked from it."); + + auto& f = *static_cast(state->remote.target); + return (InvokeR)(static_cast(f), + static_cast>(args)...); +} + +//////////////////////////////////////////////////////////////////////////////// +// +// A metafunction that checks if a type T is an instantiation of +// absl::in_place_type_t (needed for constructor constraints of AnyInvocable). +template +struct IsInPlaceType : std::false_type {}; + +template +struct IsInPlaceType> : std::true_type {}; +// +//////////////////////////////////////////////////////////////////////////////// + +// A constructor name-tag used with CoreImpl (below) to request the +// conversion-constructor. QualDecayedTRef is the decayed-type of the object to +// wrap, along with the cv and reference qualifiers that must be applied when +// performing an invocation of the wrapped object. +template +struct TypedConversionConstruct {}; + +// A helper base class for all core operations of AnyInvocable. Most notably, +// this class creates the function call operator and constraint-checkers so that +// the top-level class does not have to be a series of partial specializations. +// +// Note: This definition exists (as opposed to being a declaration) so that if +// the user of the top-level template accidentally passes a template argument +// that is not a function type, they will get a static_assert in AnyInvocable's +// class body rather than an error stating that Impl is not defined. +template +class Impl {}; // Note: This is partially-specialized later. + +// A std::unique_ptr deleter that deletes memory allocated via ::operator new. +#if defined(__cpp_sized_deallocation) +class TrivialDeleter { + public: + explicit TrivialDeleter(std::size_t size) : size_(size) {} + + void operator()(void* target) const { + ::operator delete(target, size_); + } + + private: + std::size_t size_; +}; +#else // __cpp_sized_deallocation +class TrivialDeleter { + public: + explicit TrivialDeleter(std::size_t) {} + + void operator()(void* target) const { ::operator delete(target); } +}; +#endif // __cpp_sized_deallocation + +template +class CoreImpl; + +constexpr bool IsCompatibleConversion(void*, void*) { return false; } +template +constexpr bool IsCompatibleConversion(CoreImpl*, + CoreImpl*) { + return !NoExceptDest || NoExceptSrc; +} + +// A helper base class for all core operations of AnyInvocable that do not +// depend on the cv/ref qualifiers of the function type. +template +class CoreImpl { + public: + using result_type = ReturnType; + + CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {} + + enum class TargetType : int { + kPointer = 0, + kCompatibleAnyInvocable = 1, + kIncompatibleAnyInvocable = 2, + kOther = 3, + }; + + // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with + // the invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(TypedConversionConstruct, F&& f) { + using DecayedT = RemoveCVRef; + + constexpr TargetType kTargetType = + (std::is_pointer::value || + std::is_member_pointer::value) + ? TargetType::kPointer + : IsCompatibleAnyInvocable::value + ? TargetType::kCompatibleAnyInvocable + : IsAnyInvocable::value + ? 
TargetType::kIncompatibleAnyInvocable + : TargetType::kOther; + // NOTE: We only use integers instead of enums as template parameters in + // order to work around a bug on C++14 under MSVC 2017. + // See b/236131881. + Initialize(kTargetType), QualDecayedTRef>( + std::forward(f)); + } + + // Note: QualTRef here includes the cv-ref qualifiers associated with the + // invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(absl::in_place_type_t, Args&&... args) { + InitializeStorage(std::forward(args)...); + } + + CoreImpl(CoreImpl&& other) noexcept { + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + } + + CoreImpl& operator=(CoreImpl&& other) noexcept { + // Put the left-hand operand in an empty state. + // + // Note: A full reset that leaves us with an object that has its invariants + // intact is necessary in order to handle self-move. This is required by + // types that are used with certain operations of the standard library, such + // as the default definition of std::swap when both operands target the same + // object. + Clear(); + + // Perform the actual move/destory operation on the target function. + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + + return *this; + } + + ~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); } + + // Check whether or not the AnyInvocable is in the empty state. + bool HasValue() const { return invoker_ != nullptr; } + + // Effects: Puts the object into its empty state. + void Clear() { + manager_(FunctionToCall::dispose, &state_, &state_); + manager_ = EmptyManager; + invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) { +// This condition handles types that decay into pointers, which includes +// function references. Since function references cannot be null, GCC warns +// against comparing their decayed form with nullptr. +// Since this is template-heavy code, we prefer to disable these warnings +// locally instead of adding yet another overload of this function. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#pragma GCC diagnostic push +#endif + if (static_cast>(f) == nullptr) { +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + manager_ = EmptyManager; + invoker_ = nullptr; + return; + } + InitializeStorage(std::forward(f)); + } + + template = 0> + void Initialize(F&& f) { + // In this case we can "steal the guts" of the other AnyInvocable. + f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_); + manager_ = f.manager_; + invoker_ = f.invoker_; + + f.manager_ = EmptyManager; + f.invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) { + if (f.HasValue()) { + InitializeStorage(std::forward(f)); + } else { + manager_ = EmptyManager; + invoker_ = nullptr; + } + } + + template > + void Initialize(F&& f) { + InitializeStorage(std::forward(f)); + } + + // Use local (inline) storage for applicable target object types. + template >::value>> + void InitializeStorage(Args&&... 
args) { + using RawT = RemoveCVRef; + ::new (static_cast(&state_.storage)) + RawT(std::forward(args)...); + + invoker_ = LocalInvoker; + // We can simplify our manager if we know the type is trivially copyable. + InitializeLocalManager(); + } + + // Use remote storage for target objects that cannot be stored locally. + template >::value, + int> = 0> + void InitializeStorage(Args&&... args) { + InitializeRemoteManager>(std::forward(args)...); + // This is set after everything else in case an exception is thrown in an + // earlier step of the initialization. + invoker_ = RemoteInvoker; + } + + template ::value>> + void InitializeLocalManager() { + manager_ = LocalManagerTrivial; + } + + template ::value, int> = 0> + void InitializeLocalManager() { + manager_ = LocalManagerNontrivial; + } + + template + using HasTrivialRemoteStorage = + std::integral_constant::value && + alignof(T) <= + ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>; + + template ::value>> + void InitializeRemoteManager(Args&&... args) { + // unique_ptr is used for exception-safety in case construction throws. + std::unique_ptr uninitialized_target( + ::operator new(sizeof(T)), TrivialDeleter(sizeof(T))); + ::new (uninitialized_target.get()) T(std::forward(args)...); + state_.remote.target = uninitialized_target.release(); + state_.remote.size = sizeof(T); + manager_ = RemoteManagerTrivial; + } + + template ::value, int> = 0> + void InitializeRemoteManager(Args&&... args) { + state_.remote.target = ::new T(std::forward(args)...); + manager_ = RemoteManagerNontrivial; + } + + ////////////////////////////////////////////////////////////////////////////// + // + // Type trait to determine if the template argument is an AnyInvocable whose + // function type is compatible enough with ours such that we can + // "move the guts" out of it when moving, rather than having to place a new + // object into remote storage. + + template + struct IsCompatibleAnyInvocable { + static constexpr bool value = false; + }; + + template + struct IsCompatibleAnyInvocable> { + static constexpr bool value = + (IsCompatibleConversion)(static_cast< + typename AnyInvocable::CoreImpl*>( + nullptr), + static_cast(nullptr)); + }; + + // + ////////////////////////////////////////////////////////////////////////////// + + TypeErasedState state_; + ManagerType* manager_; + InvokerType* invoker_; +}; + +// A constructor name-tag used with Impl to request the +// conversion-constructor +struct ConversionConstruct {}; + +//////////////////////////////////////////////////////////////////////////////// +// +// A metafunction that is normally an identity metafunction except that when +// given a std::reference_wrapper, it yields T&. This is necessary because +// currently std::reference_wrapper's operator() is not conditionally noexcept, +// so when checking if such an Invocable is nothrow-invocable, we must pull out +// the underlying type. +template +struct UnwrapStdReferenceWrapperImpl { + using type = T; +}; + +template +struct UnwrapStdReferenceWrapperImpl> { + using type = T&; +}; + +template +using UnwrapStdReferenceWrapper = + typename UnwrapStdReferenceWrapperImpl::type; +// +//////////////////////////////////////////////////////////////////////////////// + +// An alias that always yields std::true_type (used with constraints) where +// substitution failures happen when forming the template arguments. 
+template +using TrueAlias = + std::integral_constant*) != 0>; + +/*SFINAE constraints for the conversion-constructor.*/ +template , AnyInvocable>::value>> +using CanConvert = TrueAlias< + absl::enable_if_t>::value>, + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, F>::value>>; + +/*SFINAE constraints for the std::in_place constructors.*/ +template +using CanEmplace = TrueAlias< + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, Args...>::value>>; + +/*SFINAE constraints for the conversion-assign operator.*/ +template , AnyInvocable>::value>> +using CanAssign = TrueAlias< + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, F>::value>>; + +/*SFINAE constraints for the reference-wrapper conversion-assign operator.*/ +template +using CanAssignReferenceWrapper = TrueAlias< + absl::enable_if_t< + Impl::template CallIsValid>::value>, + absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept< + std::reference_wrapper>::value>>; + +//////////////////////////////////////////////////////////////////////////////// +// +// The constraint for checking whether or not a call meets the noexcept +// callability requirements. This is a preprocessor macro because specifying it +// this way as opposed to a disjunction/branch can improve the user-side error +// messages and avoids an instantiation of std::is_nothrow_invocable_r in the +// cases where the user did not specify a noexcept function type. +// +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \ + ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals) + +// The disjunction below is because we can't rely on std::is_nothrow_invocable_r +// to give the right result when ReturnType is non-moveable in toolchains that +// don't treat non-moveable result types correctly. For example this was the +// case in libc++ before commit c3a24882 (2022-05). +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \ + absl::enable_if_t> inv_quals, \ + P...>, \ + std::conjunction< \ + std::is_nothrow_invocable< \ + UnwrapStdReferenceWrapper> inv_quals, P...>, \ + std::is_same< \ + ReturnType, \ + absl::base_internal::invoke_result_t< \ + UnwrapStdReferenceWrapper> inv_quals, \ + P...>>>>::value> + +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals) +// +//////////////////////////////////////////////////////////////////////////////// + +// A macro to generate partial specializations of Impl with the different +// combinations of supported cv/reference qualifiers and noexcept specifier. +// +// Here, `cv` are the cv-qualifiers if any, `ref` is the ref-qualifier if any, +// inv_quals is the reference type to be used when invoking the target, and +// noex is "true" if the function type is noexcept, or false if it is not. +// +// The CallIsValid condition is more complicated than simply using +// absl::base_internal::is_invocable_r because we can't rely on it to give the +// right result when ReturnType is non-moveable in toolchains that don't treat +// non-moveable result types correctly. For example this was the case in libc++ +// before commit c3a24882 (2022-05). 
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \ + template \ + class Impl \ + : public CoreImpl { \ + public: \ + /*The base class, which contains the datamembers and core operations*/ \ + using Core = CoreImpl; \ + \ + /*SFINAE constraint to check if F is invocable with the proper signature*/ \ + template \ + using CallIsValid = TrueAlias inv_quals, P...>, \ + std::is_same inv_quals, P...>>>::value>>; \ + \ + /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \ + template \ + using CallIsNoexceptIfSigIsNoexcept = \ + TrueAlias; \ + \ + /*Put the AnyInvocable into an empty state.*/ \ + Impl() = default; \ + \ + /*The implementation of a conversion-constructor from "f*/ \ + /*This forwards to Core, attaching inv_quals so that the base class*/ \ + /*knows how to properly type-erase the invocation.*/ \ + template \ + explicit Impl(ConversionConstruct, F&& f) \ + : Core(TypedConversionConstruct< \ + typename std::decay::type inv_quals>(), \ + std::forward(f)) {} \ + \ + /*Forward along the in-place construction parameters.*/ \ + template \ + explicit Impl(absl::in_place_type_t, Args&&... args) \ + : Core(absl::in_place_type inv_quals>, \ + std::forward(args)...) {} \ + \ + InvokerType* ExtractInvoker() cv { \ + using QualifiedTestType = int cv ref; \ + auto* invoker = this->invoker_; \ + if (!std::is_const::value && \ + std::is_rvalue_reference::value) { \ + ABSL_HARDENING_ASSERT([this]() { \ + /* We checked that this isn't const above, so const_cast is safe */ \ + const_cast(this)->invoker_ = \ + [](TypeErasedState*, \ + ForwardedParameterType

+ *
+ * opus_encode() and opus_encode_float() return the number of bytes actually written to the packet.
+ * The return value can be negative, which indicates that an error has occurred. If the return value
+ * is 2 bytes or less, then the packet does not need to be transmitted (DTX).
+ *
+ * Once the encoder state is no longer needed, it can be destroyed with
+ *
+ * @code
+ * opus_encoder_destroy(enc);
+ * @endcode
+ *
+ * If the encoder was created with opus_encoder_init() rather than opus_encoder_create(),
+ * then no action is required aside from potentially freeing the memory that was manually
+ * allocated for it (calling free(enc) for the example above).
+ *
+ */
+
+/** Opus encoder state.
+ * This contains the complete state of an Opus encoder.
+ * It is position independent and can be freely copied.
+ * @see opus_encoder_create,opus_encoder_init
+ */
+typedef struct OpusEncoder OpusEncoder;
+
+/** Gets the size of an OpusEncoder structure.
+ * @param[in] channels int: Number of channels.
+ *                          This must be 1 or 2.
+ * @returns The size in bytes.
+ */
+OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_encoder_get_size(int channels);
+
+/**
+ */
+
+/** Allocates and initializes an encoder state.
+ * There are three coding modes:
+ *
+ * @ref OPUS_APPLICATION_VOIP gives best quality at a given bitrate for voice
+ * signals. It enhances the input signal by high-pass filtering and
+ * emphasizing formants and harmonics. Optionally it includes in-band
+ * forward error correction to protect against packet loss. Use this
+ * mode for typical VoIP applications. Because of the enhancement,
+ * even at high bitrates the output may sound different from the input.
+ *
+ * @ref OPUS_APPLICATION_AUDIO gives best quality at a given bitrate for most
+ * non-voice signals like music. Use this mode for music and mixed
+ * (music/voice) content, broadcast, and applications requiring less
+ * than 15 ms of coding delay.
+ *
+ * @ref OPUS_APPLICATION_RESTRICTED_LOWDELAY configures low-delay mode that
+ * disables the speech-optimized mode in exchange for slightly reduced delay.
+ * This mode can only be set on a newly initialized or freshly reset encoder
+ * because it changes the codec delay.
+ *
+ * This is useful when the caller knows that the speech-optimized modes will not be needed (use with caution).
+ * @param [in] Fs opus_int32: Sampling rate of input signal (Hz)
+ *                            This must be one of 8000, 12000, 16000,
+ *                            24000, or 48000.
+ * @param [in] channels int: Number of channels (1 or 2) in input signal
+ * @param [in] application int: Coding mode (@ref OPUS_APPLICATION_VOIP/@ref OPUS_APPLICATION_AUDIO/@ref OPUS_APPLICATION_RESTRICTED_LOWDELAY)
+ * @param [out] error int*: @ref opus_errorcodes
+ * @note Regardless of the sampling rate and number of channels selected, the Opus encoder
+ * can switch to a lower audio bandwidth or number of channels if the bitrate
+ * selected is too low. This also means that it is safe to always use 48 kHz stereo input
+ * and let the encoder optimize the encoding.
+ */
+OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusEncoder *opus_encoder_create(
+    opus_int32 Fs,
+    int channels,
+    int application,
+    int *error
+);
+
+/** Initializes a previously allocated encoder state
+ * The memory pointed to by st must be at least the size returned by opus_encoder_get_size().
+ * This is intended for applications which use their own allocator instead of malloc.
+ * @see opus_encoder_create(),opus_encoder_get_size()
+ * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL.
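For illustration, a minimal sketch of the allocator-managed path described above, assuming a 48 kHz mono VoIP encoder; every parameter choice here is an example, not a requirement.

#include <stdlib.h>
#include "opus.h"

/* Initialize an encoder in caller-managed memory instead of using
 * opus_encoder_create(). 48000 Hz, 1 channel and OPUS_APPLICATION_VOIP
 * are illustrative assumptions. */
static OpusEncoder *make_encoder(void)
{
    int size = opus_encoder_get_size(1);
    OpusEncoder *enc = (OpusEncoder *)malloc(size);
    if (enc == NULL)
        return NULL;
    if (opus_encoder_init(enc, 48000, 1, OPUS_APPLICATION_VOIP) != OPUS_OK) {
        free(enc);          /* initialization failed */
        return NULL;
    }
    return enc;             /* caller calls free(enc) once it is no longer needed */
}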
+ * @param [in] st OpusEncoder*: Encoder state + * @param [in] Fs opus_int32: Sampling rate of input signal (Hz) + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) in input signal + * @param [in] application int: Coding mode (OPUS_APPLICATION_VOIP/OPUS_APPLICATION_AUDIO/OPUS_APPLICATION_RESTRICTED_LOWDELAY) + * @retval #OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_EXPORT int opus_encoder_init( + OpusEncoder *st, + opus_int32 Fs, + int channels, + int application +) OPUS_ARG_NONNULL(1); + +/** Encodes an Opus frame. + * @param [in] st OpusEncoder*: Encoder state + * @param [in] pcm opus_int16*: Input signal (interleaved if 2 channels). length is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size int: Number of samples per channel in the + * input signal. + * This must be an Opus frame size for + * the encoder's sampling rate. + * For example, at 48 kHz the permitted + * values are 120, 240, 480, 960, 1920, + * and 2880. + * Passing in a duration of less than + * 10 ms (480 samples at 48 kHz) will + * prevent the encoder from using the LPC + * or hybrid modes. + * @param [out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_encode( + OpusEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes an Opus frame from floating point input. + * @param [in] st OpusEncoder*: Encoder state + * @param [in] pcm float*: Input in float format (interleaved if 2 channels), with a normal range of +/-1.0. + * Samples with a range beyond +/-1.0 are supported but will + * be clipped by decoders using the integer API and should + * only be used if it is known that the far end supports + * extended dynamic range. + * length is frame_size*channels*sizeof(float) + * @param [in] frame_size int: Number of samples per channel in the + * input signal. + * This must be an Opus frame size for + * the encoder's sampling rate. + * For example, at 48 kHz the permitted + * values are 120, 240, 480, 960, 1920, + * and 2880. + * Passing in a duration of less than + * 10 ms (480 samples at 48 kHz) will + * prevent the encoder from using the LPC + * or hybrid modes. + * @param [out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
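A hedged sketch of the encode call described above: one 20 ms frame at 48 kHz, with an illustrative 64 kb/s bitrate set through opus_encoder_ctl() and the DTX rule from earlier applied to the return value. The buffer size and bitrate are assumptions.

#include "opus.h"

/* Encode one 20 ms frame at 48 kHz and report how many bytes need sending. */
static int encode_one_frame(OpusEncoder *enc, const opus_int16 *pcm,
                            unsigned char *packet, opus_int32 max_bytes)
{
    const int frame_size = 48000 / 50;   /* 960 samples per channel = 20 ms */
    opus_int32 nbytes;

    opus_encoder_ctl(enc, OPUS_SET_BITRATE(64000));   /* example bitrate */
    nbytes = opus_encode(enc, pcm, frame_size, packet, max_bytes);
    if (nbytes < 0)
        return (int)nbytes;              /* an error from opus_errorcodes */
    if (nbytes <= 2)
        return 0;                        /* DTX: nothing worth transmitting */
    return (int)nbytes;                  /* number of bytes to transmit */
}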
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_encode_float( + OpusEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Frees an OpusEncoder allocated by opus_encoder_create(). + * @param[in] st OpusEncoder*: State to be freed. + */ +OPUS_EXPORT void opus_encoder_destroy(OpusEncoder *st); + +/** Perform a CTL function on an Opus encoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @param st OpusEncoder*: Encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls or + * @ref opus_encoderctls. + * @see opus_genericctls + * @see opus_encoderctls + */ +OPUS_EXPORT int opus_encoder_ctl(OpusEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); +/**@}*/ + +/** @defgroup opus_decoder Opus Decoder + * @{ + * + * @brief This page describes the process and functions used to decode Opus. + * + * The decoding process also starts with creating a decoder + * state. This can be done with: + * @code + * int error; + * OpusDecoder *dec; + * dec = opus_decoder_create(Fs, channels, &error); + * @endcode + * where + * @li Fs is the sampling rate and must be 8000, 12000, 16000, 24000, or 48000 + * @li channels is the number of channels (1 or 2) + * @li error will hold the error code in case of failure (or #OPUS_OK on success) + * @li the return value is a newly created decoder state to be used for decoding + * + * While opus_decoder_create() allocates memory for the state, it's also possible + * to initialize pre-allocated memory: + * @code + * int size; + * int error; + * OpusDecoder *dec; + * size = opus_decoder_get_size(channels); + * dec = malloc(size); + * error = opus_decoder_init(dec, Fs, channels); + * @endcode + * where opus_decoder_get_size() returns the required size for the decoder state. Note that + * future versions of this code may change the size, so no assuptions should be made about it. + * + * The decoder state is always continuous in memory and only a shallow copy is sufficient + * to copy it (e.g. memcpy()) + * + * To decode a frame, opus_decode() or opus_decode_float() must be called with a packet of compressed audio data: + * @code + * frame_size = opus_decode(dec, packet, len, decoded, max_size, 0); + * @endcode + * where + * + * @li packet is the byte array containing the compressed data + * @li len is the exact number of bytes contained in the packet + * @li decoded is the decoded audio data in opus_int16 (or float for opus_decode_float()) + * @li max_size is the max duration of the frame in samples (per channel) that can fit into the decoded_frame array + * + * opus_decode() and opus_decode_float() return the number of samples (per channel) decoded from the packet. + * If that value is negative, then an error has occurred. This can occur if the packet is corrupted or if the audio + * buffer is too small to hold the decoded audio. + * + * Opus is a stateful codec with overlapping blocks and as a result Opus + * packets are not coded independently of each other. Packets must be + * passed into the decoder serially and in the correct order for a correct + * decode. Lost packets can be replaced with loss concealment by calling + * the decoder with a null pointer and zero length for the missing packet. 
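A minimal sketch of the loss-concealment convention just described, assuming 48 kHz decoding and a 20 ms frame duration for the missing packet; both figures are assumptions for illustration.

#include <stddef.h>
#include "opus.h"

#define MAX_FRAME_SIZE 5760   /* 120 ms at 48 kHz, the largest possible frame */

/* Decode one packet, asking for concealment when it was lost (data == NULL). */
static int decode_one(OpusDecoder *dec, const unsigned char *data,
                      opus_int32 len, opus_int16 *pcm_out)
{
    if (data == NULL)
        /* PLC: NULL data and zero length; frame_size must equal the missing
         * duration (960 samples = 20 ms at 48 kHz is assumed here). */
        return opus_decode(dec, NULL, 0, pcm_out, 960, 0);
    return opus_decode(dec, data, len, pcm_out, MAX_FRAME_SIZE, 0);
}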
+ * + * A single codec state may only be accessed from a single thread at + * a time and any required locking must be performed by the caller. Separate + * streams must be decoded with separate decoder states and can be decoded + * in parallel unless the library was compiled with NONTHREADSAFE_PSEUDOSTACK + * defined. + * + */ + +/** Opus decoder state. + * This contains the complete state of an Opus decoder. + * It is position independent and can be freely copied. + * @see opus_decoder_create,opus_decoder_init + */ +typedef struct OpusDecoder OpusDecoder; + +/** Gets the size of an OpusDecoder structure. + * @param [in] channels int: Number of channels. + * This must be 1 or 2. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decoder_get_size(int channels); + +/** Allocates and initializes a decoder state. + * @param [in] Fs opus_int32: Sample rate to decode at (Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) to decode + * @param [out] error int*: #OPUS_OK Success or @ref opus_errorcodes + * + * Internally Opus stores data at 48000 Hz, so that should be the default + * value for Fs. However, the decoder can efficiently decode to buffers + * at 8, 12, 16, and 24 kHz so if for some reason the caller cannot use + * data at the full sample rate, or knows the compressed data doesn't + * use the full frequency range, it can request decoding at a reduced + * rate. Likewise, the decoder is capable of filling in either mono or + * interleaved stereo pcm buffers, at the caller's request. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusDecoder *opus_decoder_create( + opus_int32 Fs, + int channels, + int *error +); + +/** Initializes a previously allocated decoder state. + * The state must be at least the size returned by opus_decoder_get_size(). + * This is intended for applications which use their own allocator instead of malloc. @see opus_decoder_create,opus_decoder_get_size + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @param [in] st OpusDecoder*: Decoder state. + * @param [in] Fs opus_int32: Sampling rate to decode to (Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) to decode + * @retval #OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_EXPORT int opus_decoder_init( + OpusDecoder *st, + opus_int32 Fs, + int channels +) OPUS_ARG_NONNULL(1); + +/** Decode an Opus packet. + * @param [in] st OpusDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len opus_int32: Number of bytes in payload* + * @param [out] pcm opus_int16*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size Number of samples per channel of available space in \a pcm. + * If this is less than the maximum packet duration (120ms; 5760 for 48kHz), this function will + * not be capable of decoding some packets. In the case of PLC (data==NULL) or FEC (decode_fec=1), + * then frame_size needs to be exactly the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the next incoming packet. For the PLC and + * FEC cases, frame_size must be a multiple of 2.5 ms. + * @param [in] decode_fec int: Flag (0 or 1) to request that any in-band forward error correction data be + * decoded. 
If no such data is available, the frame is decoded as if it were lost. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decode( + OpusDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode an Opus packet with floating point output. + * @param [in] st OpusDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len opus_int32: Number of bytes in payload + * @param [out] pcm float*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(float) + * @param [in] frame_size Number of samples per channel of available space in \a pcm. + * If this is less than the maximum packet duration (120ms; 5760 for 48kHz), this function will + * not be capable of decoding some packets. In the case of PLC (data==NULL) or FEC (decode_fec=1), + * then frame_size needs to be exactly the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the next incoming packet. For the PLC and + * FEC cases, frame_size must be a multiple of 2.5 ms. + * @param [in] decode_fec int: Flag (0 or 1) to request that any in-band forward error correction data be + * decoded. If no such data is available the frame is decoded as if it were lost. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decode_float( + OpusDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on an Opus decoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @param st OpusDecoder*: Decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls or + * @ref opus_decoderctls. + * @see opus_genericctls + * @see opus_decoderctls + */ +OPUS_EXPORT int opus_decoder_ctl(OpusDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/** Frees an OpusDecoder allocated by opus_decoder_create(). + * @param[in] st OpusDecoder*: State to be freed. + */ +OPUS_EXPORT void opus_decoder_destroy(OpusDecoder *st); + +/** Parse an opus packet into one or more frames. + * Opus_decode will perform this operation internally so most applications do + * not need to use this function. + * This function does not copy the frames, the returned pointers are pointers into + * the input packet. + * @param [in] data char*: Opus packet to be parsed + * @param [in] len opus_int32: size of data + * @param [out] out_toc char*: TOC pointer + * @param [out] frames char*[48] encapsulated frames + * @param [out] size opus_int16[48] sizes of the encapsulated frames + * @param [out] payload_offset int*: returns the position of the payload within the packet (in bytes) + * @returns number of frames + */ +OPUS_EXPORT int opus_packet_parse( + const unsigned char *data, + opus_int32 len, + unsigned char *out_toc, + const unsigned char *frames[48], + opus_int16 size[48], + int *payload_offset +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(5); + +/** Gets the bandwidth of an Opus packet. 
+ * @param [in] data char*: Opus packet + * @retval OPUS_BANDWIDTH_NARROWBAND Narrowband (4kHz bandpass) + * @retval OPUS_BANDWIDTH_MEDIUMBAND Mediumband (6kHz bandpass) + * @retval OPUS_BANDWIDTH_WIDEBAND Wideband (8kHz bandpass) + * @retval OPUS_BANDWIDTH_SUPERWIDEBAND Superwideband (12kHz bandpass) + * @retval OPUS_BANDWIDTH_FULLBAND Fullband (20kHz bandpass) + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_bandwidth(const unsigned char *data) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples per frame from an Opus packet. + * @param [in] data char*: Opus packet. + * This must contain at least one byte of + * data. + * @param [in] Fs opus_int32: Sampling rate in Hz. + * This must be a multiple of 400, or + * inaccurate results will be returned. + * @returns Number of samples per frame. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_samples_per_frame(const unsigned char *data, opus_int32 Fs) OPUS_ARG_NONNULL(1); + +/** Gets the number of channels from an Opus packet. + * @param [in] data char*: Opus packet + * @returns Number of channels + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_channels(const unsigned char *data) OPUS_ARG_NONNULL(1); + +/** Gets the number of frames in an Opus packet. + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @returns Number of frames + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_frames(const unsigned char packet[], opus_int32 len) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples of an Opus packet. + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @param [in] Fs opus_int32: Sampling rate in Hz. + * This must be a multiple of 400, or + * inaccurate results will be returned. + * @returns Number of samples + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_samples(const unsigned char packet[], opus_int32 len, opus_int32 Fs) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples of an Opus packet. + * @param [in] dec OpusDecoder*: Decoder state + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @returns Number of samples + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decoder_get_nb_samples(const OpusDecoder *dec, const unsigned char packet[], opus_int32 len) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); + +/** Applies soft-clipping to bring a float signal within the [-1,1] range. If + * the signal is already in that range, nothing is done. If there are values + * outside of [-1,1], then the signal is clipped as smoothly as possible to + * both fit in the range and avoid creating excessive distortion in the + * process. 
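A small usage sketch of the packet-inspection helpers above, assuming the packet is to be interpreted at 48 kHz; the printed report is purely illustrative.

#include <stdio.h>
#include "opus.h"

/* Report basic properties of a received packet. */
static void describe_packet(const unsigned char *packet, opus_int32 len)
{
    int channels = opus_packet_get_nb_channels(packet);
    int frames   = opus_packet_get_nb_frames(packet, len);
    int samples  = opus_packet_get_nb_samples(packet, len, 48000);
    if (channels < 0 || frames < 0 || samples < 0) {
        printf("invalid packet\n");      /* e.g. OPUS_INVALID_PACKET */
        return;
    }
    printf("%d channel(s), %d frame(s), %.1f ms of audio\n",
           channels, frames, samples * 1000.0 / 48000);
}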
+ * @param [in,out] pcm float*: Input PCM and modified PCM + * @param [in] frame_size int Number of samples per channel to process + * @param [in] channels int: Number of channels + * @param [in,out] softclip_mem float*: State memory for the soft clipping process (one float per channel, initialized to zero) + */ +OPUS_EXPORT void opus_pcm_soft_clip(float *pcm, int frame_size, int channels, float *softclip_mem); + + +/**@}*/ + +/** @defgroup opus_repacketizer Repacketizer + * @{ + * + * The repacketizer can be used to merge multiple Opus packets into a single + * packet or alternatively to split Opus packets that have previously been + * merged. Splitting valid Opus packets is always guaranteed to succeed, + * whereas merging valid packets only succeeds if all frames have the same + * mode, bandwidth, and frame size, and when the total duration of the merged + * packet is no more than 120 ms. The 120 ms limit comes from the + * specification and limits decoder memory requirements at a point where + * framing overhead becomes negligible. + * + * The repacketizer currently only operates on elementary Opus + * streams. It will not manipualte multistream packets successfully, except in + * the degenerate case where they consist of data from a single stream. + * + * The repacketizing process starts with creating a repacketizer state, either + * by calling opus_repacketizer_create() or by allocating the memory yourself, + * e.g., + * @code + * OpusRepacketizer *rp; + * rp = (OpusRepacketizer*)malloc(opus_repacketizer_get_size()); + * if (rp != NULL) + * opus_repacketizer_init(rp); + * @endcode + * + * Then the application should submit packets with opus_repacketizer_cat(), + * extract new packets with opus_repacketizer_out() or + * opus_repacketizer_out_range(), and then reset the state for the next set of + * input packets via opus_repacketizer_init(). + * + * For example, to split a sequence of packets into individual frames: + * @code + * unsigned char *data; + * int len; + * while (get_next_packet(&data, &len)) + * { + * unsigned char out[1276]; + * opus_int32 out_len; + * int nb_frames; + * int err; + * int i; + * err = opus_repacketizer_cat(rp, data, len); + * if (err != OPUS_OK) + * { + * release_packet(data); + * return err; + * } + * nb_frames = opus_repacketizer_get_nb_frames(rp); + * for (i = 0; i < nb_frames; i++) + * { + * out_len = opus_repacketizer_out_range(rp, i, i+1, out, sizeof(out)); + * if (out_len < 0) + * { + * release_packet(data); + * return (int)out_len; + * } + * output_next_packet(out, out_len); + * } + * opus_repacketizer_init(rp); + * release_packet(data); + * } + * @endcode + * + * Alternatively, to combine a sequence of frames into packets that each + * contain up to TARGET_DURATION_MS milliseconds of data: + * @code + * // The maximum number of packets with duration TARGET_DURATION_MS occurs + * // when the frame size is 2.5 ms, for a total of (TARGET_DURATION_MS*2/5) + * // packets. 
+ * unsigned char *data[(TARGET_DURATION_MS*2/5)+1]; + * opus_int32 len[(TARGET_DURATION_MS*2/5)+1]; + * int nb_packets; + * unsigned char out[1277*(TARGET_DURATION_MS*2/2)]; + * opus_int32 out_len; + * int prev_toc; + * nb_packets = 0; + * while (get_next_packet(data+nb_packets, len+nb_packets)) + * { + * int nb_frames; + * int err; + * nb_frames = opus_packet_get_nb_frames(data[nb_packets], len[nb_packets]); + * if (nb_frames < 1) + * { + * release_packets(data, nb_packets+1); + * return nb_frames; + * } + * nb_frames += opus_repacketizer_get_nb_frames(rp); + * // If adding the next packet would exceed our target, or it has an + * // incompatible TOC sequence, output the packets we already have before + * // submitting it. + * // N.B., The nb_packets > 0 check ensures we've submitted at least one + * // packet since the last call to opus_repacketizer_init(). Otherwise a + * // single packet longer than TARGET_DURATION_MS would cause us to try to + * // output an (invalid) empty packet. It also ensures that prev_toc has + * // been set to a valid value. Additionally, len[nb_packets] > 0 is + * // guaranteed by the call to opus_packet_get_nb_frames() above, so the + * // reference to data[nb_packets][0] should be valid. + * if (nb_packets > 0 && ( + * ((prev_toc & 0xFC) != (data[nb_packets][0] & 0xFC)) || + * opus_packet_get_samples_per_frame(data[nb_packets], 48000)*nb_frames > + * TARGET_DURATION_MS*48)) + * { + * out_len = opus_repacketizer_out(rp, out, sizeof(out)); + * if (out_len < 0) + * { + * release_packets(data, nb_packets+1); + * return (int)out_len; + * } + * output_next_packet(out, out_len); + * opus_repacketizer_init(rp); + * release_packets(data, nb_packets); + * data[0] = data[nb_packets]; + * len[0] = len[nb_packets]; + * nb_packets = 0; + * } + * err = opus_repacketizer_cat(rp, data[nb_packets], len[nb_packets]); + * if (err != OPUS_OK) + * { + * release_packets(data, nb_packets+1); + * return err; + * } + * prev_toc = data[nb_packets][0]; + * nb_packets++; + * } + * // Output the final, partial packet. + * if (nb_packets > 0) + * { + * out_len = opus_repacketizer_out(rp, out, sizeof(out)); + * release_packets(data, nb_packets); + * if (out_len < 0) + * return (int)out_len; + * output_next_packet(out, out_len); + * } + * @endcode + * + * An alternate way of merging packets is to simply call opus_repacketizer_cat() + * unconditionally until it fails. At that point, the merged packet can be + * obtained with opus_repacketizer_out() and the input packet for which + * opus_repacketizer_cat() needs to be re-added to a newly reinitialized + * repacketizer state. + */ + +typedef struct OpusRepacketizer OpusRepacketizer; + +/** Gets the size of an OpusRepacketizer structure. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_repacketizer_get_size(void); + +/** (Re)initializes a previously allocated repacketizer state. + * The state must be at least the size returned by opus_repacketizer_get_size(). + * This can be used for applications which use their own allocator instead of + * malloc(). + * It must also be called to reset the queue of packets waiting to be + * repacketized, which is necessary if the maximum packet duration of 120 ms + * is reached or if you wish to submit packets with a different Opus + * configuration (coding mode, audio bandwidth, frame size, or channel count). + * Failure to do so will prevent a new packet from being added with + * opus_repacketizer_cat(). 
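A brief sketch of the "cat until it fails" merging strategy mentioned above. get_next_packet() and send_packet() are assumed application helpers, not libopus functions, and the caller is expected to flush whatever remains in the repacketizer afterwards.

#include "opus.h"

/* Append packets until the repacketizer refuses one, then emit the merged
 * packet and re-submit the rejected packet to a reinitialized state.
 * get_next_packet() must keep returned buffers valid while they are queued. */
static void merge_stream(OpusRepacketizer *rp,
                         unsigned char *out, opus_int32 out_size)
{
    const unsigned char *data;
    opus_int32 len;
    while (get_next_packet(&data, &len)) {
        if (opus_repacketizer_cat(rp, data, len) != OPUS_OK) {
            opus_int32 out_len = opus_repacketizer_out(rp, out, out_size);
            if (out_len > 0)
                send_packet(out, out_len);
            opus_repacketizer_init(rp);
            /* Re-add the packet that did not fit; a packet that alone exceeds
             * 120 ms would still fail and would need splitting instead. */
            opus_repacketizer_cat(rp, data, len);
        }
    }
    /* Frames still queued in rp are left for the caller to flush. */
}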
+ * @see opus_repacketizer_create + * @see opus_repacketizer_get_size + * @see opus_repacketizer_cat + * @param rp OpusRepacketizer*: The repacketizer state to + * (re)initialize. + * @returns A pointer to the same repacketizer state that was passed in. + */ +OPUS_EXPORT OpusRepacketizer *opus_repacketizer_init(OpusRepacketizer *rp) OPUS_ARG_NONNULL(1); + +/** Allocates memory and initializes the new repacketizer with + * opus_repacketizer_init(). + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusRepacketizer *opus_repacketizer_create(void); + +/** Frees an OpusRepacketizer allocated by + * opus_repacketizer_create(). + * @param[in] rp OpusRepacketizer*: State to be freed. + */ +OPUS_EXPORT void opus_repacketizer_destroy(OpusRepacketizer *rp); + +/** Add a packet to the current repacketizer state. + * This packet must match the configuration of any packets already submitted + * for repacketization since the last call to opus_repacketizer_init(). + * This means that it must have the same coding mode, audio bandwidth, frame + * size, and channel count. + * This can be checked in advance by examining the top 6 bits of the first + * byte of the packet, and ensuring they match the top 6 bits of the first + * byte of any previously submitted packet. + * The total duration of audio in the repacketizer state also must not exceed + * 120 ms, the maximum duration of a single packet, after adding this packet. + * + * The contents of the current repacketizer state can be extracted into new + * packets using opus_repacketizer_out() or opus_repacketizer_out_range(). + * + * In order to add a packet with a different configuration or to add more + * audio beyond 120 ms, you must clear the repacketizer state by calling + * opus_repacketizer_init(). + * If a packet is too large to add to the current repacketizer state, no part + * of it is added, even if it contains multiple frames, some of which might + * fit. + * If you wish to be able to add parts of such packets, you should first use + * another repacketizer to split the packet into pieces and add them + * individually. + * @see opus_repacketizer_out_range + * @see opus_repacketizer_out + * @see opus_repacketizer_init + * @param rp OpusRepacketizer*: The repacketizer state to which to + * add the packet. + * @param[in] data const unsigned char*: The packet data. + * The application must ensure + * this pointer remains valid + * until the next call to + * opus_repacketizer_init() or + * opus_repacketizer_destroy(). + * @param len opus_int32: The number of bytes in the packet data. + * @returns An error code indicating whether or not the operation succeeded. + * @retval #OPUS_OK The packet's contents have been added to the repacketizer + * state. + * @retval #OPUS_INVALID_PACKET The packet did not have a valid TOC sequence, + * the packet's TOC sequence was not compatible + * with previously submitted packets (because + * the coding mode, audio bandwidth, frame size, + * or channel count did not match), or adding + * this packet would increase the total amount of + * audio stored in the repacketizer state to more + * than 120 ms. + */ +OPUS_EXPORT int opus_repacketizer_cat(OpusRepacketizer *rp, const unsigned char *data, opus_int32 len) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); + + +/** Construct a new packet from data previously submitted to the repacketizer + * state via opus_repacketizer_cat(). + * @param rp OpusRepacketizer*: The repacketizer state from which to + * construct the new packet. 
+ * @param begin int: The index of the first frame in the current + * repacketizer state to include in the output. + * @param end int: One past the index of the last frame in the + * current repacketizer state to include in the + * output. + * @param[out] data const unsigned char*: The buffer in which to + * store the output packet. + * @param maxlen opus_int32: The maximum number of bytes to store in + * the output buffer. In order to guarantee + * success, this should be at least + * 1276 for a single frame, + * or for multiple frames, + * 1277*(end-begin). + * However, 1*(end-begin) plus + * the size of all packet data submitted to + * the repacketizer since the last call to + * opus_repacketizer_init() or + * opus_repacketizer_create() is also + * sufficient, and possibly much smaller. + * @returns The total size of the output packet on success, or an error code + * on failure. + * @retval #OPUS_BAD_ARG [begin,end) was an invalid range of + * frames (begin < 0, begin >= end, or end > + * opus_repacketizer_get_nb_frames()). + * @retval #OPUS_BUFFER_TOO_SMALL \a maxlen was insufficient to contain the + * complete output packet. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_repacketizer_out_range(OpusRepacketizer *rp, int begin, int end, unsigned char *data, opus_int32 maxlen) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Return the total number of frames contained in packet data submitted to + * the repacketizer state so far via opus_repacketizer_cat() since the last + * call to opus_repacketizer_init() or opus_repacketizer_create(). + * This defines the valid range of packets that can be extracted with + * opus_repacketizer_out_range() or opus_repacketizer_out(). + * @param rp OpusRepacketizer*: The repacketizer state containing the + * frames. + * @returns The total number of frames contained in the packet data submitted + * to the repacketizer state. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_repacketizer_get_nb_frames(OpusRepacketizer *rp) OPUS_ARG_NONNULL(1); + +/** Construct a new packet from data previously submitted to the repacketizer + * state via opus_repacketizer_cat(). + * This is a convenience routine that returns all the data submitted so far + * in a single packet. + * It is equivalent to calling + * @code + * opus_repacketizer_out_range(rp, 0, opus_repacketizer_get_nb_frames(rp), + * data, maxlen) + * @endcode + * @param rp OpusRepacketizer*: The repacketizer state from which to + * construct the new packet. + * @param[out] data const unsigned char*: The buffer in which to + * store the output packet. + * @param maxlen opus_int32: The maximum number of bytes to store in + * the output buffer. In order to guarantee + * success, this should be at least + * 1277*opus_repacketizer_get_nb_frames(rp). + * However, + * 1*opus_repacketizer_get_nb_frames(rp) + * plus the size of all packet data + * submitted to the repacketizer since the + * last call to opus_repacketizer_init() or + * opus_repacketizer_create() is also + * sufficient, and possibly much smaller. + * @returns The total size of the output packet on success, or an error code + * on failure. + * @retval #OPUS_BUFFER_TOO_SMALL \a maxlen was insufficient to contain the + * complete output packet. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_repacketizer_out(OpusRepacketizer *rp, unsigned char *data, opus_int32 maxlen) OPUS_ARG_NONNULL(1); + +/** Pads a given Opus packet to a larger size (possibly changing the TOC sequence). 
+ * @param[in,out] data const unsigned char*: The buffer containing the
+ *                                            packet to pad.
+ * @param len opus_int32: The size of the packet.
+ *                        This must be at least 1.
+ * @param new_len opus_int32: The desired size of the packet after padding.
+ *                            This must be at least as large as len.
+ * @returns an error code
+ * @retval #OPUS_OK \a on success.
+ * @retval #OPUS_BAD_ARG \a len was less than 1 or new_len was less than len.
+ * @retval #OPUS_INVALID_PACKET \a data did not contain a valid Opus packet.
+ */
+OPUS_EXPORT int opus_packet_pad(unsigned char *data, opus_int32 len, opus_int32 new_len);
+
+/** Remove all padding from a given Opus packet and rewrite the TOC sequence to
+ * minimize space usage.
+ * @param[in,out] data const unsigned char*: The buffer containing the
+ *                                            packet to strip.
+ * @param len opus_int32: The size of the packet.
+ *                        This must be at least 1.
+ * @returns The new size of the output packet on success, or an error code
+ *          on failure.
+ * @retval #OPUS_BAD_ARG \a len was less than 1.
+ * @retval #OPUS_INVALID_PACKET \a data did not contain a valid Opus packet.
+ */
+OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_packet_unpad(unsigned char *data, opus_int32 len);
+
+/** Pads a given Opus multi-stream packet to a larger size (possibly changing the TOC sequence).
+ * @param[in,out] data const unsigned char*: The buffer containing the
+ *                                            packet to pad.
+ * @param len opus_int32: The size of the packet.
+ *                        This must be at least 1.
+ * @param new_len opus_int32: The desired size of the packet after padding.
+ *                            This must be at least as large as len.
+ * @param nb_streams opus_int32: The number of streams (not channels) in the packet.
+ *                               This must be at least 1.
+ * @returns an error code
+ * @retval #OPUS_OK \a on success.
+ * @retval #OPUS_BAD_ARG \a len was less than 1 or new_len was less than len.
+ * @retval #OPUS_INVALID_PACKET \a data did not contain a valid Opus packet.
+ */
+OPUS_EXPORT int opus_multistream_packet_pad(unsigned char *data, opus_int32 len, opus_int32 new_len, int nb_streams);
+
+/** Remove all padding from a given Opus multi-stream packet and rewrite the TOC sequence to
+ * minimize space usage.
+ * @param[in,out] data const unsigned char*: The buffer containing the
+ *                                            packet to strip.
+ * @param len opus_int32: The size of the packet.
+ *                        This must be at least 1.
+ * @param nb_streams opus_int32: The number of streams (not channels) in the packet.
+ *                               This must be at least 1.
+ * @returns The new size of the output packet on success, or an error code
+ *          on failure.
+ * @retval #OPUS_BAD_ARG \a len was less than 1.
+ * @retval #OPUS_INVALID_PACKET \a data did not contain a valid Opus packet.
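A small sketch of padding a single-stream packet to a fixed size and stripping the padding again; the 200-byte target is an arbitrary assumption and the buffer behind the packet must be large enough to hold it.

#include "opus.h"

/* Grow a packet to 200 bytes for a constant-size transport, then remove the
 * padding again. Returns the stripped size, or a negative error code. */
static opus_int32 pad_then_unpad(unsigned char *packet, opus_int32 len)
{
    if (opus_packet_pad(packet, len, 200) != OPUS_OK)
        return len;                          /* padding was not applied */
    /* ... the 200-byte packet would be sent over the fixed-size transport ... */
    return opus_packet_unpad(packet, 200);   /* new (smaller) size, or an error */
}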
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_packet_unpad(unsigned char *data, opus_int32 len, int nb_streams); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_H */ diff --git a/third-party/webrtc/dependencies/third_party/opus/src/include/opus_custom.h b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_custom.h new file mode 100644 index 0000000000..41f36bf2fb --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_custom.h @@ -0,0 +1,342 @@ +/* Copyright (c) 2007-2008 CSIRO + Copyright (c) 2007-2009 Xiph.Org Foundation + Copyright (c) 2008-2012 Gregory Maxwell + Written by Jean-Marc Valin and Gregory Maxwell */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + @file opus_custom.h + @brief Opus-Custom reference implementation API + */ + +#ifndef OPUS_CUSTOM_H +#define OPUS_CUSTOM_H + +#include "opus_defines.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef CUSTOM_MODES +# define OPUS_CUSTOM_EXPORT OPUS_EXPORT +# define OPUS_CUSTOM_EXPORT_STATIC OPUS_EXPORT +#else +# define OPUS_CUSTOM_EXPORT +# ifdef OPUS_BUILD +# define OPUS_CUSTOM_EXPORT_STATIC static OPUS_INLINE +# else +# define OPUS_CUSTOM_EXPORT_STATIC +# endif +#endif + +/** @defgroup opus_custom Opus Custom + * @{ + * Opus Custom is an optional part of the Opus specification and + * reference implementation which uses a distinct API from the regular + * API and supports frame sizes that are not normally supported.\ Use + * of Opus Custom is discouraged for all but very special applications + * for which a frame size different from 2.5, 5, 10, or 20 ms is needed + * (for either complexity or latency reasons) and where interoperability + * is less important. + * + * In addition to the interoperability limitations the use of Opus custom + * disables a substantial chunk of the codec and generally lowers the + * quality available at a given bitrate. Normally when an application needs + * a different frame size from the codec it should buffer to match the + * sizes but this adds a small amount of delay which may be important + * in some very low latency applications. Some transports (especially + * constant rate RF transports) may also work best with frames of + * particular durations. 
+ * + * Libopus only supports custom modes if they are enabled at compile time. + * + * The Opus Custom API is similar to the regular API but the + * @ref opus_encoder_create and @ref opus_decoder_create calls take + * an additional mode parameter which is a structure produced by + * a call to @ref opus_custom_mode_create. Both the encoder and decoder + * must create a mode using the same sample rate (fs) and frame size + * (frame size) so these parameters must either be signaled out of band + * or fixed in a particular implementation. + * + * Similar to regular Opus the custom modes support on the fly frame size + * switching, but the sizes available depend on the particular frame size in + * use. For some initial frame sizes on a single on the fly size is available. + */ + +/** Contains the state of an encoder. One encoder state is needed + for each stream. It is initialized once at the beginning of the + stream. Do *not* re-initialize the state for every frame. + @brief Encoder state + */ +typedef struct OpusCustomEncoder OpusCustomEncoder; + +/** State of the decoder. One decoder state is needed for each stream. + It is initialized once at the beginning of the stream. Do *not* + re-initialize the state for every frame. + @brief Decoder state + */ +typedef struct OpusCustomDecoder OpusCustomDecoder; + +/** The mode contains all the information necessary to create an + encoder. Both the encoder and decoder need to be initialized + with exactly the same mode, otherwise the output will be + corrupted. + @brief Mode configuration + */ +typedef struct OpusCustomMode OpusCustomMode; + +/** Creates a new mode struct. This will be passed to an encoder or + * decoder. The mode MUST NOT BE DESTROYED until the encoders and + * decoders that use it are destroyed as well. + * @param [in] Fs int: Sampling rate (8000 to 96000 Hz) + * @param [in] frame_size int: Number of samples (per channel) to encode in each + * packet (64 - 1024, prime factorization must contain zero or more 2s, 3s, or 5s and no other primes) + * @param [out] error int*: Returned error code (if NULL, no error will be returned) + * @return A newly created mode + */ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT OpusCustomMode *opus_custom_mode_create(opus_int32 Fs, int frame_size, int *error); + +/** Destroys a mode struct. Only call this after all encoders and + * decoders using this mode are destroyed as well. + * @param [in] mode OpusCustomMode*: Mode to be freed. + */ +OPUS_CUSTOM_EXPORT void opus_custom_mode_destroy(OpusCustomMode *mode); + + +#if !defined(OPUS_BUILD) || defined(CELT_ENCODER_C) + +/* Encoder */ +/** Gets the size of an OpusCustomEncoder structure. + * @param [in] mode OpusCustomMode *: Mode configuration + * @param [in] channels int: Number of channels + * @returns size + */ +OPUS_CUSTOM_EXPORT_STATIC OPUS_WARN_UNUSED_RESULT int opus_custom_encoder_get_size( + const OpusCustomMode *mode, + int channels +) OPUS_ARG_NONNULL(1); + +# ifdef CUSTOM_MODES +/** Initializes a previously allocated encoder state + * The memory pointed to by st must be the size returned by opus_custom_encoder_get_size. + * This is intended for applications which use their own allocator instead of malloc. + * @see opus_custom_encoder_create(),opus_custom_encoder_get_size() + * To reset a previously initialized state use the OPUS_RESET_STATE CTL. 
+ * @param [in] st OpusCustomEncoder*: Encoder state + * @param [in] mode OpusCustomMode *: Contains all the information about the characteristics of + * the stream (must be the same characteristics as used for the + * decoder) + * @param [in] channels int: Number of channels + * @return OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_CUSTOM_EXPORT int opus_custom_encoder_init( + OpusCustomEncoder *st, + const OpusCustomMode *mode, + int channels +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); +# endif +#endif + + +/** Creates a new encoder state. Each stream needs its own encoder + * state (can't be shared across simultaneous streams). + * @param [in] mode OpusCustomMode*: Contains all the information about the characteristics of + * the stream (must be the same characteristics as used for the + * decoder) + * @param [in] channels int: Number of channels + * @param [out] error int*: Returns an error code + * @return Newly created encoder state. +*/ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT OpusCustomEncoder *opus_custom_encoder_create( + const OpusCustomMode *mode, + int channels, + int *error +) OPUS_ARG_NONNULL(1); + + +/** Destroys a an encoder state. + * @param[in] st OpusCustomEncoder*: State to be freed. + */ +OPUS_CUSTOM_EXPORT void opus_custom_encoder_destroy(OpusCustomEncoder *st); + +/** Encodes a frame of audio. + * @param [in] st OpusCustomEncoder*: Encoder state + * @param [in] pcm float*: PCM audio in float format, with a normal range of +/-1.0. + * Samples with a range beyond +/-1.0 are supported but will + * be clipped by decoders using the integer API and should + * only be used if it is known that the far end supports + * extended dynamic range. There must be exactly + * frame_size samples per channel. + * @param [in] frame_size int: Number of samples per frame of input signal + * @param [out] compressed char *: The compressed data is written here. This may not alias pcm and must be at least maxCompressedBytes long. + * @param [in] maxCompressedBytes int: Maximum number of bytes to use for compressing the frame + * (can change from one frame to another) + * @return Number of bytes written to "compressed". + * If negative, an error has occurred (see error codes). It is IMPORTANT that + * the length returned be somehow transmitted to the decoder. Otherwise, no + * decoding is possible. + */ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT int opus_custom_encode_float( + OpusCustomEncoder *st, + const float *pcm, + int frame_size, + unsigned char *compressed, + int maxCompressedBytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes a frame of audio. + * @param [in] st OpusCustomEncoder*: Encoder state + * @param [in] pcm opus_int16*: PCM audio in signed 16-bit format (native endian). + * There must be exactly frame_size samples per channel. + * @param [in] frame_size int: Number of samples per frame of input signal + * @param [out] compressed char *: The compressed data is written here. This may not alias pcm and must be at least maxCompressedBytes long. + * @param [in] maxCompressedBytes int: Maximum number of bytes to use for compressing the frame + * (can change from one frame to another) + * @return Number of bytes written to "compressed". + * If negative, an error has occurred (see error codes). It is IMPORTANT that + * the length returned be somehow transmitted to the decoder. Otherwise, no + * decoding is possible. 
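A minimal sketch of creating a custom mode and a matching encoder, only meaningful when libopus is built with custom modes enabled; the 48 kHz rate, 256-sample frame, and stereo channel count are illustrative assumptions.

#include "opus.h"
#include "opus_custom.h"

/* Create a mode and an encoder from it; the decoder must be created from an
 * identical mode. The mode must outlive the encoder. */
static OpusCustomEncoder *make_custom_encoder(OpusCustomMode **mode_out)
{
    int err;
    OpusCustomMode *mode = opus_custom_mode_create(48000, 256, &err);
    if (mode == NULL)
        return NULL;
    *mode_out = mode;   /* destroy with opus_custom_mode_destroy() last */
    return opus_custom_encoder_create(mode, 2, &err);
}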
+ */ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT int opus_custom_encode( + OpusCustomEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *compressed, + int maxCompressedBytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on an Opus custom encoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @see opus_encoderctls + */ +OPUS_CUSTOM_EXPORT int opus_custom_encoder_ctl(OpusCustomEncoder * OPUS_RESTRICT st, int request, ...) OPUS_ARG_NONNULL(1); + + +#if !defined(OPUS_BUILD) || defined(CELT_DECODER_C) +/* Decoder */ + +/** Gets the size of an OpusCustomDecoder structure. + * @param [in] mode OpusCustomMode *: Mode configuration + * @param [in] channels int: Number of channels + * @returns size + */ +OPUS_CUSTOM_EXPORT_STATIC OPUS_WARN_UNUSED_RESULT int opus_custom_decoder_get_size( + const OpusCustomMode *mode, + int channels +) OPUS_ARG_NONNULL(1); + +/** Initializes a previously allocated decoder state + * The memory pointed to by st must be the size returned by opus_custom_decoder_get_size. + * This is intended for applications which use their own allocator instead of malloc. + * @see opus_custom_decoder_create(),opus_custom_decoder_get_size() + * To reset a previously initialized state use the OPUS_RESET_STATE CTL. + * @param [in] st OpusCustomDecoder*: Decoder state + * @param [in] mode OpusCustomMode *: Contains all the information about the characteristics of + * the stream (must be the same characteristics as used for the + * encoder) + * @param [in] channels int: Number of channels + * @return OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_CUSTOM_EXPORT_STATIC int opus_custom_decoder_init( + OpusCustomDecoder *st, + const OpusCustomMode *mode, + int channels +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); + +#endif + + +/** Creates a new decoder state. Each stream needs its own decoder state (can't + * be shared across simultaneous streams). + * @param [in] mode OpusCustomMode: Contains all the information about the characteristics of the + * stream (must be the same characteristics as used for the encoder) + * @param [in] channels int: Number of channels + * @param [out] error int*: Returns an error code + * @return Newly created decoder state. + */ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT OpusCustomDecoder *opus_custom_decoder_create( + const OpusCustomMode *mode, + int channels, + int *error +) OPUS_ARG_NONNULL(1); + +/** Destroys a an decoder state. + * @param[in] st OpusCustomDecoder*: State to be freed. + */ +OPUS_CUSTOM_EXPORT void opus_custom_decoder_destroy(OpusCustomDecoder *st); + +/** Decode an opus custom frame with floating point output + * @param [in] st OpusCustomDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len int: Number of bytes in payload + * @param [out] pcm float*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(float) + * @param [in] frame_size Number of samples per channel of available space in *pcm. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT int opus_custom_decode_float( + OpusCustomDecoder *st, + const unsigned char *data, + int len, + float *pcm, + int frame_size +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode an opus custom frame + * @param [in] st OpusCustomDecoder*: Decoder state + * @param [in] data char*: Input payload. 
Use a NULL pointer to indicate packet loss + * @param [in] len int: Number of bytes in payload + * @param [out] pcm opus_int16*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size Number of samples per channel of available space in *pcm. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_CUSTOM_EXPORT OPUS_WARN_UNUSED_RESULT int opus_custom_decode( + OpusCustomDecoder *st, + const unsigned char *data, + int len, + opus_int16 *pcm, + int frame_size +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on an Opus custom decoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @see opus_genericctls + */ +OPUS_CUSTOM_EXPORT int opus_custom_decoder_ctl(OpusCustomDecoder * OPUS_RESTRICT st, int request, ...) OPUS_ARG_NONNULL(1); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_CUSTOM_H */ diff --git a/third-party/webrtc/dependencies/third_party/opus/src/include/opus_defines.h b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_defines.h new file mode 100644 index 0000000000..d141418b21 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_defines.h @@ -0,0 +1,799 @@ +/* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited + Written by Jean-Marc Valin and Koen Vos */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +/** + * @file opus_defines.h + * @brief Opus reference implementation constants + */ + +#ifndef OPUS_DEFINES_H +#define OPUS_DEFINES_H + +#include "opus_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @defgroup opus_errorcodes Error codes + * @{ + */ +/** No error @hideinitializer*/ +#define OPUS_OK 0 +/** One or more invalid/out of range arguments @hideinitializer*/ +#define OPUS_BAD_ARG -1 +/** Not enough bytes allocated in the buffer @hideinitializer*/ +#define OPUS_BUFFER_TOO_SMALL -2 +/** An internal error was detected @hideinitializer*/ +#define OPUS_INTERNAL_ERROR -3 +/** The compressed data passed is corrupted @hideinitializer*/ +#define OPUS_INVALID_PACKET -4 +/** Invalid/unsupported request number @hideinitializer*/ +#define OPUS_UNIMPLEMENTED -5 +/** An encoder or decoder structure is invalid or already freed @hideinitializer*/ +#define OPUS_INVALID_STATE -6 +/** Memory allocation has failed @hideinitializer*/ +#define OPUS_ALLOC_FAIL -7 +/**@}*/ + +/** @cond OPUS_INTERNAL_DOC */ +/**Export control for opus functions */ + +#ifndef OPUS_EXPORT +# if defined(WIN32) +# if defined(OPUS_BUILD) && defined(DLL_EXPORT) +# define OPUS_EXPORT __declspec(dllexport) +# else +# define OPUS_EXPORT +# endif +# elif defined(__GNUC__) && defined(OPUS_BUILD) +# define OPUS_EXPORT __attribute__ ((visibility ("default"))) +# else +# define OPUS_EXPORT +# endif +#endif + +# if !defined(OPUS_GNUC_PREREQ) +# if defined(__GNUC__)&&defined(__GNUC_MINOR__) +# define OPUS_GNUC_PREREQ(_maj,_min) \ + ((__GNUC__<<16)+__GNUC_MINOR__>=((_maj)<<16)+(_min)) +# else +# define OPUS_GNUC_PREREQ(_maj,_min) 0 +# endif +# endif + +#if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) +# if OPUS_GNUC_PREREQ(3,0) +# define OPUS_RESTRICT __restrict__ +# elif (defined(_MSC_VER) && _MSC_VER >= 1400) +# define OPUS_RESTRICT __restrict +# else +# define OPUS_RESTRICT +# endif +#else +# define OPUS_RESTRICT restrict +#endif + +#if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) +# if OPUS_GNUC_PREREQ(2,7) +# define OPUS_INLINE __inline__ +# elif (defined(_MSC_VER)) +# define OPUS_INLINE __inline +# else +# define OPUS_INLINE +# endif +#else +# define OPUS_INLINE inline +#endif + +/**Warning attributes for opus functions + * NONNULL is not used in OPUS_BUILD to avoid the compiler optimizing out + * some paranoid null checks. */ +#if defined(__GNUC__) && OPUS_GNUC_PREREQ(3, 4) +# define OPUS_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) +#else +# define OPUS_WARN_UNUSED_RESULT +#endif +#if !defined(OPUS_BUILD) && defined(__GNUC__) && OPUS_GNUC_PREREQ(3, 4) +# define OPUS_ARG_NONNULL(_x) __attribute__ ((__nonnull__(_x))) +#else +# define OPUS_ARG_NONNULL(_x) +#endif + +/** These are the actual Encoder CTL ID numbers. + * They should not be used directly by applications. 
+ * In general, SETs should be even and GETs should be odd.*/ +#define OPUS_SET_APPLICATION_REQUEST 4000 +#define OPUS_GET_APPLICATION_REQUEST 4001 +#define OPUS_SET_BITRATE_REQUEST 4002 +#define OPUS_GET_BITRATE_REQUEST 4003 +#define OPUS_SET_MAX_BANDWIDTH_REQUEST 4004 +#define OPUS_GET_MAX_BANDWIDTH_REQUEST 4005 +#define OPUS_SET_VBR_REQUEST 4006 +#define OPUS_GET_VBR_REQUEST 4007 +#define OPUS_SET_BANDWIDTH_REQUEST 4008 +#define OPUS_GET_BANDWIDTH_REQUEST 4009 +#define OPUS_SET_COMPLEXITY_REQUEST 4010 +#define OPUS_GET_COMPLEXITY_REQUEST 4011 +#define OPUS_SET_INBAND_FEC_REQUEST 4012 +#define OPUS_GET_INBAND_FEC_REQUEST 4013 +#define OPUS_SET_PACKET_LOSS_PERC_REQUEST 4014 +#define OPUS_GET_PACKET_LOSS_PERC_REQUEST 4015 +#define OPUS_SET_DTX_REQUEST 4016 +#define OPUS_GET_DTX_REQUEST 4017 +#define OPUS_SET_VBR_CONSTRAINT_REQUEST 4020 +#define OPUS_GET_VBR_CONSTRAINT_REQUEST 4021 +#define OPUS_SET_FORCE_CHANNELS_REQUEST 4022 +#define OPUS_GET_FORCE_CHANNELS_REQUEST 4023 +#define OPUS_SET_SIGNAL_REQUEST 4024 +#define OPUS_GET_SIGNAL_REQUEST 4025 +#define OPUS_GET_LOOKAHEAD_REQUEST 4027 +/* #define OPUS_RESET_STATE 4028 */ +#define OPUS_GET_SAMPLE_RATE_REQUEST 4029 +#define OPUS_GET_FINAL_RANGE_REQUEST 4031 +#define OPUS_GET_PITCH_REQUEST 4033 +#define OPUS_SET_GAIN_REQUEST 4034 +#define OPUS_GET_GAIN_REQUEST 4045 /* Should have been 4035 */ +#define OPUS_SET_LSB_DEPTH_REQUEST 4036 +#define OPUS_GET_LSB_DEPTH_REQUEST 4037 +#define OPUS_GET_LAST_PACKET_DURATION_REQUEST 4039 +#define OPUS_SET_EXPERT_FRAME_DURATION_REQUEST 4040 +#define OPUS_GET_EXPERT_FRAME_DURATION_REQUEST 4041 +#define OPUS_SET_PREDICTION_DISABLED_REQUEST 4042 +#define OPUS_GET_PREDICTION_DISABLED_REQUEST 4043 +/* Don't use 4045, it's already taken by OPUS_GET_GAIN_REQUEST */ +#define OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST 4046 +#define OPUS_GET_PHASE_INVERSION_DISABLED_REQUEST 4047 +#define OPUS_GET_IN_DTX_REQUEST 4049 + +/** Defines for the presence of extended APIs. */ +#define OPUS_HAVE_OPUS_PROJECTION_H + +/* Macros to trigger compilation errors when the wrong types are provided to a CTL */ +#define __opus_check_int(x) (((void)((x) == (opus_int32)0)), (opus_int32)(x)) +#define __opus_check_int_ptr(ptr) ((ptr) + ((ptr) - (opus_int32*)(ptr))) +#define __opus_check_uint_ptr(ptr) ((ptr) + ((ptr) - (opus_uint32*)(ptr))) +#define __opus_check_val16_ptr(ptr) ((ptr) + ((ptr) - (opus_val16*)(ptr))) +/** @endcond */ + +/** @defgroup opus_ctlvalues Pre-defined values for CTL interface + * @see opus_genericctls, opus_encoderctls + * @{ + */ +/* Values for the various encoder CTLs */ +#define OPUS_AUTO -1000 /**opus_int32: Allowed values: 0-10, inclusive. + * + * @hideinitializer */ +#define OPUS_SET_COMPLEXITY(x) OPUS_SET_COMPLEXITY_REQUEST, __opus_check_int(x) +/** Gets the encoder's complexity configuration. + * @see OPUS_SET_COMPLEXITY + * @param[out] x opus_int32 *: Returns a value in the range 0-10, + * inclusive. + * @hideinitializer */ +#define OPUS_GET_COMPLEXITY(x) OPUS_GET_COMPLEXITY_REQUEST, __opus_check_int_ptr(x) + +/** Configures the bitrate in the encoder. + * Rates from 500 to 512000 bits per second are meaningful, as well as the + * special values #OPUS_AUTO and #OPUS_BITRATE_MAX. + * The value #OPUS_BITRATE_MAX can be used to cause the codec to use as much + * rate as it can, which is useful for controlling the rate by adjusting the + * output buffer size. + * @see OPUS_GET_BITRATE + * @param[in] x opus_int32: Bitrate in bits per second. 
The default + * is determined based on the number of + * channels and the input sampling rate. + * @hideinitializer */ +#define OPUS_SET_BITRATE(x) OPUS_SET_BITRATE_REQUEST, __opus_check_int(x) +/** Gets the encoder's bitrate configuration. + * @see OPUS_SET_BITRATE + * @param[out] x opus_int32 *: Returns the bitrate in bits per second. + * The default is determined based on the + * number of channels and the input + * sampling rate. + * @hideinitializer */ +#define OPUS_GET_BITRATE(x) OPUS_GET_BITRATE_REQUEST, __opus_check_int_ptr(x) + +/** Enables or disables variable bitrate (VBR) in the encoder. + * The configured bitrate may not be met exactly because frames must + * be an integer number of bytes in length. + * @see OPUS_GET_VBR + * @see OPUS_SET_VBR_CONSTRAINT + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>0</dt><dd>Hard CBR. For LPC/hybrid modes at very low bit-rate, this can
+ * cause noticeable quality degradation.</dd>
+ * <dt>1</dt><dd>VBR (default). The exact type of VBR is controlled by
+ * #OPUS_SET_VBR_CONSTRAINT.</dd>
+ * </dl>
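+ *
+ * A minimal usage sketch (the encoder handle <code>enc</code> is assumed to
+ * have been created already; it is not part of this header):
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_BITRATE(64000));
+ * opus_encoder_ctl(enc, OPUS_SET_VBR(1));
+ * opus_encoder_ctl(enc, OPUS_SET_VBR_CONSTRAINT(0));
+ * @endcode
+ * This requests an unconstrained VBR stream averaging 64 kb/s.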
+ * @hideinitializer */ +#define OPUS_SET_VBR(x) OPUS_SET_VBR_REQUEST, __opus_check_int(x) +/** Determine if variable bitrate (VBR) is enabled in the encoder. + * @see OPUS_SET_VBR + * @see OPUS_GET_VBR_CONSTRAINT + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>Hard CBR.</dd>
+ * <dt>1</dt><dd>VBR (default). The exact type of VBR may be retrieved via
+ * #OPUS_GET_VBR_CONSTRAINT.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_VBR(x) OPUS_GET_VBR_REQUEST, __opus_check_int_ptr(x) + +/** Enables or disables constrained VBR in the encoder. + * This setting is ignored when the encoder is in CBR mode. + * @warning Only the MDCT mode of Opus currently heeds the constraint. + * Speech mode ignores it completely, hybrid mode may fail to obey it + * if the LPC layer uses more bitrate than the constraint would have + * permitted. + * @see OPUS_GET_VBR_CONSTRAINT + * @see OPUS_SET_VBR + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>0</dt><dd>Unconstrained VBR.</dd>
+ * <dt>1</dt><dd>Constrained VBR (default). This creates a maximum of one
+ * frame of buffering delay assuming a transport with a
+ * serialization speed of the nominal bitrate.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_VBR_CONSTRAINT(x) OPUS_SET_VBR_CONSTRAINT_REQUEST, __opus_check_int(x) +/** Determine if constrained VBR is enabled in the encoder. + * @see OPUS_SET_VBR_CONSTRAINT + * @see OPUS_GET_VBR + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>Unconstrained VBR.</dd>
+ * <dt>1</dt><dd>Constrained VBR (default).</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_VBR_CONSTRAINT(x) OPUS_GET_VBR_CONSTRAINT_REQUEST, __opus_check_int_ptr(x) + +/** Configures mono/stereo forcing in the encoder. + * This can force the encoder to produce packets encoded as either mono or + * stereo, regardless of the format of the input audio. This is useful when + * the caller knows that the input signal is currently a mono source embedded + * in a stereo stream. + * @see OPUS_GET_FORCE_CHANNELS + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>#OPUS_AUTO</dt><dd>Not forced (default)</dd>
+ * <dt>1</dt><dd>Forced mono</dd>
+ * <dt>2</dt><dd>Forced stereo</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_FORCE_CHANNELS(x) OPUS_SET_FORCE_CHANNELS_REQUEST, __opus_check_int(x) +/** Gets the encoder's forced channel configuration. + * @see OPUS_SET_FORCE_CHANNELS + * @param[out] x opus_int32 *: + *
+ * <dl>
+ * <dt>#OPUS_AUTO</dt><dd>Not forced (default)</dd>
+ * <dt>1</dt><dd>Forced mono</dd>
+ * <dt>2</dt><dd>Forced stereo</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_FORCE_CHANNELS(x) OPUS_GET_FORCE_CHANNELS_REQUEST, __opus_check_int_ptr(x) + +/** Configures the maximum bandpass that the encoder will select automatically. + * Applications should normally use this instead of #OPUS_SET_BANDWIDTH + * (leaving that set to the default, #OPUS_AUTO). This allows the + * application to set an upper bound based on the type of input it is + * providing, but still gives the encoder the freedom to reduce the bandpass + * when the bitrate becomes too low, for better overall quality. + * @see OPUS_GET_MAX_BANDWIDTH + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>OPUS_BANDWIDTH_NARROWBAND</dt><dd>4 kHz passband</dd>
+ * <dt>OPUS_BANDWIDTH_MEDIUMBAND</dt><dd>6 kHz passband</dd>
+ * <dt>OPUS_BANDWIDTH_WIDEBAND</dt><dd>8 kHz passband</dd>
+ * <dt>OPUS_BANDWIDTH_SUPERWIDEBAND</dt><dd>12 kHz passband</dd>
+ * <dt>OPUS_BANDWIDTH_FULLBAND</dt><dd>20 kHz passband (default)</dd>
+ * </dl>
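+ *
+ * For example, an application that knows its input is 16 kHz-sampled speech
+ * could cap the bandpass at wideband (a sketch; <code>enc</code> is an
+ * assumed encoder handle):
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_MAX_BANDWIDTH(OPUS_BANDWIDTH_WIDEBAND));
+ * @endcode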
+ * @hideinitializer */ +#define OPUS_SET_MAX_BANDWIDTH(x) OPUS_SET_MAX_BANDWIDTH_REQUEST, __opus_check_int(x) + +/** Gets the encoder's configured maximum allowed bandpass. + * @see OPUS_SET_MAX_BANDWIDTH + * @param[out] x opus_int32 *: Allowed values: + *
+ * <dl>
+ * <dt>#OPUS_BANDWIDTH_NARROWBAND</dt><dd>4 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_MEDIUMBAND</dt><dd>6 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_WIDEBAND</dt><dd>8 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_SUPERWIDEBAND</dt><dd>12 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_FULLBAND</dt><dd>20 kHz passband (default)</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_MAX_BANDWIDTH(x) OPUS_GET_MAX_BANDWIDTH_REQUEST, __opus_check_int_ptr(x) + +/** Sets the encoder's bandpass to a specific value. + * This prevents the encoder from automatically selecting the bandpass based + * on the available bitrate. If an application knows the bandpass of the input + * audio it is providing, it should normally use #OPUS_SET_MAX_BANDWIDTH + * instead, which still gives the encoder the freedom to reduce the bandpass + * when the bitrate becomes too low, for better overall quality. + * @see OPUS_GET_BANDWIDTH + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>#OPUS_AUTO</dt><dd>(default)</dd>
+ * <dt>#OPUS_BANDWIDTH_NARROWBAND</dt><dd>4 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_MEDIUMBAND</dt><dd>6 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_WIDEBAND</dt><dd>8 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_SUPERWIDEBAND</dt><dd>12 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_FULLBAND</dt><dd>20 kHz passband</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_BANDWIDTH(x) OPUS_SET_BANDWIDTH_REQUEST, __opus_check_int(x) + +/** Configures the type of signal being encoded. + * This is a hint which helps the encoder's mode selection. + * @see OPUS_GET_SIGNAL + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>#OPUS_AUTO</dt><dd>(default)</dd>
+ * <dt>#OPUS_SIGNAL_VOICE</dt><dd>Bias thresholds towards choosing LPC or Hybrid modes.</dd>
+ * <dt>#OPUS_SIGNAL_MUSIC</dt><dd>Bias thresholds towards choosing MDCT modes.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_SIGNAL(x) OPUS_SET_SIGNAL_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured signal type. + * @see OPUS_SET_SIGNAL + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>#OPUS_AUTO</dt><dd>(default)</dd>
+ * <dt>#OPUS_SIGNAL_VOICE</dt><dd>Bias thresholds towards choosing LPC or Hybrid modes.</dd>
+ * <dt>#OPUS_SIGNAL_MUSIC</dt><dd>Bias thresholds towards choosing MDCT modes.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_SIGNAL(x) OPUS_GET_SIGNAL_REQUEST, __opus_check_int_ptr(x) + + +/** Configures the encoder's intended application. + * The initial value is a mandatory argument to the encoder_create function. + * @see OPUS_GET_APPLICATION + * @param[in] x opus_int32: Returns one of the following values: + *
+ * <dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_APPLICATION(x) OPUS_SET_APPLICATION_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured application. + * @see OPUS_SET_APPLICATION + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_APPLICATION(x) OPUS_GET_APPLICATION_REQUEST, __opus_check_int_ptr(x) + +/** Gets the total samples of delay added by the entire codec. + * This can be queried by the encoder and then the provided number of samples can be + * skipped on from the start of the decoder's output to provide time aligned input + * and output. From the perspective of a decoding application the real data begins this many + * samples late. + * + * The decoder contribution to this delay is identical for all decoders, but the + * encoder portion of the delay may vary from implementation to implementation, + * version to version, or even depend on the encoder's initial configuration. + * Applications needing delay compensation should call this CTL rather than + * hard-coding a value. + * @param[out] x opus_int32 *: Number of lookahead samples + * @hideinitializer */ +#define OPUS_GET_LOOKAHEAD(x) OPUS_GET_LOOKAHEAD_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of inband forward error correction (FEC). + * @note This is only applicable to the LPC layer + * @see OPUS_GET_INBAND_FEC + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>0</dt><dd>Disable inband FEC (default).</dd>
+ * <dt>1</dt><dd>Enable inband FEC.</dd>
+ * </dl>
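+ *
+ * Inband FEC only has a practical effect when the encoder also expects some
+ * packet loss; a sketch combining the two related CTLs (assuming an encoder
+ * handle <code>enc</code> and an expected 10% loss rate):
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_INBAND_FEC(1));
+ * opus_encoder_ctl(enc, OPUS_SET_PACKET_LOSS_PERC(10));
+ * @endcode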
+ * @hideinitializer */ +#define OPUS_SET_INBAND_FEC(x) OPUS_SET_INBAND_FEC_REQUEST, __opus_check_int(x) +/** Gets encoder's configured use of inband forward error correction. + * @see OPUS_SET_INBAND_FEC + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>Inband FEC disabled (default).</dd>
+ * <dt>1</dt><dd>Inband FEC enabled.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_INBAND_FEC(x) OPUS_GET_INBAND_FEC_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's expected packet loss percentage. + * Higher values trigger progressively more loss resistant behavior in the encoder + * at the expense of quality at a given bitrate in the absence of packet loss, but + * greater quality under loss. + * @see OPUS_GET_PACKET_LOSS_PERC + * @param[in] x opus_int32: Loss percentage in the range 0-100, inclusive (default: 0). + * @hideinitializer */ +#define OPUS_SET_PACKET_LOSS_PERC(x) OPUS_SET_PACKET_LOSS_PERC_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured packet loss percentage. + * @see OPUS_SET_PACKET_LOSS_PERC + * @param[out] x opus_int32 *: Returns the configured loss percentage + * in the range 0-100, inclusive (default: 0). + * @hideinitializer */ +#define OPUS_GET_PACKET_LOSS_PERC(x) OPUS_GET_PACKET_LOSS_PERC_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of discontinuous transmission (DTX). + * @note This is only applicable to the LPC layer + * @see OPUS_GET_DTX + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>0</dt><dd>Disable DTX (default).</dd>
+ * <dt>1</dt><dd>Enable DTX.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_DTX(x) OPUS_SET_DTX_REQUEST, __opus_check_int(x) +/** Gets encoder's configured use of discontinuous transmission. + * @see OPUS_SET_DTX + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>DTX disabled (default).</dd>
+ * <dt>1</dt><dd>DTX enabled.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_DTX(x) OPUS_GET_DTX_REQUEST, __opus_check_int_ptr(x) +/** Configures the depth of signal being encoded. + * + * This is a hint which helps the encoder identify silence and near-silence. + * It represents the number of significant bits of linear intensity below + * which the signal contains ignorable quantization or other noise. + * + * For example, OPUS_SET_LSB_DEPTH(14) would be an appropriate setting + * for G.711 u-law input. OPUS_SET_LSB_DEPTH(16) would be appropriate + * for 16-bit linear pcm input with opus_encode_float(). + * + * When using opus_encode() instead of opus_encode_float(), or when libopus + * is compiled for fixed-point, the encoder uses the minimum of the value + * set here and the value 16. + * + * @see OPUS_GET_LSB_DEPTH + * @param[in] x opus_int32: Input precision in bits, between 8 and 24 + * (default: 24). + * @hideinitializer */ +#define OPUS_SET_LSB_DEPTH(x) OPUS_SET_LSB_DEPTH_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured signal depth. + * @see OPUS_SET_LSB_DEPTH + * @param[out] x opus_int32 *: Input precision in bits, between 8 and + * 24 (default: 24). + * @hideinitializer */ +#define OPUS_GET_LSB_DEPTH(x) OPUS_GET_LSB_DEPTH_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of variable duration frames. + * When variable duration is enabled, the encoder is free to use a shorter frame + * size than the one requested in the opus_encode*() call. + * It is then the user's responsibility + * to verify how much audio was encoded by checking the ToC byte of the encoded + * packet. The part of the audio that was not encoded needs to be resent to the + * encoder for the next call. Do not use this option unless you really + * know what you are doing. + * @see OPUS_GET_EXPERT_FRAME_DURATION + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>OPUS_FRAMESIZE_ARG</dt><dd>Select frame size from the argument (default).</dd>
+ * <dt>OPUS_FRAMESIZE_2_5_MS</dt><dd>Use 2.5 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_5_MS</dt><dd>Use 5 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_10_MS</dt><dd>Use 10 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_20_MS</dt><dd>Use 20 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_40_MS</dt><dd>Use 40 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_60_MS</dt><dd>Use 60 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_80_MS</dt><dd>Use 80 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_100_MS</dt><dd>Use 100 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_120_MS</dt><dd>Use 120 ms frames.</dd>
+ * </dl>
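+ *
+ * A sketch of forcing the encoder onto 40 ms frames (assuming an encoder
+ * handle <code>enc</code>):
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_EXPERT_FRAME_DURATION(OPUS_FRAMESIZE_40_MS));
+ * @endcode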
+ * @hideinitializer */ +#define OPUS_SET_EXPERT_FRAME_DURATION(x) OPUS_SET_EXPERT_FRAME_DURATION_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured use of variable duration frames. + * @see OPUS_SET_EXPERT_FRAME_DURATION + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>OPUS_FRAMESIZE_ARG</dt><dd>Select frame size from the argument (default).</dd>
+ * <dt>OPUS_FRAMESIZE_2_5_MS</dt><dd>Use 2.5 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_5_MS</dt><dd>Use 5 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_10_MS</dt><dd>Use 10 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_20_MS</dt><dd>Use 20 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_40_MS</dt><dd>Use 40 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_60_MS</dt><dd>Use 60 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_80_MS</dt><dd>Use 80 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_100_MS</dt><dd>Use 100 ms frames.</dd>
+ * <dt>OPUS_FRAMESIZE_120_MS</dt><dd>Use 120 ms frames.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_EXPERT_FRAME_DURATION(x) OPUS_GET_EXPERT_FRAME_DURATION_REQUEST, __opus_check_int_ptr(x) + +/** If set to 1, disables almost all use of prediction, making frames almost + * completely independent. This reduces quality. + * @see OPUS_GET_PREDICTION_DISABLED + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>0</dt><dd>Enable prediction (default).</dd>
+ * <dt>1</dt><dd>Disable prediction.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_PREDICTION_DISABLED(x) OPUS_SET_PREDICTION_DISABLED_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured prediction status. + * @see OPUS_SET_PREDICTION_DISABLED + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>Prediction enabled (default).</dd>
+ * <dt>1</dt><dd>Prediction disabled.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_PREDICTION_DISABLED(x) OPUS_GET_PREDICTION_DISABLED_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_genericctls Generic CTLs + * + * These macros are used with the \c opus_decoder_ctl and + * \c opus_encoder_ctl calls to generate a particular + * request. + * + * When called on an \c OpusDecoder they apply to that + * particular decoder instance. When called on an + * \c OpusEncoder they apply to the corresponding setting + * on that encoder instance, if present. + * + * Some usage examples: + * + * @code + * int ret; + * opus_int32 pitch; + * ret = opus_decoder_ctl(dec_ctx, OPUS_GET_PITCH(&pitch)); + * if (ret == OPUS_OK) return ret; + * + * opus_encoder_ctl(enc_ctx, OPUS_RESET_STATE); + * opus_decoder_ctl(dec_ctx, OPUS_RESET_STATE); + * + * opus_int32 enc_bw, dec_bw; + * opus_encoder_ctl(enc_ctx, OPUS_GET_BANDWIDTH(&enc_bw)); + * opus_decoder_ctl(dec_ctx, OPUS_GET_BANDWIDTH(&dec_bw)); + * if (enc_bw != dec_bw) { + * printf("packet bandwidth mismatch!\n"); + * } + * @endcode + * + * @see opus_encoder, opus_decoder_ctl, opus_encoder_ctl, opus_decoderctls, opus_encoderctls + * @{ + */ + +/** Resets the codec state to be equivalent to a freshly initialized state. + * This should be called when switching streams in order to prevent + * the back to back decoding from giving different results from + * one at a time decoding. + * @hideinitializer */ +#define OPUS_RESET_STATE 4028 + +/** Gets the final state of the codec's entropy coder. + * This is used for testing purposes, + * The encoder and decoder state should be identical after coding a payload + * (assuming no data corruption or software bugs) + * + * @param[out] x opus_uint32 *: Entropy coder state + * + * @hideinitializer */ +#define OPUS_GET_FINAL_RANGE(x) OPUS_GET_FINAL_RANGE_REQUEST, __opus_check_uint_ptr(x) + +/** Gets the encoder's configured bandpass or the decoder's last bandpass. + * @see OPUS_SET_BANDWIDTH + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>#OPUS_AUTO</dt><dd>(default)</dd>
+ * <dt>#OPUS_BANDWIDTH_NARROWBAND</dt><dd>4 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_MEDIUMBAND</dt><dd>6 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_WIDEBAND</dt><dd>8 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_SUPERWIDEBAND</dt><dd>12 kHz passband</dd>
+ * <dt>#OPUS_BANDWIDTH_FULLBAND</dt><dd>20 kHz passband</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_BANDWIDTH(x) OPUS_GET_BANDWIDTH_REQUEST, __opus_check_int_ptr(x) + +/** Gets the sampling rate the encoder or decoder was initialized with. + * This simply returns the Fs value passed to opus_encoder_init() + * or opus_decoder_init(). + * @param[out] x opus_int32 *: Sampling rate of encoder or decoder. + * @hideinitializer + */ +#define OPUS_GET_SAMPLE_RATE(x) OPUS_GET_SAMPLE_RATE_REQUEST, __opus_check_int_ptr(x) + +/** If set to 1, disables the use of phase inversion for intensity stereo, + * improving the quality of mono downmixes, but slightly reducing normal + * stereo quality. Disabling phase inversion in the decoder does not comply + * with RFC 6716, although it does not cause any interoperability issue and + * is expected to become part of the Opus standard once RFC 6716 is updated + * by draft-ietf-codec-opus-update. + * @see OPUS_GET_PHASE_INVERSION_DISABLED + * @param[in] x opus_int32: Allowed values: + *
+ * <dl>
+ * <dt>0</dt><dd>Enable phase inversion (default).</dd>
+ * <dt>1</dt><dd>Disable phase inversion.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_SET_PHASE_INVERSION_DISABLED(x) OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured phase inversion status. + * @see OPUS_SET_PHASE_INVERSION_DISABLED + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>Stereo phase inversion enabled (default).</dd>
+ * <dt>1</dt><dd>Stereo phase inversion disabled.</dd>
+ * </dl>
+ * @hideinitializer */ +#define OPUS_GET_PHASE_INVERSION_DISABLED(x) OPUS_GET_PHASE_INVERSION_DISABLED_REQUEST, __opus_check_int_ptr(x) +/** Gets the DTX state of the encoder. + * Returns whether the last encoded frame was either a comfort noise update + * during DTX or not encoded because of DTX. + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * <dl>
+ * <dt>0</dt><dd>The encoder is not in DTX.</dd>
+ * <dt>1</dt><dd>The encoder is in DTX.</dd>
+ * </dl>
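+ *
+ * A sketch of checking DTX activity after encoding a frame (assuming an
+ * encoder handle <code>enc</code>):
+ * @code
+ * opus_int32 in_dtx;
+ * opus_encoder_ctl(enc, OPUS_GET_IN_DTX(&in_dtx));
+ * @endcode
+ * A nonzero result means the last frame was a comfort noise update or was
+ * not encoded at all because of DTX.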
+ * @hideinitializer */ +#define OPUS_GET_IN_DTX(x) OPUS_GET_IN_DTX_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_decoderctls Decoder related CTLs + * @see opus_genericctls, opus_encoderctls, opus_decoder + * @{ + */ + +/** Configures decoder gain adjustment. + * Scales the decoded output by a factor specified in Q8 dB units. + * This has a maximum range of -32768 to 32767 inclusive, and returns + * OPUS_BAD_ARG otherwise. The default is zero indicating no adjustment. + * This setting survives decoder reset. + * + * gain = pow(10, x/(20.0*256)) + * + * @param[in] x opus_int32: Amount to scale PCM signal by in Q8 dB units. + * @hideinitializer */ +#define OPUS_SET_GAIN(x) OPUS_SET_GAIN_REQUEST, __opus_check_int(x) +/** Gets the decoder's configured gain adjustment. @see OPUS_SET_GAIN + * + * @param[out] x opus_int32 *: Amount to scale PCM signal by in Q8 dB units. + * @hideinitializer */ +#define OPUS_GET_GAIN(x) OPUS_GET_GAIN_REQUEST, __opus_check_int_ptr(x) + +/** Gets the duration (in samples) of the last packet successfully decoded or concealed. + * @param[out] x opus_int32 *: Number of samples (at current sampling rate). + * @hideinitializer */ +#define OPUS_GET_LAST_PACKET_DURATION(x) OPUS_GET_LAST_PACKET_DURATION_REQUEST, __opus_check_int_ptr(x) + +/** Gets the pitch of the last decoded frame, if available. + * This can be used for any post-processing algorithm requiring the use of pitch, + * e.g. time stretching/shortening. If the last frame was not voiced, or if the + * pitch was not coded in the frame, then zero is returned. + * + * This CTL is only implemented for decoder instances. + * + * @param[out] x opus_int32 *: pitch period at 48 kHz (or 0 if not available) + * + * @hideinitializer */ +#define OPUS_GET_PITCH(x) OPUS_GET_PITCH_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_libinfo Opus library information functions + * @{ + */ + +/** Converts an opus error code into a human readable string. + * + * @param[in] error int: Error number + * @returns Error string + */ +OPUS_EXPORT const char *opus_strerror(int error); + +/** Gets the libopus version string. + * + * Applications may look for the substring "-fixed" in the version string to + * determine whether they have a fixed-point or floating-point build at + * runtime. + * + * @returns Version string + */ +OPUS_EXPORT const char *opus_get_version_string(void); +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_DEFINES_H */ diff --git a/third-party/webrtc/dependencies/third_party/opus/src/include/opus_multistream.h b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_multistream.h new file mode 100644 index 0000000000..babcee6905 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_multistream.h @@ -0,0 +1,660 @@ +/* Copyright (c) 2011 Xiph.Org Foundation + Written by Jean-Marc Valin */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus_multistream.h + * @brief Opus reference implementation multistream API + */ + +#ifndef OPUS_MULTISTREAM_H +#define OPUS_MULTISTREAM_H + +#include "opus.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond OPUS_INTERNAL_DOC */ + +/** Macros to trigger compilation errors when the wrong types are provided to a + * CTL. */ +/**@{*/ +#define __opus_check_encstate_ptr(ptr) ((ptr) + ((ptr) - (OpusEncoder**)(ptr))) +#define __opus_check_decstate_ptr(ptr) ((ptr) + ((ptr) - (OpusDecoder**)(ptr))) +/**@}*/ + +/** These are the actual encoder and decoder CTL ID numbers. + * They should not be used directly by applications. + * In general, SETs should be even and GETs should be odd.*/ +/**@{*/ +#define OPUS_MULTISTREAM_GET_ENCODER_STATE_REQUEST 5120 +#define OPUS_MULTISTREAM_GET_DECODER_STATE_REQUEST 5122 +/**@}*/ + +/** @endcond */ + +/** @defgroup opus_multistream_ctls Multistream specific encoder and decoder CTLs + * + * These are convenience macros that are specific to the + * opus_multistream_encoder_ctl() and opus_multistream_decoder_ctl() + * interface. + * The CTLs from @ref opus_genericctls, @ref opus_encoderctls, and + * @ref opus_decoderctls may be applied to a multistream encoder or decoder as + * well. + * In addition, you may retrieve the encoder or decoder state for an specific + * stream via #OPUS_MULTISTREAM_GET_ENCODER_STATE or + * #OPUS_MULTISTREAM_GET_DECODER_STATE and apply CTLs to it individually. + */ +/**@{*/ + +/** Gets the encoder state for an individual stream of a multistream encoder. + * @param[in] x opus_int32: The index of the stream whose encoder you + * wish to retrieve. + * This must be non-negative and less than + * the streams parameter used + * to initialize the encoder. + * @param[out] y OpusEncoder**: Returns a pointer to the given + * encoder state. + * @retval OPUS_BAD_ARG The index of the requested stream was out of range. + * @hideinitializer + */ +#define OPUS_MULTISTREAM_GET_ENCODER_STATE(x,y) OPUS_MULTISTREAM_GET_ENCODER_STATE_REQUEST, __opus_check_int(x), __opus_check_encstate_ptr(y) + +/** Gets the decoder state for an individual stream of a multistream decoder. + * @param[in] x opus_int32: The index of the stream whose decoder you + * wish to retrieve. + * This must be non-negative and less than + * the streams parameter used + * to initialize the decoder. + * @param[out] y OpusDecoder**: Returns a pointer to the given + * decoder state. + * @retval OPUS_BAD_ARG The index of the requested stream was out of range. 
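+ *
+ * For example, a per-stream CTL can be issued by first fetching the stream's
+ * elementary decoder (a sketch; <code>msdec</code> is an assumed
+ * OpusMSDecoder handle):
+ * @code
+ * OpusDecoder *stream_dec;
+ * opus_int32 rate;
+ * opus_multistream_decoder_ctl(msdec,
+ *     OPUS_MULTISTREAM_GET_DECODER_STATE(0, &stream_dec));
+ * opus_decoder_ctl(stream_dec, OPUS_GET_SAMPLE_RATE(&rate));
+ * @endcode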
+ * @hideinitializer + */ +#define OPUS_MULTISTREAM_GET_DECODER_STATE(x,y) OPUS_MULTISTREAM_GET_DECODER_STATE_REQUEST, __opus_check_int(x), __opus_check_decstate_ptr(y) + +/**@}*/ + +/** @defgroup opus_multistream Opus Multistream API + * @{ + * + * The multistream API allows individual Opus streams to be combined into a + * single packet, enabling support for up to 255 channels. Unlike an + * elementary Opus stream, the encoder and decoder must negotiate the channel + * configuration before the decoder can successfully interpret the data in the + * packets produced by the encoder. Some basic information, such as packet + * duration, can be computed without any special negotiation. + * + * The format for multistream Opus packets is defined in + *
RFC 7845 + * and is based on the self-delimited Opus framing described in Appendix B of + * RFC 6716. + * Normal Opus packets are just a degenerate case of multistream Opus packets, + * and can be encoded or decoded with the multistream API by setting + * streams to 1 when initializing the encoder or + * decoder. + * + * Multistream Opus streams can contain up to 255 elementary Opus streams. + * These may be either "uncoupled" or "coupled", indicating that the decoder + * is configured to decode them to either 1 or 2 channels, respectively. + * The streams are ordered so that all coupled streams appear at the + * beginning. + * + * A mapping table defines which decoded channel i + * should be used for each input/output (I/O) channel j. This table is + * typically provided as an unsigned char array. + * Let i = mapping[j] be the index for I/O channel j. + * If i < 2*coupled_streams, then I/O channel j is + * encoded as the left channel of stream (i/2) if i + * is even, or as the right channel of stream (i/2) if + * i is odd. Otherwise, I/O channel j is encoded as + * mono in stream (i - coupled_streams), unless it has the special + * value 255, in which case it is omitted from the encoding entirely (the + * decoder will reproduce it as silence). Each value i must either + * be the special value 255 or be less than streams + coupled_streams. + * + * The output channels specified by the encoder + * should use the + * Vorbis + * channel ordering. A decoder may wish to apply an additional permutation + * to the mapping the encoder used to achieve a different output channel + * order (e.g. for outputing in WAV order). + * + * Each multistream packet contains an Opus packet for each stream, and all of + * the Opus packets in a single multistream packet must have the same + * duration. Therefore the duration of a multistream packet can be extracted + * from the TOC sequence of the first stream, which is located at the + * beginning of the packet, just like an elementary Opus stream: + * + * @code + * int nb_samples; + * int nb_frames; + * nb_frames = opus_packet_get_nb_frames(data, len); + * if (nb_frames < 1) + * return nb_frames; + * nb_samples = opus_packet_get_samples_per_frame(data, 48000) * nb_frames; + * @endcode + * + * The general encoding and decoding process proceeds exactly the same as in + * the normal @ref opus_encoder and @ref opus_decoder APIs. + * See their documentation for an overview of how to use the corresponding + * multistream functions. + */ + +/** Opus multistream encoder state. + * This contains the complete state of a multistream Opus encoder. + * It is position independent and can be freely copied. + * @see opus_multistream_encoder_create + * @see opus_multistream_encoder_init + */ +typedef struct OpusMSEncoder OpusMSEncoder; + +/** Opus multistream decoder state. + * This contains the complete state of a multistream Opus decoder. + * It is position independent and can be freely copied. + * @see opus_multistream_decoder_create + * @see opus_multistream_decoder_init + */ +typedef struct OpusMSDecoder OpusMSDecoder; + +/**\name Multistream encoder functions */ +/**@{*/ + +/** Gets the size of an OpusMSEncoder structure. + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. 
+ * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_encoder_get_size( + int streams, + int coupled_streams +); + +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_surround_encoder_get_size( + int channels, + int mapping_family +); + + +/** Allocates and initializes a multistream encoder state. + * Call opus_multistream_encoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param[in] mapping const unsigned char[channels]: Mapping from + * encoded channels to input channels, as described in + * @ref opus_multistream. As an extra constraint, the + * multistream encoder does not allow encoding coupled + * streams for which one channel is unused since this + * is never a good idea. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ * <dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
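+ *
+ * As an illustration of the channel mapping described in
+ * @ref opus_multistream, a plain stereo encoder is one coupled stream with
+ * the identity mapping (a sketch; variable names are illustrative only):
+ * @code
+ * int err;
+ * unsigned char mapping[2] = {0, 1};
+ * OpusMSEncoder *msenc = opus_multistream_encoder_create(
+ *     48000, 2, 1, 1, mapping, OPUS_APPLICATION_AUDIO, &err);
+ * @endcode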
+ * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSEncoder *opus_multistream_encoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int application, + int *error +) OPUS_ARG_NONNULL(5); + +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSEncoder *opus_multistream_surround_encoder_create( + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + unsigned char *mapping, + int application, + int *error +) OPUS_ARG_NONNULL(4) OPUS_ARG_NONNULL(5) OPUS_ARG_NONNULL(6); + +/** Initialize a previously allocated multistream encoder state. + * The memory pointed to by \a st must be at least the size returned by + * opus_multistream_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_multistream_encoder_create + * @see opus_multistream_encoder_get_size + * @param st OpusMSEncoder*: Multistream encoder state to initialize. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param[in] mapping const unsigned char[channels]: Mapping from + * encoded channels to input channels, as described in + * @ref opus_multistream. As an extra constraint, the + * multistream encoder does not allow encoding coupled + * streams for which one channel is unused since this + * is never a good idea. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ * <dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
+ * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_multistream_encoder_init( + OpusMSEncoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +OPUS_EXPORT int opus_multistream_surround_encoder_init( + OpusMSEncoder *st, + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + unsigned char *mapping, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(5) OPUS_ARG_NONNULL(6) OPUS_ARG_NONNULL(7); + +/** Encodes a multistream Opus frame. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param[in] pcm const opus_int16*: The input signal as interleaved + * samples. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_encode( + OpusMSEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes a multistream Opus frame from floating point input. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param[in] pcm const float*: The input signal as interleaved + * samples with a normal range of + * +/-1.0. + * Samples with a range beyond +/-1.0 + * are supported but will be clipped by + * decoders using the integer API and + * should only be used if it is known + * that the far end supports extended + * dynamic range. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_encode_float( + OpusMSEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Frees an OpusMSEncoder allocated by + * opus_multistream_encoder_create(). + * @param st OpusMSEncoder*: Multistream encoder state to be freed. + */ +OPUS_EXPORT void opus_multistream_encoder_destroy(OpusMSEncoder *st); + +/** Perform a CTL function on a multistream Opus encoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_encoderctls, or @ref opus_multistream_ctls. + * @see opus_genericctls + * @see opus_encoderctls + * @see opus_multistream_ctls + */ +OPUS_EXPORT int opus_multistream_encoder_ctl(OpusMSEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/**@}*/ + +/**\name Multistream decoder functions */ +/**@{*/ + +/** Gets the size of an OpusMSDecoder structure. + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_decoder_get_size( + int streams, + int coupled_streams +); + +/** Allocates and initializes a multistream decoder state. + * Call opus_multistream_decoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] mapping const unsigned char[channels]: Mapping from + * coded channels to output channels, as described in + * @ref opus_multistream. + * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSDecoder *opus_multistream_decoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int *error +) OPUS_ARG_NONNULL(5); + +/** Intialize a previously allocated decoder state object. + * The memory pointed to by \a st must be at least the size returned by + * opus_multistream_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. 
+ * @see opus_multistream_decoder_create + * @see opus_multistream_deocder_get_size + * @param st OpusMSEncoder*: Multistream encoder state to initialize. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] mapping const unsigned char[channels]: Mapping from + * coded channels to output channels, as described in + * @ref opus_multistream. + * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_multistream_decoder_init( + OpusMSDecoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +/** Decode a multistream Opus packet. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_decode( + OpusMSDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode a multistream Opus packet with floating point output. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. 
In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_decode_float( + OpusMSDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on a multistream Opus decoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_decoderctls, or @ref opus_multistream_ctls. + * @see opus_genericctls + * @see opus_decoderctls + * @see opus_multistream_ctls + */ +OPUS_EXPORT int opus_multistream_decoder_ctl(OpusMSDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/** Frees an OpusMSDecoder allocated by + * opus_multistream_decoder_create(). + * @param st OpusMSDecoder: Multistream decoder state to be freed. + */ +OPUS_EXPORT void opus_multistream_decoder_destroy(OpusMSDecoder *st); + +/**@}*/ + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_MULTISTREAM_H */ diff --git a/third-party/webrtc/dependencies/third_party/opus/src/include/opus_projection.h b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_projection.h new file mode 100644 index 0000000000..9dabf4e85c --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_projection.h @@ -0,0 +1,568 @@ +/* Copyright (c) 2017 Google Inc. + Written by Andrew Allen */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +/** + * @file opus_projection.h + * @brief Opus projection reference API + */ + +#ifndef OPUS_PROJECTION_H +#define OPUS_PROJECTION_H + +#include "opus_multistream.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond OPUS_INTERNAL_DOC */ + +/** These are the actual encoder and decoder CTL ID numbers. + * They should not be used directly by applications.c + * In general, SETs should be even and GETs should be odd.*/ +/**@{*/ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_GAIN_REQUEST 6001 +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE_REQUEST 6003 +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_REQUEST 6005 +/**@}*/ + + +/** @endcond */ + +/** @defgroup opus_projection_ctls Projection specific encoder and decoder CTLs + * + * These are convenience macros that are specific to the + * opus_projection_encoder_ctl() and opus_projection_decoder_ctl() + * interface. + * The CTLs from @ref opus_genericctls, @ref opus_encoderctls, + * @ref opus_decoderctls, and @ref opus_multistream_ctls may be applied to a + * projection encoder or decoder as well. + */ +/**@{*/ + +/** Gets the gain (in dB. S7.8-format) of the demixing matrix from the encoder. + * @param[out] x opus_int32 *: Returns the gain (in dB. S7.8-format) + * of the demixing matrix. + * @hideinitializer + */ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_GAIN(x) OPUS_PROJECTION_GET_DEMIXING_MATRIX_GAIN_REQUEST, __opus_check_int_ptr(x) + + +/** Gets the size in bytes of the demixing matrix from the encoder. + * @param[out] x opus_int32 *: Returns the size in bytes of the + * demixing matrix. + * @hideinitializer + */ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE(x) OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE_REQUEST, __opus_check_int_ptr(x) + + +/** Copies the demixing matrix to the supplied pointer location. + * @param[out] x unsigned char *: Returns the demixing matrix to the + * supplied pointer location. + * @param y opus_int32: The size in bytes of the reserved memory at the + * pointer location. + * @hideinitializer + */ +#define OPUS_PROJECTION_GET_DEMIXING_MATRIX(x,y) OPUS_PROJECTION_GET_DEMIXING_MATRIX_REQUEST, x, __opus_check_int(y) + + +/**@}*/ + +/** Opus projection encoder state. + * This contains the complete state of a projection Opus encoder. + * It is position independent and can be freely copied. + * @see opus_projection_ambisonics_encoder_create + */ +typedef struct OpusProjectionEncoder OpusProjectionEncoder; + + +/** Opus projection decoder state. + * This contains the complete state of a projection Opus decoder. + * It is position independent and can be freely copied. + * @see opus_projection_decoder_create + * @see opus_projection_decoder_init + */ +typedef struct OpusProjectionDecoder OpusProjectionDecoder; + + +/**\name Projection encoder functions */ +/**@{*/ + +/** Gets the size of an OpusProjectionEncoder structure. + * @param channels int: The total number of input channels to encode. + * This must be no more than 255. + * @param mapping_family int: The mapping family to use for selecting + * the appropriate projection. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_projection_ambisonics_encoder_get_size( + int channels, + int mapping_family +); + + +/** Allocates and initializes a projection encoder state. + * Call opus_projection_encoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). 
+ * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param mapping_family int: The mapping family to use for selecting + * the appropriate projection. + * @param[out] streams int *: The total number of streams that will + * be encoded from the input. + * @param[out] coupled_streams int *: Number of coupled (2 channel) + * streams that will be encoded from the input. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ * <dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
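+ *
+ * A sketch of creating a first-order ambisonics encoder (4 channels,
+ * mapping family 3, which selects the projection layout in the reference
+ * implementation) and querying the size of its demixing matrix; variable
+ * names are illustrative only:
+ * @code
+ * int err, streams, coupled;
+ * opus_int32 matrix_size;
+ * OpusProjectionEncoder *penc = opus_projection_ambisonics_encoder_create(
+ *     48000, 4, 3, &streams, &coupled, OPUS_APPLICATION_AUDIO, &err);
+ * opus_projection_encoder_ctl(penc,
+ *     OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE(&matrix_size));
+ * @endcode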
+ * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusProjectionEncoder *opus_projection_ambisonics_encoder_create( + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + int application, + int *error +) OPUS_ARG_NONNULL(4) OPUS_ARG_NONNULL(5); + + +/** Initialize a previously allocated projection encoder state. + * The memory pointed to by \a st must be at least the size returned by + * opus_projection_ambisonics_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_projection_ambisonics_encoder_create + * @see opus_projection_ambisonics_encoder_get_size + * @param st OpusProjectionEncoder*: Projection encoder state to initialize. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ * <dl>
+ * <dt>#OPUS_APPLICATION_VOIP</dt>
+ * <dd>Process signal for improved speech intelligibility.</dd>
+ * <dt>#OPUS_APPLICATION_AUDIO</dt>
+ * <dd>Favor faithfulness to the original input.</dd>
+ * <dt>#OPUS_APPLICATION_RESTRICTED_LOWDELAY</dt>
+ * <dd>Configure the minimum possible coding delay by disabling certain modes
+ * of operation.</dd>
+ * </dl>
+ * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_projection_ambisonics_encoder_init( + OpusProjectionEncoder *st, + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(5) OPUS_ARG_NONNULL(6); + + +/** Encodes a projection Opus frame. + * @param st OpusProjectionEncoder*: Projection encoder state. + * @param[in] pcm const opus_int16*: The input signal as interleaved + * samples. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_encode( + OpusProjectionEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + + +/** Encodes a projection Opus frame from floating point input. + * @param st OpusProjectionEncoder*: Projection encoder state. + * @param[in] pcm const float*: The input signal as interleaved + * samples with a normal range of + * +/-1.0. + * Samples with a range beyond +/-1.0 + * are supported but will be clipped by + * decoders using the integer API and + * should only be used if it is known + * that the far end supports extended + * dynamic range. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_encode_float( + OpusProjectionEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + + +/** Frees an OpusProjectionEncoder allocated by + * opus_projection_ambisonics_encoder_create(). + * @param st OpusProjectionEncoder*: Projection encoder state to be freed. + */ +OPUS_EXPORT void opus_projection_encoder_destroy(OpusProjectionEncoder *st); + + +/** Perform a CTL function on a projection Opus encoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusProjectionEncoder*: Projection encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_encoderctls, @ref opus_multistream_ctls, or + * @ref opus_projection_ctls + * @see opus_genericctls + * @see opus_encoderctls + * @see opus_multistream_ctls + * @see opus_projection_ctls + */ +OPUS_EXPORT int opus_projection_encoder_ctl(OpusProjectionEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); + + +/**@}*/ + +/**\name Projection decoder functions */ +/**@{*/ + +/** Gets the size of an OpusProjectionDecoder structure. + * @param channels int: The total number of output channels. + * This must be no more than 255. + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_projection_decoder_get_size( + int channels, + int streams, + int coupled_streams +); + + +/** Allocates and initializes a projection decoder state. + * Call opus_projection_decoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] demixing_matrix const unsigned char[demixing_matrix_size]: Demixing matrix + * that mapping from coded channels to output channels, + * as described in @ref opus_projection and + * @ref opus_projection_ctls. + * @param demixing_matrix_size opus_int32: The size in bytes of the + * demixing matrix, as + * described in @ref + * opus_projection_ctls. + * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusProjectionDecoder *opus_projection_decoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + unsigned char *demixing_matrix, + opus_int32 demixing_matrix_size, + int *error +) OPUS_ARG_NONNULL(5); + + +/** Intialize a previously allocated projection decoder state object. + * The memory pointed to by \a st must be at least the size returned by + * opus_projection_decoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_projection_decoder_create + * @see opus_projection_deocder_get_size + * @param st OpusProjectionDecoder*: Projection encoder state to initialize. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] demixing_matrix const unsigned char[demixing_matrix_size]: Demixing matrix + * that mapping from coded channels to output channels, + * as described in @ref opus_projection and + * @ref opus_projection_ctls. + * @param demixing_matrix_size opus_int32: The size in bytes of the + * demixing matrix, as + * described in @ref + * opus_projection_ctls. + * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_projection_decoder_init( + OpusProjectionDecoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + unsigned char *demixing_matrix, + opus_int32 demixing_matrix_size +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + + +/** Decode a projection Opus packet. + * @param st OpusProjectionDecoder*: Projection decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_decode( + OpusProjectionDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + + +/** Decode a projection Opus packet with floating point output. + * @param st OpusProjectionDecoder*: Projection decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_projection_decode_float( + OpusProjectionDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + + +/** Perform a CTL function on a projection Opus decoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusProjectionDecoder*: Projection decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_decoderctls, @ref opus_multistream_ctls, or + * @ref opus_projection_ctls. + * @see opus_genericctls + * @see opus_decoderctls + * @see opus_multistream_ctls + * @see opus_projection_ctls + */ +OPUS_EXPORT int opus_projection_decoder_ctl(OpusProjectionDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + + +/** Frees an OpusProjectionDecoder allocated by + * opus_projection_decoder_create(). + * @param st OpusProjectionDecoder: Projection decoder state to be freed. + */ +OPUS_EXPORT void opus_projection_decoder_destroy(OpusProjectionDecoder *st); + + +/**@}*/ + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_PROJECTION_H */ diff --git a/third-party/webrtc/dependencies/third_party/opus/src/include/opus_types.h b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_types.h new file mode 100644 index 0000000000..7cf675580f --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/opus/src/include/opus_types.h @@ -0,0 +1,166 @@ +/* (C) COPYRIGHT 1994-2002 Xiph.Org Foundation */ +/* Modified by Jean-Marc Valin */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +/* opus_types.h based on ogg_types.h from libogg */ + +/** + @file opus_types.h + @brief Opus reference implementation types +*/ +#ifndef OPUS_TYPES_H +#define OPUS_TYPES_H + +#define opus_int int /* used for counters etc; at least 16 bits */ +#define opus_int64 long long +#define opus_int8 signed char + +#define opus_uint unsigned int /* used for counters etc; at least 16 bits */ +#define opus_uint64 unsigned long long +#define opus_uint8 unsigned char + +/* Use the real stdint.h if it's there (taken from Paul Hsieh's pstdint.h) */ +#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined(__GNUC__) && (defined(_STDINT_H) || defined(_STDINT_H_)) || defined (HAVE_STDINT_H)) +#include +# undef opus_int64 +# undef opus_int8 +# undef opus_uint64 +# undef opus_uint8 + typedef int8_t opus_int8; + typedef uint8_t opus_uint8; + typedef int16_t opus_int16; + typedef uint16_t opus_uint16; + typedef int32_t opus_int32; + typedef uint32_t opus_uint32; + typedef int64_t opus_int64; + typedef uint64_t opus_uint64; +#elif defined(_WIN32) + +# if defined(__CYGWIN__) +# include <_G_config.h> + typedef _G_int32_t opus_int32; + typedef _G_uint32_t opus_uint32; + typedef _G_int16 opus_int16; + typedef _G_uint16 opus_uint16; +# elif defined(__MINGW32__) + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; +# elif defined(__MWERKS__) + typedef int opus_int32; + typedef unsigned int opus_uint32; + typedef short opus_int16; + typedef unsigned short opus_uint16; +# else + /* MSVC/Borland */ + typedef __int32 opus_int32; + typedef unsigned __int32 opus_uint32; + typedef __int16 opus_int16; + typedef unsigned __int16 opus_uint16; +# endif + +#elif defined(__MACOS__) + +# include + typedef SInt16 opus_int16; + typedef UInt16 opus_uint16; + typedef SInt32 opus_int32; + typedef UInt32 opus_uint32; + +#elif (defined(__APPLE__) && defined(__MACH__)) /* MacOS X Framework build */ + +# include + typedef int16_t opus_int16; + typedef u_int16_t opus_uint16; + typedef int32_t opus_int32; + typedef u_int32_t opus_uint32; + +#elif defined(__BEOS__) + + /* Be */ +# include + typedef int16 opus_int16; + typedef u_int16 opus_uint16; + typedef int32_t opus_int32; + typedef u_int32_t opus_uint32; + +#elif defined (__EMX__) + + /* OS/2 GCC */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined (DJGPP) + + /* DJGPP */ + typedef short 
opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined(R5900) + + /* PS2 EE */ + typedef int opus_int32; + typedef unsigned opus_uint32; + typedef short opus_int16; + typedef unsigned short opus_uint16; + +#elif defined(__SYMBIAN32__) + + /* Symbian GCC */ + typedef signed short opus_int16; + typedef unsigned short opus_uint16; + typedef signed int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined(CONFIG_TI_C54X) || defined (CONFIG_TI_C55X) + + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef long opus_int32; + typedef unsigned long opus_uint32; + +#elif defined(CONFIG_TI_C6X) + + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#else + + /* Give up, take a reasonable guess */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#endif + +#endif /* OPUS_TYPES_H */ diff --git a/third-party/webrtc/webrtc b/third-party/webrtc/webrtc index a206ca345b..2a9b8c92f7 160000 --- a/third-party/webrtc/webrtc +++ b/third-party/webrtc/webrtc @@ -1 +1 @@ -Subproject commit a206ca345bbbe520e0506ce4caf3ab4844204a58 +Subproject commit 2a9b8c92f7b95d677fd97c363e95cdec1de516da
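The opus_projection.h additions above describe a complete encode/decode round trip for ambisonic audio. Below is a minimal sketch of how those entry points fit together, assuming first-order ambisonics input (4 channels, mapping family 3), 48 kHz sampling, and the demixing-matrix CTLs referenced as @ref opus_projection_ctls (OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE / OPUS_PROJECTION_GET_DEMIXING_MATRIX). It is illustrative only and not part of the patch; the include path and CTL macro names are assumptions based on the documentation above.

#include <opus_projection.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  int err, streams, coupled;
  /* First-order ambisonics: 4 input channels, mapping family 3. */
  OpusProjectionEncoder *enc = opus_projection_ambisonics_encoder_create(
      48000, 4, 3, &streams, &coupled, OPUS_APPLICATION_AUDIO, &err);
  if (err != OPUS_OK) return 1;

  /* Fetch the demixing matrix that the decoder needs (CTL macros assumed
     from @ref opus_projection_ctls). */
  opus_int32 matrix_size = 0;
  opus_projection_encoder_ctl(
      enc, OPUS_PROJECTION_GET_DEMIXING_MATRIX_SIZE(&matrix_size));
  unsigned char *matrix = (unsigned char *)malloc((size_t)matrix_size);
  opus_projection_encoder_ctl(
      enc, OPUS_PROJECTION_GET_DEMIXING_MATRIX(matrix, matrix_size));

  OpusProjectionDecoder *dec = opus_projection_decoder_create(
      48000, 4, streams, coupled, matrix, matrix_size, &err);
  if (err != OPUS_OK) return 1;

  /* Encode and immediately decode one 20 ms frame (960 samples/channel). */
  opus_int16 pcm_in[960 * 4] = {0};
  opus_int16 pcm_out[960 * 4];
  unsigned char packet[4000];
  int len = opus_projection_encode(enc, pcm_in, 960, packet, sizeof(packet));
  if (len < 0) return 1;
  int decoded = opus_projection_decode(dec, packet, len, pcm_out, 960, 0);
  printf("encoded %d bytes, decoded %d samples per channel\n", len, decoded);

  opus_projection_decoder_destroy(dec);
  opus_projection_encoder_destroy(enc);
  free(matrix);
  return 0;
}

The decoder is created from the encoder's demixing matrix, so in a real application the matrix (and its size) must be conveyed to the receiving side out of band, alongside the usual stream/coupled-stream counts.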

...) noexcept(noex) -> ReturnType { \ + ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \ + std::terminate(); \ + }; \ + return this->HasValue(); \ + }()); \ + } \ + return invoker; \ + } \ + \ + /*The actual invocation operation with the proper signature*/ \ + ReturnType operator()(P... args) cv ref noexcept(noex) { \ + assert(this->invoker_ != nullptr); \ + return this->ExtractInvoker()( \ + const_cast(&this->state_), \ + static_cast>(args)...); \ + } \ + } + +// Define the `noexcept(true)` specialization only for C++17 and beyond, when +// `noexcept` is part of the type system. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +// A convenience macro that defines specializations for the noexcept(true) and +// noexcept(false) forms, given the other properties. +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true) +#else +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false) +#endif + +// Non-ref-qualified partial specializations +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &); +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&); + +// Lvalue-ref-qualified partial specializations +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &); +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&); + +// Rvalue-ref-qualified partial specializations +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&); +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); + +// Undef the detail-only macros. +#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL +#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_ +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT +#undef ABSL_INTERNAL_NOEXCEPT_SPEC + +} // namespace internal_any_invocable +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/CMakeLists.txt index 5916ae3cf0..1569125480 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/CMakeLists.txt @@ -24,12 +24,13 @@ absl_cc_library( "internal/hash.h" COPTS ${ABSL_DEFAULT_COPTS} - DEPS + DEPS absl::city absl::config absl::core_headers absl::endian absl::fixed_array + absl::function_ref absl::meta absl::int128 absl::strings @@ -54,6 +55,7 @@ absl_cc_library( absl::variant GTest::gmock TESTONLY + PUBLIC ) absl_cc_test( @@ -68,13 +70,22 @@ absl_cc_test( absl::hash absl::hash_testing absl::core_headers + absl::btree + absl::flat_hash_map absl::flat_hash_set + absl::node_hash_map + absl::node_hash_set absl::spy_hash_state absl::meta absl::int128 GTest::gmock_main ) +# Internal-only target, do not depend on directly. +# +# Note: Even though external code should not depend on this target +# directly, it must be marked PUBLIC since it is a dependency of +# hash_testing. absl_cc_library( NAME spy_hash_state @@ -87,8 +98,10 @@ absl_cc_library( absl::strings absl::str_format TESTONLY + PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME city @@ -116,6 +129,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME low_level_hash diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash.h index 8282ea53c6..74e2d7c053 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash.h @@ -26,9 +26,9 @@ // support Abseil hashing without requiring you to define a hashing // algorithm. // * `HashState`, a type-erased class which implements the manipulation of the -// hash state (H) itself, contains member functions `combine()` and -// `combine_contiguous()`, which you can use to contribute to an existing -// hash state when hashing your types. +// hash state (H) itself; contains member functions `combine()`, +// `combine_contiguous()`, and `combine_unordered()`; and which you can use +// to contribute to an existing hash state when hashing your types. // // Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework // provides most of its utility by abstracting away the hash algorithm (and its @@ -40,6 +40,11 @@ // each process. E.g., `absl::Hash{}(9)` in one process and // `absl::Hash{}(9)` in another process are likely to differ. // +// `absl::Hash` may also produce different values from different dynamically +// loaded libraries. For this reason, `absl::Hash` values must never cross +// boundries in dynamically loaded libraries (including when used in types like +// hash containers.) +// // `absl::Hash` is intended to strongly mix input bits with a target of passing // an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect). // @@ -74,7 +79,9 @@ #define ABSL_HASH_HASH_H_ #include +#include +#include "absl/functional/function_ref.h" #include "absl/hash/internal/hash.h" namespace absl { @@ -107,14 +114,27 @@ ABSL_NAMESPACE_BEGIN // * std::string_view (as well as any instance of std::basic_string that // uses char and std::char_traits) // * All the standard sequence containers (provided the elements are hashable) -// * All the standard ordered associative containers (provided the elements are +// * All the standard associative containers (provided the elements are // hashable) // * absl types such as the following: // * absl::string_view -// * absl::InlinedVector -// * absl::FixedArray // * absl::uint128 // * absl::Time, absl::Duration, and absl::TimeZone +// * absl containers (provided the elements are hashable) such as the +// following: +// * absl::flat_hash_set, absl::node_hash_set, absl::btree_set +// * absl::flat_hash_map, absl::node_hash_map, absl::btree_map +// * absl::btree_multiset, absl::btree_multimap +// * absl::InlinedVector +// * absl::FixedArray +// +// When absl::Hash is used to hash an unordered container with a custom hash +// functor, the elements are hashed using default absl::Hash semantics, not +// the custom hash functor. This is consistent with the behavior of +// operator==() on unordered containers, which compares elements pairwise with +// operator==() rather than the custom equality functor. It is usually a +// mistake to use either operator==() or absl::Hash on unordered collections +// that use functors incompatible with operator==() equality. // // Note: the list above is not meant to be exhaustive. Additional type support // may be added, in which case the above list will be updated. @@ -153,7 +173,8 @@ ABSL_NAMESPACE_BEGIN // that are otherwise difficult to extend using `AbslHashValue()`. (See the // `HashState` class below.) 
// -// The "hash state" concept contains two member functions for mixing hash state: +// The "hash state" concept contains three member functions for mixing hash +// state: // // * `H::combine(state, values...)` // @@ -187,6 +208,15 @@ ABSL_NAMESPACE_BEGIN // (it may perform internal optimizations). If you need this guarantee, use a // loop instead. // +// * `H::combine_unordered(state, begin, end)` +// +// Combines a set of elements denoted by an iterator pair into a hash +// state, returning the updated state. Note that the existing hash +// state is move-only and must be passed by value. +// +// Unlike the other two methods, the hashing is order-independent. +// This can be used to hash unordered collections. +// // ----------------------------------------------------------------------------- // Adding Type Support to `absl::Hash` // ----------------------------------------------------------------------------- @@ -243,8 +273,9 @@ size_t HashOf(const Types&... values) { // classes, virtual functions, etc.). The type erasure adds overhead so it // should be avoided unless necessary. // -// Note: This wrapper will only erase calls to: +// Note: This wrapper will only erase calls to // combine_contiguous(H, const unsigned char*, size_t) +// RunCombineUnordered(H, CombinerF) // // All other calls will be handled internally and will not invoke overloads // provided by the wrapped class. @@ -318,6 +349,8 @@ class HashState : public hash_internal::HashStateBase { private: HashState() = default; + friend class HashState::HashStateBase; + template static void CombineContiguousImpl(void* p, const unsigned char* first, size_t size) { @@ -329,16 +362,57 @@ class HashState : public hash_internal::HashStateBase { void Init(T* state) { state_ = state; combine_contiguous_ = &CombineContiguousImpl; + run_combine_unordered_ = &RunCombineUnorderedImpl; + } + + template + struct CombineUnorderedInvoker { + template + void operator()(T inner_state, ConsumerT inner_cb) { + f(HashState::Create(&inner_state), + [&](HashState& inner_erased) { inner_cb(inner_erased.Real()); }); + } + + absl::FunctionRef)> f; + }; + + template + static HashState RunCombineUnorderedImpl( + HashState state, + absl::FunctionRef)> + f) { + // Note that this implementation assumes that inner_state and outer_state + // are the same type. This isn't true in the SpyHash case, but SpyHash + // types are move-convertible to each other, so this still works. + T& real_state = state.Real(); + real_state = T::RunCombineUnordered( + std::move(real_state), CombineUnorderedInvoker{f}); + return state; + } + + template + static HashState RunCombineUnordered(HashState state, CombinerT combiner) { + auto* run = state.run_combine_unordered_; + return run(std::move(state), std::ref(combiner)); } // Do not erase an already erased state. 
void Init(HashState* state) { state_ = state->state_; combine_contiguous_ = state->combine_contiguous_; + run_combine_unordered_ = state->run_combine_unordered_; + } + + template + T& Real() { + return *static_cast(state_); } void* state_; void (*combine_contiguous_)(void*, const unsigned char*, size_t); + HashState (*run_combine_unordered_)( + HashState state, + absl::FunctionRef)>); }; ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_benchmark.cc index d498ac29c0..8712a01cca 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_benchmark.cc @@ -19,6 +19,7 @@ #include #include "absl/base/attributes.h" +#include "absl/container/flat_hash_set.h" #include "absl/hash/hash.h" #include "absl/random/random.h" #include "absl/strings/cord.h" @@ -107,6 +108,44 @@ absl::Cord FragmentedCord(size_t size) { return result; } +template +std::vector Vector(size_t count) { + std::vector result; + for (size_t v = 0; v < count; ++v) { + result.push_back(v); + } + return result; +} + +// Bogus type that replicates an unorderd_set's bit mixing, but with +// vector-speed iteration. This is intended to measure the overhead of unordered +// hashing without counting the speed of unordered_set iteration. +template +struct FastUnorderedSet { + explicit FastUnorderedSet(size_t count) { + for (size_t v = 0; v < count; ++v) { + values.push_back(v); + } + } + std::vector values; + + template + friend H AbslHashValue(H h, const FastUnorderedSet& fus) { + return H::combine(H::combine_unordered(std::move(h), fus.values.begin(), + fus.values.end()), + fus.values.size()); + } +}; + +template +absl::flat_hash_set FlatHashSet(size_t count) { + absl::flat_hash_set result; + for (size_t v = 0; v < count; ++v) { + result.insert(v); + } + return result; +} + // Generates a benchmark and a codegen method for the provided types. The // codegen method provides a well known entrypoint for dumping assembly. #define MAKE_BENCHMARK(hash, name, ...) 
\ @@ -145,10 +184,22 @@ MAKE_BENCHMARK(AbslHash, Cord_Flat_200, FlatCord(200)); MAKE_BENCHMARK(AbslHash, Cord_Flat_5000, FlatCord(5000)); MAKE_BENCHMARK(AbslHash, Cord_Fragmented_200, FragmentedCord(200)); MAKE_BENCHMARK(AbslHash, Cord_Fragmented_5000, FragmentedCord(5000)); -MAKE_BENCHMARK(AbslHash, VectorInt64_10, std::vector(10)); -MAKE_BENCHMARK(AbslHash, VectorInt64_100, std::vector(100)); -MAKE_BENCHMARK(AbslHash, VectorDouble_10, std::vector(10, 1.1)); -MAKE_BENCHMARK(AbslHash, VectorDouble_100, std::vector(100, 1.1)); +MAKE_BENCHMARK(AbslHash, VectorInt64_10, Vector(10)); +MAKE_BENCHMARK(AbslHash, VectorInt64_100, Vector(100)); +MAKE_BENCHMARK(AbslHash, VectorInt64_1000, Vector(1000)); +MAKE_BENCHMARK(AbslHash, VectorDouble_10, Vector(10)); +MAKE_BENCHMARK(AbslHash, VectorDouble_100, Vector(100)); +MAKE_BENCHMARK(AbslHash, VectorDouble_1000, Vector(1000)); +MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_10, FlatHashSet(10)); +MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_100, FlatHashSet(100)); +MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_1000, FlatHashSet(1000)); +MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_10, FlatHashSet(10)); +MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_100, FlatHashSet(100)); +MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_1000, FlatHashSet(1000)); +MAKE_BENCHMARK(AbslHash, FastUnorderedSetInt64_1000, + FastUnorderedSet(1000)); +MAKE_BENCHMARK(AbslHash, FastUnorderedSetDouble_1000, + FastUnorderedSet(1000)); MAKE_BENCHMARK(AbslHash, PairStringString_0, std::make_pair(std::string(), std::string())); MAKE_BENCHMARK(AbslHash, PairStringString_10, @@ -180,6 +231,24 @@ MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_10, std::vector(10, 1.1)); MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_100, std::vector(100, 1.1)); +MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_1000, + std::vector(1000, 1.1)); +MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_10, + FlatHashSet(10)); +MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_100, + FlatHashSet(100)); +MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_1000, + FlatHashSet(1000)); +MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_10, + FlatHashSet(10)); +MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_100, + FlatHashSet(100)); +MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_1000, + FlatHashSet(1000)); +MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetInt64_1000, + FastUnorderedSet(1000)); +MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetDouble_1000, + FastUnorderedSet(1000)); // The latency benchmark attempts to model the speed of the hash function in // production. 
When a hash function is used for hashtable lookups it is rarely diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_test.cc index b3ddebdd42..744a2e54c8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/hash_test.cc @@ -14,12 +14,14 @@ #include "absl/hash/hash.h" +#include #include #include #include #include #include #include +#include #include #include #include @@ -32,12 +34,18 @@ #include #include #include +#include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" #include "absl/hash/hash_testing.h" #include "absl/hash/internal/spy_hash_state.h" #include "absl/meta/type_traits.h" @@ -46,6 +54,56 @@ namespace { +// Utility wrapper of T for the purposes of testing the `AbslHash` type erasure +// mechanism. `TypeErasedValue` can be constructed with a `T`, and can +// be compared and hashed. However, all hashing goes through the hashing +// type-erasure framework. +template +class TypeErasedValue { + public: + TypeErasedValue() = default; + TypeErasedValue(const TypeErasedValue&) = default; + TypeErasedValue(TypeErasedValue&&) = default; + explicit TypeErasedValue(const T& n) : n_(n) {} + + template + friend H AbslHashValue(H hash_state, const TypeErasedValue& v) { + v.HashValue(absl::HashState::Create(&hash_state)); + return hash_state; + } + + void HashValue(absl::HashState state) const { + absl::HashState::combine(std::move(state), n_); + } + + bool operator==(const TypeErasedValue& rhs) const { return n_ == rhs.n_; } + bool operator!=(const TypeErasedValue& rhs) const { return !(*this == rhs); } + + private: + T n_; +}; + +// A TypeErasedValue refinement, for containers. It exposes the wrapped +// `value_type` and is constructible from an initializer list. 
+template +class TypeErasedContainer : public TypeErasedValue { + public: + using value_type = typename T::value_type; + TypeErasedContainer() = default; + TypeErasedContainer(const TypeErasedContainer&) = default; + TypeErasedContainer(TypeErasedContainer&&) = default; + explicit TypeErasedContainer(const T& n) : TypeErasedValue(n) {} + TypeErasedContainer(std::initializer_list init_list) + : TypeErasedContainer(T(init_list.begin(), init_list.end())) {} + // one-argument constructor of value type T, to appease older toolchains that + // get confused by one-element initializer lists in some contexts + explicit TypeErasedContainer(const value_type& v) + : TypeErasedContainer(T(&v, &v + 1)) {} +}; + +template +using TypeErasedVector = TypeErasedContainer>; + using absl::Hash; using absl::hash_internal::SpyHashState; @@ -81,10 +139,10 @@ TYPED_TEST_P(HashValueIntTest, FastPath) { absl::Hash>{}(std::tuple(n))); } -REGISTER_TYPED_TEST_CASE_P(HashValueIntTest, BasicUsage, FastPath); +REGISTER_TYPED_TEST_SUITE_P(HashValueIntTest, BasicUsage, FastPath); using IntTypes = testing::Types; -INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueIntTest, IntTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueIntTest, IntTypes); enum LegacyEnum { kValue1, kValue2, kValue3 }; @@ -127,6 +185,8 @@ TEST(HashValueTest, FloatingPoint) { TEST(HashValueTest, Pointer) { EXPECT_TRUE((is_hashable::value)); + EXPECT_TRUE((is_hashable::value)); + EXPECT_TRUE((is_hashable::value)); int i; int* ptr = &i; @@ -162,10 +222,89 @@ TEST(HashValueTest, PointerAlignment) { // Limit the scope to the bits we would be using for Swisstable. constexpr size_t kMask = (1 << (kLog2NumValues + 7)) - 1; size_t stuck_bits = (~bits_or | bits_and) & kMask; - EXPECT_EQ(stuck_bits, 0) << "0x" << std::hex << stuck_bits; + EXPECT_EQ(stuck_bits, 0u) << "0x" << std::hex << stuck_bits; } } +TEST(HashValueTest, PointerToMember) { + struct Bass { + void q() {} + }; + + struct A : Bass { + virtual ~A() = default; + virtual void vfa() {} + + static auto pq() -> void (A::*)() { return &A::q; } + }; + + struct B : Bass { + virtual ~B() = default; + virtual void vfb() {} + + static auto pq() -> void (B::*)() { return &B::q; } + }; + + struct Foo : A, B { + void f1() {} + void f2() const {} + + int g1() & { return 0; } + int g2() const & { return 0; } + int g3() && { return 0; } + int g4() const && { return 0; } + + int h1() & { return 0; } + int h2() const & { return 0; } + int h3() && { return 0; } + int h4() const && { return 0; } + + int a; + int b; + + const int c = 11; + const int d = 22; + }; + + EXPECT_TRUE((is_hashable::value)); + EXPECT_TRUE((is_hashable::value)); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + std::make_tuple(&Foo::a, &Foo::b, static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + std::make_tuple(&Foo::c, &Foo::d, static_cast(nullptr), + &Foo::a, &Foo::b))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( + &Foo::f1, static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( + &Foo::f2, static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( + &Foo::g1, &Foo::h1, static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( + &Foo::g2, &Foo::h2, static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( + &Foo::g3, &Foo::h3, static_cast(nullptr)))); + + 
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( + &Foo::g4, &Foo::h4, static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + std::make_tuple(static_cast(&Foo::vfa), + static_cast(&Foo::vfb), + static_cast(nullptr)))); + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + std::make_tuple(static_cast(Foo::A::pq()), + static_cast(Foo::B::pq()), + static_cast(nullptr)))); +} + TEST(HashValueTest, PairAndTuple) { EXPECT_TRUE((is_hashable>::value)); EXPECT_TRUE((is_hashable>::value)); @@ -381,6 +520,52 @@ TEST(HashValueTest, StdBitset) { std::bitset(bit_strings[5].c_str())})); } // namespace +// Dummy type with unordered equality and hashing semantics. This preserves +// input order internally, and is used below to ensure we get test coverage +// for equal sequences with different iteraton orders. +template +class UnorderedSequence { + public: + UnorderedSequence() = default; + template + UnorderedSequence(std::initializer_list l) + : values_(l.begin(), l.end()) {} + template ::value, + bool>::type = true> + UnorderedSequence(ForwardIterator begin, ForwardIterator end) + : values_(begin, end) {} + // one-argument constructor of value type T, to appease older toolchains that + // get confused by one-element initializer lists in some contexts + explicit UnorderedSequence(const T& v) : values_(&v, &v + 1) {} + + using value_type = T; + + size_t size() const { return values_.size(); } + typename std::vector::const_iterator begin() const { + return values_.begin(); + } + typename std::vector::const_iterator end() const { return values_.end(); } + + friend bool operator==(const UnorderedSequence& lhs, + const UnorderedSequence& rhs) { + return lhs.size() == rhs.size() && + std::is_permutation(lhs.begin(), lhs.end(), rhs.begin()); + } + friend bool operator!=(const UnorderedSequence& lhs, + const UnorderedSequence& rhs) { + return !(lhs == rhs); + } + template + friend H AbslHashValue(H h, const UnorderedSequence& u) { + return H::combine(H::combine_unordered(std::move(h), u.begin(), u.end()), + u.size()); + } + + private: + std::vector values_; +}; + template class HashValueSequenceTest : public testing::Test { }; @@ -389,22 +574,66 @@ TYPED_TEST_SUITE_P(HashValueSequenceTest); TYPED_TEST_P(HashValueSequenceTest, BasicUsage) { EXPECT_TRUE((is_hashable::value)); - using ValueType = typename TypeParam::value_type; - auto a = static_cast(0); - auto b = static_cast(23); - auto c = static_cast(42); + using IntType = typename TypeParam::value_type; + auto a = static_cast(0); + auto b = static_cast(23); + auto c = static_cast(42); - EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( - std::make_tuple(TypeParam(), TypeParam{}, TypeParam{a, b, c}, - TypeParam{a, b}, TypeParam{b, c}))); + std::vector exemplars = { + TypeParam(), TypeParam(), TypeParam{a, b, c}, + TypeParam{a, c, b}, TypeParam{c, a, b}, TypeParam{a}, + TypeParam{a, a}, TypeParam{a, a, a}, TypeParam{a, a, b}, + TypeParam{a, b, a}, TypeParam{b, a, a}, TypeParam{a, b}, + TypeParam{b, c}}; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars)); } -REGISTER_TYPED_TEST_CASE_P(HashValueSequenceTest, BasicUsage); -using IntSequenceTypes = - testing::Types, std::forward_list, std::list, - std::vector, std::vector, std::set, - std::multiset>; -INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueSequenceTest, IntSequenceTypes); +REGISTER_TYPED_TEST_SUITE_P(HashValueSequenceTest, BasicUsage); +using IntSequenceTypes = testing::Types< + std::deque, std::forward_list, std::list, 
std::vector, + std::vector, TypeErasedContainer>, std::set, + std::multiset, UnorderedSequence, + TypeErasedContainer>, std::unordered_set, + std::unordered_multiset, absl::flat_hash_set, + absl::node_hash_set, absl::btree_set>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueSequenceTest, IntSequenceTypes); + +template +class HashValueNestedSequenceTest : public testing::Test {}; +TYPED_TEST_SUITE_P(HashValueNestedSequenceTest); + +TYPED_TEST_P(HashValueNestedSequenceTest, BasicUsage) { + using T = TypeParam; + using V = typename T::value_type; + std::vector exemplars = { + // empty case + T{}, + // sets of empty sets + T{V{}}, T{V{}, V{}}, T{V{}, V{}, V{}}, + // multisets of different values + T{V{1}}, T{V{1, 1}, V{1, 1}}, T{V{1, 1, 1}, V{1, 1, 1}, V{1, 1, 1}}, + // various orderings of same nested sets + T{V{}, V{1, 2}}, T{V{}, V{2, 1}}, T{V{1, 2}, V{}}, T{V{2, 1}, V{}}, + // various orderings of various nested sets, case 2 + T{V{1, 2}, V{3, 4}}, T{V{1, 2}, V{4, 3}}, T{V{1, 3}, V{2, 4}}, + T{V{1, 3}, V{4, 2}}, T{V{1, 4}, V{2, 3}}, T{V{1, 4}, V{3, 2}}, + T{V{2, 3}, V{1, 4}}, T{V{2, 3}, V{4, 1}}, T{V{2, 4}, V{1, 3}}, + T{V{2, 4}, V{3, 1}}, T{V{3, 4}, V{1, 2}}, T{V{3, 4}, V{2, 1}}}; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars)); +} + +REGISTER_TYPED_TEST_SUITE_P(HashValueNestedSequenceTest, BasicUsage); +template +using TypeErasedSet = TypeErasedContainer>; + +using NestedIntSequenceTypes = testing::Types< + std::vector>, std::vector>, + std::vector>, UnorderedSequence>, + UnorderedSequence>, + UnorderedSequence>, TypeErasedSet>, + TypeErasedSet>, TypeErasedSet>>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueNestedSequenceTest, + NestedIntSequenceTypes); // Private type that only supports AbslHashValue to make sure our chosen hash // implementation is recursive within absl::Hash. @@ -508,10 +737,10 @@ TEST(HashValueTest, CombinePiecewiseBuffer) { // // This test is run on a buffer that is a multiple of the stride size, and one // that isn't. 
- for (size_t big_buffer_size : {1024 * 2 + 512, 1024 * 3}) { + for (size_t big_buffer_size : {1024u * 2 + 512u, 1024u * 3}) { SCOPED_TRACE(big_buffer_size); std::string big_buffer; - for (int i = 0; i < big_buffer_size; ++i) { + for (size_t i = 0; i < big_buffer_size; ++i) { // Arbitrary string big_buffer.push_back(32 + (i * (i / 3)) % 64); } @@ -564,23 +793,64 @@ TEST(HashValueTest, Variant) { #endif } -TEST(HashValueTest, Maps) { - EXPECT_TRUE((is_hashable>::value)); +template +class HashValueAssociativeMapTest : public testing::Test {}; +TYPED_TEST_SUITE_P(HashValueAssociativeMapTest); - using M = std::map; - EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( - M{}, M{{0, "foo"}}, M{{1, "foo"}}, M{{0, "bar"}}, M{{1, "bar"}}, - M{{0, "foo"}, {42, "bar"}}, M{{1, "foo"}, {42, "bar"}}, - M{{1, "foo"}, {43, "bar"}}, M{{1, "foo"}, {43, "baz"}}))); - - using MM = std::multimap; - EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( - MM{}, MM{{0, "foo"}}, MM{{1, "foo"}}, MM{{0, "bar"}}, MM{{1, "bar"}}, - MM{{0, "foo"}, {0, "bar"}}, MM{{0, "bar"}, {0, "foo"}}, - MM{{0, "foo"}, {42, "bar"}}, MM{{1, "foo"}, {42, "bar"}}, - MM{{1, "foo"}, {1, "foo"}, {43, "bar"}}, MM{{1, "foo"}, {43, "baz"}}))); +TYPED_TEST_P(HashValueAssociativeMapTest, BasicUsage) { + using M = TypeParam; + using V = typename M::value_type; + std::vector exemplars{M{}, + M{V{0, "foo"}}, + M{V{1, "foo"}}, + M{V{0, "bar"}}, + M{V{1, "bar"}}, + M{V{0, "foo"}, V{42, "bar"}}, + M{V{42, "bar"}, V{0, "foo"}}, + M{V{1, "foo"}, V{42, "bar"}}, + M{V{1, "foo"}, V{43, "bar"}}, + M{V{1, "foo"}, V{43, "baz"}}}; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars)); } +REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMapTest, BasicUsage); +using AssociativeMapTypes = testing::Types< + std::map, std::unordered_map, + absl::flat_hash_map, + absl::node_hash_map, absl::btree_map, + UnorderedSequence>>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMapTest, + AssociativeMapTypes); + +template +class HashValueAssociativeMultimapTest : public testing::Test {}; +TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest); + +TYPED_TEST_P(HashValueAssociativeMultimapTest, BasicUsage) { + using MM = TypeParam; + using V = typename MM::value_type; + std::vector exemplars{MM{}, + MM{V{0, "foo"}}, + MM{V{1, "foo"}}, + MM{V{0, "bar"}}, + MM{V{1, "bar"}}, + MM{V{0, "foo"}, V{0, "bar"}}, + MM{V{0, "bar"}, V{0, "foo"}}, + MM{V{0, "foo"}, V{42, "bar"}}, + MM{V{1, "foo"}, V{42, "bar"}}, + MM{V{1, "foo"}, V{1, "foo"}, V{43, "bar"}}, + MM{V{1, "foo"}, V{43, "bar"}, V{1, "foo"}}, + MM{V{1, "foo"}, V{43, "baz"}}}; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars)); +} + +REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest, BasicUsage); +using AssociativeMultimapTypes = + testing::Types, + std::unordered_multimap>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMultimapTest, + AssociativeMultimapTypes); + TEST(HashValueTest, ReferenceWrapper) { EXPECT_TRUE(is_hashable>::value); @@ -818,10 +1088,10 @@ TYPED_TEST_P(HashIntTest, BasicUsage) { Hash>()({})); } -REGISTER_TYPED_TEST_CASE_P(HashIntTest, BasicUsage); +REGISTER_TYPED_TEST_SUITE_P(HashIntTest, BasicUsage); using IntTypes = testing::Types; -INSTANTIATE_TYPED_TEST_CASE_P(My, HashIntTest, IntTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(My, HashIntTest, IntTypes); struct StructWithPadding { char c; @@ -865,10 +1135,10 @@ TEST(HashTest, HashNonUniquelyRepresentedType) { unsigned char buffer2[kNumStructs * 
sizeof(StructWithPadding)]; std::memset(buffer2, 255, sizeof(buffer2)); auto* s2 = reinterpret_cast(buffer2); - for (int i = 0; i < kNumStructs; ++i) { + for (size_t i = 0; i < kNumStructs; ++i) { SCOPED_TRACE(i); - s1[i].c = s2[i].c = '0' + i; - s1[i].i = s2[i].i = i; + s1[i].c = s2[i].c = static_cast('0' + i); + s1[i].i = s2[i].i = static_cast(i); ASSERT_FALSE(memcmp(buffer1 + i * sizeof(StructWithPadding), buffer2 + i * sizeof(StructWithPadding), sizeof(StructWithPadding)) == 0) @@ -928,29 +1198,23 @@ TEST(HashTest, SmallValueOn64ByteBoundary) { Hash()(IntAndString{0, std::string(63, '0')}); } -struct TypeErased { - size_t n; - - template - friend H AbslHashValue(H hash_state, const TypeErased& v) { - v.HashValue(absl::HashState::Create(&hash_state)); - return hash_state; - } - - void HashValue(absl::HashState state) const { - absl::HashState::combine(std::move(state), n); - } -}; - TEST(HashTest, TypeErased) { - EXPECT_TRUE((is_hashable::value)); - EXPECT_TRUE((is_hashable>::value)); + EXPECT_TRUE((is_hashable>::value)); + EXPECT_TRUE((is_hashable, int>>::value)); - EXPECT_EQ(SpyHash(TypeErased{7}), SpyHash(size_t{7})); - EXPECT_NE(SpyHash(TypeErased{7}), SpyHash(size_t{13})); + EXPECT_EQ(SpyHash(TypeErasedValue(7)), SpyHash(size_t{7})); + EXPECT_NE(SpyHash(TypeErasedValue(7)), SpyHash(size_t{13})); - EXPECT_EQ(SpyHash(std::make_pair(TypeErased{7}, 17)), + EXPECT_EQ(SpyHash(std::make_pair(TypeErasedValue(7), 17)), SpyHash(std::make_pair(size_t{7}, 17))); + + absl::flat_hash_set> ss = {{1, 2}, {3, 4}}; + TypeErasedContainer>> es = { + absl::flat_hash_set{1, 2}, {3, 4}}; + absl::flat_hash_set>> se = { + {1, 2}, {3, 4}}; + EXPECT_EQ(SpyHash(ss), SpyHash(es)); + EXPECT_EQ(SpyHash(ss), SpyHash(se)); } struct ValueWithBoolConversion { @@ -962,7 +1226,9 @@ struct ValueWithBoolConversion { namespace std { template <> struct hash { - size_t operator()(ValueWithBoolConversion v) { return v.i; } + size_t operator()(ValueWithBoolConversion v) { + return static_cast(v.i); + } }; } // namespace std diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city.cc index 5460134e57..f0d3196470 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city.cc @@ -97,7 +97,7 @@ static uint32_t Hash32Len13to24(const char *s, size_t len) { uint32_t d = Fetch32(s + (len >> 1)); uint32_t e = Fetch32(s); uint32_t f = Fetch32(s + len - 4); - uint32_t h = len; + uint32_t h = static_cast(len); return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h))))))); } @@ -106,15 +106,15 @@ static uint32_t Hash32Len0to4(const char *s, size_t len) { uint32_t b = 0; uint32_t c = 9; for (size_t i = 0; i < len; i++) { - signed char v = s[i]; - b = b * c1 + v; + signed char v = static_cast(s[i]); + b = b * c1 + static_cast(v); c ^= b; } - return fmix(Mur(b, Mur(len, c))); + return fmix(Mur(b, Mur(static_cast(len), c))); } static uint32_t Hash32Len5to12(const char *s, size_t len) { - uint32_t a = len, b = len * 5, c = 9, d = b; + uint32_t a = static_cast(len), b = a * 5, c = 9, d = b; a += Fetch32(s); b += Fetch32(s + len - 4); c += Fetch32(s + ((len >> 1) & 4)); @@ -129,7 +129,7 @@ uint32_t CityHash32(const char *s, size_t len) { } // len > 24 - uint32_t h = len, g = c1 * len, f = g; + uint32_t h = static_cast(len), g = c1 * h, f = g; uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2; 
uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2; @@ -230,11 +230,11 @@ static uint64_t HashLen0to16(const char *s, size_t len) { return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul); } if (len > 0) { - uint8_t a = s[0]; - uint8_t b = s[len >> 1]; - uint8_t c = s[len - 1]; + uint8_t a = static_cast(s[0]); + uint8_t b = static_cast(s[len >> 1]); + uint8_t c = static_cast(s[len - 1]); uint32_t y = static_cast(a) + (static_cast(b) << 8); - uint32_t z = len + (static_cast(c) << 2); + uint32_t z = static_cast(len) + (static_cast(c) << 2); return ShiftMix(y * k2 ^ z * k0) * k2; } return k2; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city_test.cc index 251d381d73..1bbf02e0d1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/city_test.cc @@ -22,6 +22,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace hash_internal { +namespace { static const uint64_t k0 = 0xc3a5c85c97cb3127ULL; static const uint64_t kSeed0 = 1234567; @@ -590,6 +591,7 @@ TEST(CityHashTest, Unchanging) { TestUnchanging(testdata[i], 0, kDataSize); } +} // namespace } // namespace hash_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.cc index 4b818917c4..11451e575c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.cc @@ -21,9 +21,9 @@ namespace hash_internal { uint64_t MixingHashState::CombineLargeContiguousImpl32( uint64_t state, const unsigned char* first, size_t len) { while (len >= PiecewiseChunkSize()) { - state = - Mix(state, absl::hash_internal::CityHash32(reinterpret_cast(first), - PiecewiseChunkSize())); + state = Mix(state, + hash_internal::CityHash32(reinterpret_cast(first), + PiecewiseChunkSize())); len -= PiecewiseChunkSize(); first += PiecewiseChunkSize(); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.h index f5174096cb..dbdc20504f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/hash.h @@ -21,7 +21,9 @@ #include #include +#include #include +#include #include #include #include @@ -35,6 +37,8 @@ #include #include #include +#include +#include #include #include @@ -42,6 +46,7 @@ #include "absl/base/internal/unaligned_access.h" #include "absl/base/port.h" #include "absl/container/fixed_array.h" +#include "absl/hash/internal/city.h" #include "absl/hash/internal/low_level_hash.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" @@ -49,10 +54,12 @@ #include "absl/types/optional.h" #include "absl/types/variant.h" #include "absl/utility/utility.h" -#include "absl/hash/internal/city.h" namespace absl { ABSL_NAMESPACE_BEGIN + +class HashState; + namespace hash_internal { // Internal detail: Large buffers are hashed in smaller chunks. 
This function @@ -114,24 +121,66 @@ class PiecewiseCombiner { size_t position_; }; +// is_hashable() +// +// Trait class which returns true if T is hashable by the absl::Hash framework. +// Used for the AbslHashValue implementations for composite types below. +template +struct is_hashable; + // HashStateBase // -// A hash state object represents an intermediate state in the computation -// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style -// base class for hash state implementations. Developers adding type support -// for `absl::Hash` should not rely on any parts of the state object other than -// the following member functions: +// An internal implementation detail that contains common implementation details +// for all of the "hash state objects" objects generated by Abseil. This is not +// a public API; users should not create classes that inherit from this. +// +// A hash state object is the template argument `H` passed to `AbslHashValue`. +// It represents an intermediate state in the computation of an unspecified hash +// algorithm. `HashStateBase` provides a CRTP style base class for hash state +// implementations. Developers adding type support for `absl::Hash` should not +// rely on any parts of the state object other than the following member +// functions: // // * HashStateBase::combine() // * HashStateBase::combine_contiguous() +// * HashStateBase::combine_unordered() // -// A derived hash state class of type `H` must provide a static member function +// A derived hash state class of type `H` must provide a public member function // with a signature similar to the following: // // `static H combine_contiguous(H state, const unsigned char*, size_t)`. // +// It must also provide a private template method named RunCombineUnordered. +// +// A "consumer" is a 1-arg functor returning void. Its argument is a reference +// to an inner hash state object, and it may be called multiple times. When +// called, the functor consumes the entropy from the provided state object, +// and resets that object to its empty state. +// +// A "combiner" is a stateless 2-arg functor returning void. Its arguments are +// an inner hash state object and an ElementStateConsumer functor. A combiner +// uses the provided inner hash state object to hash each element of the +// container, passing the inner hash state object to the consumer after hashing +// each element. +// +// Given these definitions, a derived hash state class of type H +// must provide a private template method with a signature similar to the +// following: +// +// `template ` +// `static H RunCombineUnordered(H outer_state, CombinerT combiner)` +// +// This function is responsible for constructing the inner state object and +// providing a consumer to the combiner. It uses side effects of the consumer +// and combiner to mix the state of each element in an order-independent manner, +// and uses this to return an updated value of `outer_state`. +// +// This inside-out approach generates efficient object code in the normal case, +// but allows us to use stack storage to implement the absl::HashState type +// erasure mechanism (avoiding heap allocations while hashing). +// // `HashStateBase` will provide a complete implementation for a hash state -// object in terms of this method. +// object in terms of these two methods. 
// // Example: // @@ -140,6 +189,10 @@ class PiecewiseCombiner { // static H combine_contiguous(H state, const unsigned char*, size_t); // using MyHashState::HashStateBase::combine; // using MyHashState::HashStateBase::combine_contiguous; +// using MyHashState::HashStateBase::combine_unordered; +// private: +// template +// static H RunCombineUnordered(H state, CombinerT combiner); // }; template class HashStateBase { @@ -180,7 +233,30 @@ class HashStateBase { template static H combine_contiguous(H state, const T* data, size_t size); + template + static H combine_unordered(H state, I begin, I end); + using AbslInternalPiecewiseCombiner = PiecewiseCombiner; + + template + using is_hashable = absl::hash_internal::is_hashable; + + private: + // Common implementation of the iteration step of a "combiner", as described + // above. + template + struct CombineUnorderedCallback { + I begin; + I end; + + template + void operator()(InnerH inner_state, ElementStateConsumer cb) { + for (; begin != end; ++begin) { + inner_state = H::combine(std::move(inner_state), *begin); + cb(inner_state); + } + } + }; }; // is_uniquely_represented @@ -345,17 +421,43 @@ H AbslHashValue(H hash_state, std::nullptr_t) { return H::combine(std::move(hash_state), static_cast(nullptr)); } +// AbslHashValue() for hashing pointers-to-member +template +H AbslHashValue(H hash_state, T C::* ptr) { + auto salient_ptm_size = [](std::size_t n) -> std::size_t { +#if defined(_MSC_VER) + // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2, + // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain + // padding (namely when they have 1 or 3 ints). The value below is a lower + // bound on the number of salient, non-padding bytes that we use for + // hashing. + if (alignof(T C::*) == alignof(int)) { + // No padding when all subobjects have the same size as the total + // alignment. This happens in 32-bit mode. + return n; + } else { + // Padding for 1 int (size 16) or 3 ints (size 24). + // With 2 ints, the size is 16 with no padding, which we pessimize. + return n == 24 ? 20 : n == 16 ? 12 : n; + } +#else + // On other platforms, we assume that pointers-to-members do not have + // padding. +#ifdef __cpp_lib_has_unique_object_representations + static_assert(std::has_unique_object_representations::value); +#endif // __cpp_lib_has_unique_object_representations + return n; +#endif + }; + return H::combine_contiguous(std::move(hash_state), + reinterpret_cast(&ptr), + salient_ptm_size(sizeof ptr)); +} + // ----------------------------------------------------------------------------- // AbslHashValue for Composite Types // ----------------------------------------------------------------------------- -// is_hashable() -// -// Trait class which returns true if T is hashable by the absl::Hash framework. -// Used for the AbslHashValue implementations for composite types below. -template -struct is_hashable; - // AbslHashValue() for hashing pairs template typename std::enable_if::value && is_hashable::value, @@ -489,8 +591,9 @@ typename std::enable_if::value, H>::type AbslHashValue( // AbslHashValue for hashing std::vector // -// Do not use this for vector. It does not have a .data(), and a fallback -// for std::hash<> is most likely faster. +// Do not use this for vector on platforms that have a working +// implementation of std::hash. It does not have a .data(), and a fallback for +// std::hash<> is most likely faster. 
template typename std::enable_if::value && !std::is_same::value, H>::type @@ -500,6 +603,44 @@ AbslHashValue(H hash_state, const std::vector& vector) { vector.size()); } +// AbslHashValue special cases for hashing std::vector + +#if defined(ABSL_IS_BIG_ENDIAN) && \ + (defined(__GLIBCXX__) || defined(__GLIBCPP__)) + +// std::hash in libstdc++ does not work correctly with vector on Big +// Endian platforms therefore we need to implement a custom AbslHashValue for +// it. More details on the bug: +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531 +template +typename std::enable_if::value && std::is_same::value, + H>::type +AbslHashValue(H hash_state, const std::vector& vector) { + typename H::AbslInternalPiecewiseCombiner combiner; + for (const auto& i : vector) { + unsigned char c = static_cast(i); + hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c)); + } + return H::combine(combiner.finalize(std::move(hash_state)), vector.size()); +} +#else +// When not working around the libstdc++ bug above, we still have to contend +// with the fact that std::hash> is often poor quality, hashing +// directly on the internal words and on no other state. On these platforms, +// vector{1, 1} and vector{1, 1, 0} hash to the same value. +// +// Mixing in the size (as we do in our other vector<> implementations) on top +// of the library-provided hash implementation avoids this QOI issue. +template +typename std::enable_if::value && std::is_same::value, + H>::type +AbslHashValue(H hash_state, const std::vector& vector) { + return H::combine(std::move(hash_state), + std::hash>{}(vector), + vector.size()); +} +#endif + // ----------------------------------------------------------------------------- // AbslHashValue for Ordered Associative Containers // ----------------------------------------------------------------------------- @@ -549,6 +690,55 @@ typename std::enable_if::value, H>::type AbslHashValue( return H::combine(std::move(hash_state), set.size()); } +// ----------------------------------------------------------------------------- +// AbslHashValue for Unordered Associative Containers +// ----------------------------------------------------------------------------- + +// AbslHashValue for hashing std::unordered_set +template +typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::unordered_set& s) { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size()); +} + +// AbslHashValue for hashing std::unordered_multiset +template +typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, + const std::unordered_multiset& s) { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size()); +} + +// AbslHashValue for hashing std::unordered_set +template +typename std::enable_if::value && is_hashable::value, + H>::type +AbslHashValue(H hash_state, + const std::unordered_map& s) { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size()); +} + +// AbslHashValue for hashing std::unordered_multiset +template +typename std::enable_if::value && is_hashable::value, + H>::type +AbslHashValue(H hash_state, + const std::unordered_multimap& s) { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size()); +} + // ----------------------------------------------------------------------------- // AbslHashValue for Wrapper Types // 
----------------------------------------------------------------------------- @@ -592,9 +782,28 @@ AbslHashValue(H hash_state, const absl::variant& v) { // AbslHashValue for Other Types // ----------------------------------------------------------------------------- -// AbslHashValue for hashing std::bitset is not defined, for the same reason as -// for vector (see std::vector above): It does not expose the raw bytes, -// and a fallback to std::hash<> is most likely faster. +// AbslHashValue for hashing std::bitset is not defined on Little Endian +// platforms, for the same reason as for vector (see std::vector above): +// It does not expose the raw bytes, and a fallback to std::hash<> is most +// likely faster. + +#if defined(ABSL_IS_BIG_ENDIAN) && \ + (defined(__GLIBCXX__) || defined(__GLIBCPP__)) +// AbslHashValue for hashing std::bitset +// +// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian +// platforms therefore we need to implement a custom AbslHashValue for it. More +// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531 +template +H AbslHashValue(H hash_state, const std::bitset& set) { + typename H::AbslInternalPiecewiseCombiner combiner; + for (int i = 0; i < N; i++) { + unsigned char c = static_cast(set[i]); + hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c)); + } + return H::combine(combiner.finalize(std::move(hash_state)), N); +} +#endif // ----------------------------------------------------------------------------- @@ -773,6 +982,31 @@ class ABSL_DLL MixingHashState : public HashStateBase { // move-only ensures that there is only one non-moved-from object. MixingHashState() : state_(Seed()) {} + friend class MixingHashState::HashStateBase; + + template + static MixingHashState RunCombineUnordered(MixingHashState state, + CombinerT combiner) { + uint64_t unordered_state = 0; + combiner(MixingHashState{}, [&](MixingHashState& inner_state) { + // Add the hash state of the element to the running total, but mix the + // carry bit back into the low bit. This in intended to avoid losing + // entropy to overflow, especially when unordered_multisets contain + // multiple copies of the same value. + auto element_state = inner_state.state_; + unordered_state += element_state; + if (unordered_state < element_state) { + ++unordered_state; + } + inner_state = MixingHashState{}; + }); + return MixingHashState::combine(std::move(state), unordered_state); + } + + // Allow the HashState type-erasure implementation to invoke + // RunCombinedUnordered() directly. + friend class absl::HashState; + // Workaround for MSVC bug. // We make the type copyable to fix the calling convention, even though we // never actually copy it. Keep it private to not affect the public API of the @@ -856,15 +1090,10 @@ class ABSL_DLL MixingHashState : public HashStateBase { } ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) { -#if defined(__aarch64__) - // On AArch64, calculating a 128-bit product is inefficient, because it - // requires a sequence of two instructions to calculate the upper and lower - // halves of the result. - using MultType = uint64_t; -#else + // Though the 128-bit product on AArch64 needs two instructions, it is + // still a good balance between speed and hash quality. using MultType = absl::conditional_t; -#endif // We do the addition in 64-bit space to make sure the 128-bit // multiplication is fast. 
If we were to do it as MultType the compiler has // to assume that the high word is non-zero and needs to perform 2 @@ -883,7 +1112,7 @@ class ABSL_DLL MixingHashState : public HashStateBase { #ifdef ABSL_HAVE_INTRINSIC_INT128 return LowLevelHashImpl(data, len); #else - return absl::hash_internal::CityHash64(reinterpret_cast(data), len); + return hash_internal::CityHash64(reinterpret_cast(data), len); #endif } @@ -929,7 +1158,7 @@ inline uint64_t MixingHashState::CombineContiguousImpl( if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) { return CombineLargeContiguousImpl32(state, first, len); } - v = absl::hash_internal::CityHash32(reinterpret_cast(first), len); + v = hash_internal::CityHash32(reinterpret_cast(first), len); } else if (len >= 4) { v = Read4To8(first, len); } else if (len > 0) { @@ -1007,6 +1236,14 @@ H HashStateBase::combine_contiguous(H state, const T* data, size_t size) { return hash_internal::hash_range_or_bytes(std::move(state), data, size); } +// HashStateBase::combine_unordered() +template +template +H HashStateBase::combine_unordered(H state, I begin, I end) { + return H::RunCombineUnordered(std::move(state), + CombineUnorderedCallback{begin, end}); +} + // HashStateBase::PiecewiseCombiner::add_buffer() template H PiecewiseCombiner::add_buffer(H state, const unsigned char* data, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc index 6f9cb9c7bf..e05e7885ad 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc @@ -40,7 +40,7 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) { } uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, - const uint64_t salt[]) { + const uint64_t salt[5]) { const uint8_t* ptr = static_cast(data); uint64_t starting_length = static_cast(len); uint64_t current_state = seed ^ salt[0]; @@ -106,7 +106,8 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, } else if (len > 0) { // If we have at least 1 and at most 3 bytes, read all of the provided // bits into A, with some adjustments. 
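As an aside on the 1-to-3-byte tail read described just above: for len in {1, 2, 3} the indices 0, len >> 1, and len - 1 together touch every input byte, so the three-way shift-and-OR packs the whole tail into one word without branching. The sketch below is illustrative only (the helper name is made up); the changed line in the diff that follows merely adds an explicit cast to the 64-bit type of `a` to avoid an implicit-conversion warning.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative helper (not from the library): packs a 1-3 byte tail into one
// word the same way LowLevelHash does.
//   len == 1 -> indices {0, 0, 0}
//   len == 2 -> indices {0, 1, 1}
//   len == 3 -> indices {0, 1, 2}
inline uint64_t ReadTail1To3(const unsigned char* ptr, std::size_t len) {
  assert(len >= 1 && len <= 3);
  return static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
                               ptr[len - 1]);
}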
- a = ((ptr[0] << 16) | (ptr[len >> 1] << 8) | ptr[len - 1]); + a = static_cast((ptr[0] << 16) | (ptr[len >> 1] << 8) | + ptr[len - 1]); b = 0; } else { a = 0; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash_test.cc index cf22dc3c96..ae930b349b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/low_level_hash_test.cc @@ -404,7 +404,55 @@ TEST(LowLevelHashTest, VerifyGolden) { uint64_t{0xc9ae5c8759b4877a}}, }; -#if defined(__aarch64__) +#if defined(ABSL_IS_BIG_ENDIAN) + constexpr uint64_t kGolden[kNumGoldenOutputs] = { + 0xe5a40d39ab796423, 0x1766974bf7527d81, 0x5c3bbbe230db17a8, + 0xa6630143a7e6aa6f, 0x17645cb7318b86b, 0x218b175f30ba61f8, + 0xa6564b468248c683, 0xef192f401b116e1c, 0xbe8dc0c54617639d, + 0xe7b01610fc22dbb8, 0x99d9f694404af913, 0xf4eecd37464b45c5, + 0x7d2c653d63596d9b, 0x3f15c8544ec5393a, 0x6b9dc0c1704f796c, + 0xf1ded7a7eae5ed5a, 0x2db2fd7c6dd4641b, 0x151ca2d3d4cd33ab, + 0xa5af5994ac2ccd64, 0x2b2a4ca3191d2fce, 0xf89e68c9364e7c05, + 0x71724c70b799c21, 0x70536fabfd157369, 0xdee92794c3c3082b, + 0xac033a6743d3b3eb, 0xed2956b506cd5151, 0xbd669644755264b6, + 0x6ab1ff5d5f549a63, 0xf6bd551a2e3e04e, 0x7b5a8cef6875ea73, + 0x22bccf4d4db0a91c, 0x4f2bc07754c7c7eb, 0xfb6b8342a86725db, + 0x13a1a0d4c5854da, 0x5f6e44655f7dedac, 0x54a9198dff2bdf85, + 0xdb17e6915d4e4042, 0xa69926cf5c3b89f, 0xf77f031bfd74c096, + 0x1d6f916fdd50ec3c, 0x334ac76013ade393, 0x99370f899111de15, + 0x352457a03ada6de, 0x341974d4f42d854d, 0xda89ab02872aeb5, + 0x6ec2b74e143b10d9, 0x6f284c0b5cd60522, 0xf9670de353438f88, + 0xde920913adf0a2b4, 0xb7a07d7c0c17a8ec, 0x879a69f558ba3a98, + 0x360cf6d802df20f9, 0x53530f8046673738, 0xbd8f5f2bcf35e483, + 0x3f171f047144b983, 0x644d04e820823465, 0x50e44773a20b2702, + 0xe584ed4c05c745dd, 0x9a825c85b95ab6c0, 0xbce2931deb74e775, + 0x10468e9e705c7cfe, 0x12e01de3104141e2, 0x5c11ae2ee3713abd, + 0x6ac5ffb0860319e6, 0xc1e6da1849d30fc9, 0xa0e4d247a458b447, + 0x4530d4615c32b89b, 0x116aa09107a76505, 0xf941339d00d9bb73, + 0x573a0fc1615afb33, 0xa975c81dc868b258, 0x3ab2c5250ab54bda, + 0x37f99f208a3e3b11, 0x4b49b0ff706689d, 0x30bafa0b8f0a87fe, + 0xea6787a65cc20cdd, 0x55861729f1fc3ab8, 0xea38e009c5be9b72, + 0xcb8522cba33c3c66, 0x352e77653fe306f3, 0xe0bb760793bac064, + 0xf66ec59322662956, 0x637aa320455d56f8, 0x46ee546be5824a89, + 0x9e6842421e83d8a4, 0xf98ac2bc96b9fb8c, 0xf2c1002fd9a70b99, + 0x4c2b62b1e39e9405, 0x3248555fa3ade9c4, 0xd4d04c37f6417c21, + 0xf40cd506b1bf5653, 0x6c45d6005c760d2f, 0x61d88a7e61ff0d7e, + 0x131591e8a53cc967, 0xdae85cb9bc29bab6, 0xe98835334905e626, + 0x7cce50a2b66b8754, 0x5b0b3d0c5ac498ae, 0xd35a218c974d1756, + 0xfce436ddc1d003c, 0xd183901de90bb741, 0x9378f8f34974a66, + 0x21f11ae0a0402368, 0xf2fbd7c94ef89cb6, 0xc329c69d0f0d080b, + 0xf2841cba16216a61, 0x47aba97b44916df1, 0x724d4e00a8019fcf, + 0x2df9005c2a728d63, 0xc788892a1a5d7515, 0x9e993a65f9df0480, + 0x76876721ff49f969, 0xbe7a796cfba15bf5, 0xa4c8bd54586f5488, + 0xb390a325275501ab, 0x893f11317427ccf1, 0x92f2bb57da5695b9, + 0x30985b90da88269f, 0x2c690e268e086de8, 0x1c02df6097997196, + 0x1f9778f8bbdf6455, 0x7d57378c7bf8416d, 0xba8582a5f8d84d38, + 0xe8ca43b85050be4e, 0x5048cf6bed8a5d9f, 0xfbc5ba80917d0ea4, + 0x8011026525bf1691, 0x26b8dc6aed9fb50d, 0x191f5bfee77c1fe3, + 0xdd497891465a2cc1, 0x6f1fe8c57a33072e, 0x2c9f4ec078c460c0, + 
0x9a725bde8f6a1437, 0x6ce545fa3ef61e4d, + }; +#elif defined(__aarch64__) constexpr uint64_t kGolden[kNumGoldenOutputs] = { 0x45c0aadee165dcbe, 0x25ed8587f6f20d06, 0x5f23ae668ce7926d, 0xfef74d1da0846719, 0x54478408e68cb7d4, 0xee27ddaf88c6fe68, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/spy_hash_state.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/spy_hash_state.h index c083120811..0972826621 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/spy_hash_state.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/hash/internal/spy_hash_state.h @@ -15,6 +15,7 @@ #ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_ #define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_ +#include #include #include #include @@ -167,6 +168,24 @@ class SpyHashStateImpl : public HashStateBase> { using SpyHashStateImpl::HashStateBase::combine_contiguous; + template + static SpyHashStateImpl RunCombineUnordered(SpyHashStateImpl state, + CombinerT combiner) { + UnorderedCombinerCallback cb; + + combiner(SpyHashStateImpl{}, std::ref(cb)); + + std::sort(cb.element_hash_representations.begin(), + cb.element_hash_representations.end()); + state.hash_representation_.insert(state.hash_representation_.end(), + cb.element_hash_representations.begin(), + cb.element_hash_representations.end()); + if (cb.error && cb.error->has_value()) { + state.error_ = std::move(cb.error); + } + return state; + } + absl::optional error() const { if (moved_from_) { return "Returned a moved-from instance of the hash state object."; @@ -178,6 +197,22 @@ class SpyHashStateImpl : public HashStateBase> { template friend class SpyHashStateImpl; + struct UnorderedCombinerCallback { + std::vector element_hash_representations; + std::shared_ptr> error; + + // The inner spy can have a different type. + template + void operator()(SpyHashStateImpl& inner) { + element_hash_representations.push_back( + absl::StrJoin(inner.hash_representation_, "")); + if (inner.error_->has_value()) { + error = std::move(inner.error_); + } + inner = SpyHashStateImpl{}; + } + }; + // This is true if SpyHashStateImpl has been passed to a call of // AbslHashValue with the wrong type. This detects that the user called // AbslHashValue directly (because the hash state type does not match). diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/CMakeLists.txt new file mode 100644 index 0000000000..2337d0c0b4 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/CMakeLists.txt @@ -0,0 +1,927 @@ +# +# Copyright 2022 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Internal targets +absl_cc_library( + NAME + log_internal_check_op + SRCS + "internal/check_op.cc" + HDRS + "internal/check_op.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_nullguard + absl::log_internal_nullstream + absl::log_internal_strip + absl::strings +) + +absl_cc_library( + NAME + log_internal_conditions + SRCS + "internal/conditions.cc" + HDRS + "internal/conditions.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::log_internal_voidify +) + +absl_cc_library( + NAME + log_internal_config + SRCS + HDRS + "internal/config.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + log_internal_flags + SRCS + HDRS + "internal/flags.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::flags +) + +absl_cc_library( + NAME + log_internal_format + SRCS + "internal/log_format.cc" + HDRS + "internal/log_format.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_append_truncated + absl::log_internal_config + absl::log_internal_globals + absl::log_severity + absl::strings + absl::str_format + absl::time + absl::span +) + +absl_cc_library( + NAME + log_internal_globals + SRCS + "internal/globals.cc" + HDRS + "internal/globals.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::raw_logging_internal + absl::strings + absl::time +) + +absl_cc_library( + NAME + log_internal_proto + SRCS + "internal/proto.cc" + HDRS + "internal/proto.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::strings + absl::span +) + +absl_cc_library( + NAME + log_internal_message + SRCS + "internal/log_message.cc" + HDRS + "internal/log_message.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::errno_saver + absl::inlined_vector + absl::examine_stack + absl::log_internal_append_truncated + absl::log_internal_format + absl::log_internal_globals + absl::log_internal_proto + absl::log_internal_log_sink_set + absl::log_internal_nullguard + absl::log_globals + absl::log_entry + absl::log_severity + absl::log_sink + absl::log_sink_registry + absl::memory + absl::raw_logging_internal + absl::strings + absl::strerror + absl::time + absl::span +) + +absl_cc_library( + NAME + log_internal_log_sink_set + SRCS + "internal/log_sink_set.cc" + HDRS + "internal/log_sink_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + $<$:-llog> + DEPS + absl::base + absl::cleanup + absl::config + absl::core_headers + absl::log_internal_config + absl::log_internal_globals + absl::log_globals + absl::log_entry + absl::log_severity + absl::log_sink + absl::raw_logging_internal + absl::synchronization + absl::span + absl::strings +) + +absl_cc_library( + NAME + log_internal_nullguard + SRCS + HDRS + "internal/nullguard.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + log_internal_nullstream + SRCS + HDRS + "internal/nullstream.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + 
absl::config + absl::core_headers + absl::log_severity + absl::strings +) + +absl_cc_library( + NAME + log_internal_strip + SRCS + HDRS + "internal/strip.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_message + absl::log_internal_nullstream + absl::log_severity +) + +absl_cc_library( + NAME + log_internal_test_actions + SRCS + "internal/test_actions.cc" + HDRS + "internal/test_actions.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_entry + absl::log_severity + absl::strings + absl::time + TESTONLY +) + +absl_cc_library( + NAME + log_internal_test_helpers + SRCS + "internal/test_helpers.cc" + HDRS + "internal/test_helpers.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_globals + absl::log_initialize + absl::log_internal_globals + absl::log_severity + GTest::gtest + TESTONLY +) + +absl_cc_library( + NAME + log_internal_test_matchers + SRCS + "internal/test_matchers.cc" + HDRS + "internal/test_matchers.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_entry + absl::log_internal_test_helpers + absl::log_severity + absl::strings + absl::time + GTest::gtest + GTest::gmock + TESTONLY +) + +absl_cc_library( + NAME + log_internal_voidify + SRCS + HDRS + "internal/voidify.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + log_internal_append_truncated + SRCS + HDRS + "internal/append_truncated.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings + absl::span +) + +# Public targets +absl_cc_library( + NAME + check + SRCS + HDRS + "check.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log_internal_check_op + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip + PUBLIC +) + +absl_cc_library( + NAME + die_if_null + SRCS + "die_if_null.cc" + HDRS + "die_if_null.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_flags + SRCS + "flags.cc" + HDRS + "flags.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_globals + absl::log_severity + absl::log_internal_config + absl::log_internal_flags + absl::flags + absl::flags_marshalling + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_globals + SRCS + "globals.cc" + HDRS + "globals.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::hash + absl::log_severity + absl::strings +) + +absl_cc_library( + NAME + log_initialize + SRCS + "initialize.cc" + HDRS + "initialize.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_globals + absl::log_internal_globals + absl::time + PUBLIC +) + +absl_cc_library( + NAME + log + SRCS + HDRS + "log.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip + PUBLIC +) + +absl_cc_library( + NAME + log_entry + SRCS + "log_entry.cc" + HDRS + "log_entry.h" + COPTS + 
${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_config + absl::log_severity + absl::span + absl::strings + absl::time + PUBLIC +) + +absl_cc_library( + NAME + log_sink + SRCS + "log_sink.cc" + HDRS + "log_sink.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_entry + PUBLIC +) + +absl_cc_library( + NAME + log_sink_registry + SRCS + HDRS + "log_sink_registry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_sink + absl::log_internal_log_sink_set + PUBLIC +) + +absl_cc_library( + NAME + log_streamer + SRCS + HDRS + "log_streamer.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log + absl::log_severity + absl::optional + absl::strings + absl::strings_internal + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + scoped_mock_log + SRCS + "scoped_mock_log.cc" + HDRS + "scoped_mock_log.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_entry + absl::log_severity + absl::log_sink + absl::log_sink_registry + absl::raw_logging_internal + absl::strings + GTest::gmock + GTest::gtest + PUBLIC + TESTONLY +) + +absl_cc_library( + NAME + log_internal_structured + HDRS + "internal/structured.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_internal_message + absl::strings +) + +absl_cc_library( + NAME + log_structured + HDRS + "structured.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_internal_structured + absl::strings + PUBLIC +) + +# Test targets +absl_cc_test( + NAME + basic_log_test + SRCS + "basic_log_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::log + absl::log_entry + absl::log_globals + absl::log_severity + absl::log_internal_test_actions + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::scoped_mock_log + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + check_test + SRCS + "check_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::check + absl::config + absl::core_headers + absl::log_internal_test_helpers + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + die_if_null_test + SRCS + "die_if_null_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::die_if_null + absl::log_internal_test_helpers + GTest::gtest_main +) + +absl_cc_test( + NAME + log_flags_test + SRCS + "flags_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_flags + absl::log_globals + absl::log_internal_flags + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::log_severity + absl::flags + absl::flags_reflection + absl::scoped_mock_log + absl::strings + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + log_entry_test + SRCS + "log_entry_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_entry + absl::log_internal_append_truncated + absl::log_internal_format + absl::log_internal_globals + absl::log_internal_test_helpers + absl::log_severity + absl::span + absl::strings + absl::time + GTest::gmock + GTest::gtest_main +) + 
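For orientation between the target definitions, here is a minimal consumer of the public targets declared earlier in this file (absl::log, absl::check, and absl::log_initialize). This is a usage sketch, not part of the build file; it assumes absl::InitializeLog() from initialize.h, which the log_initialize target above packages.

#include "absl/log/check.h"
#include "absl/log/initialize.h"
#include "absl/log/log.h"

int main(int argc, char** argv) {
  absl::InitializeLog();  // one-time setup provided via absl::log_initialize
  CHECK_GT(argc, 0) << "argv must at least contain the program name";
  LOG(INFO) << "starting " << argv[0];
  LOG(WARNING) << "this sketch does nothing else";
  return 0;
}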
+absl_cc_test( + NAME + log_globals_test + SRCS + "globals_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_globals + absl::log_internal_globals + absl::log_internal_test_helpers + absl::log_severity + absl::scoped_mock_log + GTest::gtest_main +) + +absl_cc_test( + NAME + log_format_test + SRCS + "log_format_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log + absl::log_internal_test_matchers + absl::scoped_mock_log + absl::str_format + absl::strings + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + log_macro_hygiene_test + SRCS + "log_macro_hygiene_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_severity + absl::scoped_mock_log + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + log_sink_test + SRCS + "log_sink_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_internal_test_actions + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::log_sink + absl::log_sink_registry + absl::log_severity + absl::raw_logging_internal + absl::scoped_mock_log + absl::strings + GTest::gtest_main +) + +absl_cc_test( + NAME + log_streamer_test + SRCS + "log_streamer_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::core_headers + absl::log + absl::log_internal_test_actions + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::log_streamer + absl::log_severity + absl::scoped_mock_log + absl::strings + GTest::gtest_main +) + +absl_cc_test( + NAME + log_modifier_methods_test + SRCS + "log_modifier_methods_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log + absl::log_internal_test_actions + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::log_sink + absl::scoped_mock_log + absl::strings + absl::time + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + scoped_mock_log_test + SRCS + "scoped_mock_log_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_globals + absl::log_internal_globals + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::log_severity + absl::memory + absl::scoped_mock_log + absl::strings + absl::synchronization + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + log_internal_stderr_log_sink_test + SRCS + "internal/stderr_log_sink_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_globals + absl::log_internal_test_helpers + absl::log_severity + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + log_stripping_test + SRCS + "stripping_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::check + absl::flags_program_name + absl::log + absl::log_internal_test_helpers + absl::strerror + absl::strings + absl::str_format + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + log_structured_test + SRCS + "structured_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log + absl::log_internal_test_helpers + absl::log_internal_test_matchers + absl::log_structured + absl::scoped_mock_log + GTest::gmock + GTest::gtest_main +) diff 
--git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/basic_log_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/basic_log_test.cc new file mode 100644 index 0000000000..bc40f0d05b --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/basic_log_test.cc @@ -0,0 +1,440 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The testcases in this file are expected to pass or be skipped with any value +// of ABSL_MIN_LOG_LEVEL + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/sysinfo.h" +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "absl/log/internal/test_actions.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/internal/test_matchers.h" +#include "absl/log/log.h" +#include "absl/log/log_entry.h" +#include "absl/log/scoped_mock_log.h" + +namespace { +#if GTEST_HAS_DEATH_TEST +using ::absl::log_internal::DeathTestExpectedLogging; +using ::absl::log_internal::DeathTestUnexpectedLogging; +using ::absl::log_internal::DeathTestValidateExpectations; +using ::absl::log_internal::DiedOfFatal; +using ::absl::log_internal::DiedOfQFatal; +#endif +using ::absl::log_internal::LoggingEnabledAt; +using ::absl::log_internal::LogSeverity; +using ::absl::log_internal::Prefix; +using ::absl::log_internal::SourceBasename; +using ::absl::log_internal::SourceFilename; +using ::absl::log_internal::SourceLine; +using ::absl::log_internal::Stacktrace; +using ::absl::log_internal::TextMessage; +using ::absl::log_internal::ThreadID; +using ::absl::log_internal::TimestampInMatchWindow; +using ::absl::log_internal::Verbosity; +using ::testing::AnyNumber; +using ::testing::Eq; +using ::testing::IsEmpty; +using ::testing::IsTrue; + +class BasicLogTest : public testing::TestWithParam {}; + +std::string ThresholdName( + testing::TestParamInfo severity) { + std::stringstream ostr; + ostr << severity.param; + return ostr.str().substr( + severity.param == absl::LogSeverityAtLeast::kInfinity ? 
0 : 2); +} + +INSTANTIATE_TEST_SUITE_P(WithParam, BasicLogTest, + testing::Values(absl::LogSeverityAtLeast::kInfo, + absl::LogSeverityAtLeast::kWarning, + absl::LogSeverityAtLeast::kError, + absl::LogSeverityAtLeast::kFatal, + absl::LogSeverityAtLeast::kInfinity), + ThresholdName); + +TEST_P(BasicLogTest, Info) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO) << "hello world"; }; + + if (LoggingEnabledAt(absl::LogSeverity::kInfo)) { + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kInfo)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + literal: "hello world" + })pb")), + Stacktrace(IsEmpty())))); + } + + test_sink.StartCapturingLogs(); + do_log(); +} + +TEST_P(BasicLogTest, Warning) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(WARNING) << "hello world"; }; + + if (LoggingEnabledAt(absl::LogSeverity::kWarning)) { + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kWarning)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + literal: "hello world" + })pb")), + Stacktrace(IsEmpty())))); + } + + test_sink.StartCapturingLogs(); + do_log(); +} + +TEST_P(BasicLogTest, Error) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(ERROR) << "hello world"; }; + + if (LoggingEnabledAt(absl::LogSeverity::kError)) { + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kError)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + literal: "hello world" + })pb")), + Stacktrace(IsEmpty())))); + } + + test_sink.StartCapturingLogs(); + do_log(); +} + +#if GTEST_HAS_DEATH_TEST +using BasicLogDeathTest = BasicLogTest; + +INSTANTIATE_TEST_SUITE_P(WithParam, BasicLogDeathTest, + testing::Values(absl::LogSeverityAtLeast::kInfo, + absl::LogSeverityAtLeast::kFatal, + absl::LogSeverityAtLeast::kInfinity), + ThresholdName); + +TEST_P(BasicLogDeathTest, Fatal) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(FATAL) << "hello world"; }; + + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink( + absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + 
::testing::InSequence s; + + // Note the logic in DeathTestValidateExpectations() caters for the case + // of logging being disabled at FATAL level. + + if (LoggingEnabledAt(absl::LogSeverity::kFatal)) { + // The first call without the stack trace. + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { literal: "hello world" })pb")), + Stacktrace(IsEmpty())))) + .WillOnce(DeathTestExpectedLogging()); + + // The second call with the stack trace. + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { literal: "hello world" })pb")), + Stacktrace(Not(IsEmpty()))))) + .WillOnce(DeathTestExpectedLogging()); + } + + test_sink.StartCapturingLogs(); + do_log(); + }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +TEST_P(BasicLogDeathTest, QFatal) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(QFATAL) << "hello world"; }; + + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink( + absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + if (LoggingEnabledAt(absl::LogSeverity::kFatal)) { + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { literal: "hello world" })pb")), + Stacktrace(IsEmpty())))) + .WillOnce(DeathTestExpectedLogging()); + } + + test_sink.StartCapturingLogs(); + do_log(); + }, + DiedOfQFatal, DeathTestValidateExpectations()); +} +#endif + +TEST_P(BasicLogTest, Level) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + for (auto severity : {absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, + absl::LogSeverity::kError}) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int log_line = __LINE__ + 1; + auto do_log = [severity] { LOG(LEVEL(severity)) << "hello world"; }; + + if (LoggingEnabledAt(severity)) { + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(severity)), TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + literal: "hello world" + })pb")), + Stacktrace(IsEmpty())))); + } + test_sink.StartCapturingLogs(); + do_log(); + } +} + +#if GTEST_HAS_DEATH_TEST +TEST_P(BasicLogDeathTest, Level) { + // 
TODO(b/242568884): re-enable once bug is fixed. + // absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + // Ensure that `severity` is not a compile-time constant to prove that + // `LOG(LEVEL(severity))` works regardless: + auto volatile severity = absl::LogSeverity::kFatal; + + const int log_line = __LINE__ + 1; + auto do_log = [severity] { LOG(LEVEL(severity)) << "hello world"; }; + + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink( + absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + ::testing::InSequence s; + + if (LoggingEnabledAt(absl::LogSeverity::kFatal)) { + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { literal: "hello world" })pb")), + Stacktrace(IsEmpty())))) + .WillOnce(DeathTestExpectedLogging()); + + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq(__FILE__)), + SourceBasename(Eq("basic_log_test.cc")), + SourceLine(Eq(log_line)), Prefix(IsTrue()), + LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("hello world")), + Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { literal: "hello world" })pb")), + Stacktrace(Not(IsEmpty()))))) + .WillOnce(DeathTestExpectedLogging()); + } + + test_sink.StartCapturingLogs(); + do_log(); + }, + DiedOfFatal, DeathTestValidateExpectations()); +} +#endif + +TEST_P(BasicLogTest, LevelClampsNegativeValues) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + if (!LoggingEnabledAt(absl::LogSeverity::kInfo)) { + GTEST_SKIP() << "This test cases required INFO log to be enabled"; + return; + } + + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(LogSeverity(Eq(absl::LogSeverity::kInfo)))); + + test_sink.StartCapturingLogs(); + LOG(LEVEL(-1)) << "hello world"; +} + +TEST_P(BasicLogTest, LevelClampsLargeValues) { + absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); + + if (!LoggingEnabledAt(absl::LogSeverity::kError)) { + GTEST_SKIP() << "This test cases required ERROR log to be enabled"; + return; + } + + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(LogSeverity(Eq(absl::LogSeverity::kError)))); + + test_sink.StartCapturingLogs(); + LOG(LEVEL(static_cast(absl::LogSeverity::kFatal) + 1)) << "hello world"; +} + +TEST(ErrnoPreservationTest, InSeverityExpression) { + errno = 77; + int saved_errno; + LOG(LEVEL((saved_errno = errno, absl::LogSeverity::kInfo))); + EXPECT_THAT(saved_errno, Eq(77)); +} + +TEST(ErrnoPreservationTest, InStreamedExpression) { + if (!LoggingEnabledAt(absl::LogSeverity::kInfo)) { + GTEST_SKIP() << "This test cases required INFO log to be enabled"; + return; + } + + errno = 77; + int saved_errno = 0; + LOG(INFO) << (saved_errno = errno, "hello world"); + EXPECT_THAT(saved_errno, Eq(77)); +} + +TEST(ErrnoPreservationTest, AfterStatement) { + errno = 77; + LOG(INFO); + const int saved_errno = errno; + EXPECT_THAT(saved_errno, Eq(77)); +} + +// Tests that 
using a variable/parameter in a logging statement suppresses +// unused-variable/parameter warnings. +// ----------------------------------------------------------------------- +class UnusedVariableWarningCompileTest { + // These four don't prove anything unless `ABSL_MIN_LOG_LEVEL` is greater than + // `kInfo`. + static void LoggedVariable() { + const int x = 0; + LOG(INFO) << x; + } + static void LoggedParameter(const int x) { LOG(INFO) << x; } + static void SeverityVariable() { + const int x = 0; + LOG(LEVEL(x)) << "hello world"; + } + static void SeverityParameter(const int x) { LOG(LEVEL(x)) << "hello world"; } +}; + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/check.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/check.h new file mode 100644 index 0000000000..c7303b8def --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/check.h @@ -0,0 +1,227 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/check.h +// ----------------------------------------------------------------------------- +// +// This header declares a family of `CHECK` macros. +// +// `CHECK` macros terminate the program with a fatal error if the specified +// condition is not true. +// +// Except for those whose names begin with `DCHECK`, these macros are not +// controlled by `NDEBUG` (cf. `assert`), so the check will be executed +// regardless of compilation mode. `CHECK` and friends are thus useful for +// confirming invariants in situations where continuing to run would be worse +// than terminating, e.g., due to risk of data corruption or security +// compromise. It is also more robust and portable to deliberately terminate +// at a particular place with a useful message and backtrace than to assume some +// ultimately unspecified and unreliable crashing behavior (such as a +// "segmentation fault"). + +#ifndef ABSL_LOG_CHECK_H_ +#define ABSL_LOG_CHECK_H_ + +#include "absl/base/optimization.h" +#include "absl/log/internal/check_op.h" // IWYU pragma: export +#include "absl/log/internal/conditions.h" // IWYU pragma: export +#include "absl/log/internal/log_message.h" // IWYU pragma: export +#include "absl/log/internal/strip.h" // IWYU pragma: export + +// CHECK() +// +// `CHECK` terminates the program with a fatal error if `condition` is not true. +// +// The message may include additional information such as stack traces, when +// available. 
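A short standalone sketch of the NDEBUG contrast drawn above (illustrative only; the Validate function is invented): assert disappears from release builds, while CHECK is evaluated and fatal on failure in every build mode. The header's own streaming example follows.

#include <cassert>
#include "absl/log/check.h"

void Validate(int fd) {
  assert(fd != -1);  // compiled out entirely when NDEBUG is defined
  CHECK(fd != -1) << "bad file descriptor";  // always checked, fatal on failure
}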
+// +// Example: +// +// CHECK(!cheese.empty()) << "Out of Cheese"; +// +// Might produce a message like: +// +// Check failed: !cheese.empty() Out of Cheese +#define CHECK(condition) \ + ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, \ + ABSL_PREDICT_FALSE(!(condition))) \ + ABSL_LOG_INTERNAL_CHECK(#condition).InternalStream() + +// QCHECK() +// +// `QCHECK` behaves like `CHECK` but does not print a full stack trace and does +// not run registered error handlers (as `QFATAL`). It is useful when the +// problem is definitely unrelated to program flow, e.g. when validating user +// input. +#define QCHECK(condition) \ + ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, \ + ABSL_PREDICT_FALSE(!(condition))) \ + ABSL_LOG_INTERNAL_QCHECK(#condition).InternalStream() + +// PCHECK() +// +// `PCHECK` behaves like `CHECK` but appends a description of the current state +// of `errno` to the failure message. +// +// Example: +// +// int fd = open("/var/empty/missing", O_RDONLY); +// PCHECK(fd != -1) << "posix is difficult"; +// +// Might produce a message like: +// +// Check failed: fd != -1 posix is difficult: No such file or directory [2] +#define PCHECK(condition) CHECK(condition).WithPerror() + +// DCHECK() +// +// `DCHECK` behaves like `CHECK` in debug mode and does nothing otherwise (as +// `DLOG`). Unlike with `CHECK` (but as with `assert`), it is not safe to rely +// on evaluation of `condition`: when `NDEBUG` is enabled, DCHECK does not +// evaluate the condition. +#ifndef NDEBUG +#define DCHECK(condition) CHECK(condition) +#else +#define DCHECK(condition) CHECK(true || (condition)) +#endif + +// `CHECK_EQ` and friends are syntactic sugar for `CHECK(x == y)` that +// automatically output the expression being tested and the evaluated values on +// either side. +// +// Example: +// +// int x = 3, y = 5; +// CHECK_EQ(2 * x, y) << "oops!"; +// +// Might produce a message like: +// +// Check failed: 2 * x == y (6 vs. 5) oops! +// +// The values must implement the appropriate comparison operator as well as +// `operator<<(std::ostream&, ...)`. Care is taken to ensure that each +// argument is evaluated exactly once, and that anything which is legal to pass +// as a function argument is legal here. In particular, the arguments may be +// temporary expressions which will end up being destroyed at the end of the +// statement, +// +// Example: +// +// CHECK_EQ(std::string("abc")[1], 'b'); +// +// WARNING: Passing `NULL` as an argument to `CHECK_EQ` and similar macros does +// not compile. Use `nullptr` instead. 
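To illustrate the exactly-once evaluation and temporary-lifetime guarantees described above, a small sketch (the NextId helper is invented for the example):

#include <string>
#include "absl/log/check.h"

int NextId() {
  static int id = 0;
  return ++id;
}

void Demo() {
  // The left-hand argument is evaluated exactly once, so the side effect
  // happens once whether or not the check would fail.
  CHECK_EQ(NextId(), 1);
  // Temporaries live until the end of the full expression, so comparing
  // against a freshly built std::string is fine.
  CHECK_EQ(std::string("ab").substr(0, 1), "a");
}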
+#define CHECK_EQ(val1, val2) \ + ABSL_LOG_INTERNAL_CHECK_OP(Check_EQ, ==, val1, val2) +#define CHECK_NE(val1, val2) \ + ABSL_LOG_INTERNAL_CHECK_OP(Check_NE, !=, val1, val2) +#define CHECK_LE(val1, val2) \ + ABSL_LOG_INTERNAL_CHECK_OP(Check_LE, <=, val1, val2) +#define CHECK_LT(val1, val2) ABSL_LOG_INTERNAL_CHECK_OP(Check_LT, <, val1, val2) +#define CHECK_GE(val1, val2) \ + ABSL_LOG_INTERNAL_CHECK_OP(Check_GE, >=, val1, val2) +#define CHECK_GT(val1, val2) ABSL_LOG_INTERNAL_CHECK_OP(Check_GT, >, val1, val2) +#define QCHECK_EQ(val1, val2) \ + ABSL_LOG_INTERNAL_QCHECK_OP(Check_EQ, ==, val1, val2) +#define QCHECK_NE(val1, val2) \ + ABSL_LOG_INTERNAL_QCHECK_OP(Check_NE, !=, val1, val2) +#define QCHECK_LE(val1, val2) \ + ABSL_LOG_INTERNAL_QCHECK_OP(Check_LE, <=, val1, val2) +#define QCHECK_LT(val1, val2) \ + ABSL_LOG_INTERNAL_QCHECK_OP(Check_LT, <, val1, val2) +#define QCHECK_GE(val1, val2) \ + ABSL_LOG_INTERNAL_QCHECK_OP(Check_GE, >=, val1, val2) +#define QCHECK_GT(val1, val2) \ + ABSL_LOG_INTERNAL_QCHECK_OP(Check_GT, >, val1, val2) +#ifndef NDEBUG +#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2) +#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2) +#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2) +#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2) +#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2) +#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2) +#else // ndef NDEBUG +#define DCHECK_EQ(val1, val2) ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2) +#define DCHECK_NE(val1, val2) ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2) +#define DCHECK_LE(val1, val2) ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2) +#define DCHECK_LT(val1, val2) ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2) +#define DCHECK_GE(val1, val2) ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2) +#define DCHECK_GT(val1, val2) ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2) +#endif // def NDEBUG + +// `CHECK_OK` and friends validate that the provided `absl::Status` or +// `absl::StatusOr` is OK. If it isn't, they print a failure message that +// includes the actual status and terminate the program. +// +// As with all `DCHECK` variants, `DCHECK_OK` has no effect (not even +// evaluating its argument) if `NDEBUG` is enabled. +// +// Example: +// +// CHECK_OK(FunctionReturnsStatus(x, y, z)) << "oops!"; +// +// Might produce a message like: +// +// Check failed: FunctionReturnsStatus(x, y, z) is OK (ABORTED: timeout) oops! +#define CHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK(status) +#define QCHECK_OK(status) ABSL_LOG_INTERNAL_QCHECK_OK(status) +#ifndef NDEBUG +#define DCHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK(status) +#else +#define DCHECK_OK(status) ABSL_LOG_INTERNAL_DCHECK_NOP(status, nullptr) +#endif + +// `CHECK_STREQ` and friends provide `CHECK_EQ` functionality for C strings, +// i.e., nul-terminated char arrays. The `CASE` versions are case-insensitive. +// +// Example: +// +// CHECK_STREQ(argv[0], "./skynet"); +// +// Note that both arguments may be temporary strings which are destroyed by the +// compiler at the end of the current full expression. 
+// +// Example: +// +// CHECK_STREQ(Foo().c_str(), Bar().c_str()); +#define CHECK_STREQ(s1, s2) \ + ABSL_LOG_INTERNAL_CHECK_STROP(strcmp, ==, true, s1, s2) +#define CHECK_STRNE(s1, s2) \ + ABSL_LOG_INTERNAL_CHECK_STROP(strcmp, !=, false, s1, s2) +#define CHECK_STRCASEEQ(s1, s2) \ + ABSL_LOG_INTERNAL_CHECK_STROP(strcasecmp, ==, true, s1, s2) +#define CHECK_STRCASENE(s1, s2) \ + ABSL_LOG_INTERNAL_CHECK_STROP(strcasecmp, !=, false, s1, s2) +#define QCHECK_STREQ(s1, s2) \ + ABSL_LOG_INTERNAL_QCHECK_STROP(strcmp, ==, true, s1, s2) +#define QCHECK_STRNE(s1, s2) \ + ABSL_LOG_INTERNAL_QCHECK_STROP(strcmp, !=, false, s1, s2) +#define QCHECK_STRCASEEQ(s1, s2) \ + ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, ==, true, s1, s2) +#define QCHECK_STRCASENE(s1, s2) \ + ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, !=, false, s1, s2) +#ifndef NDEBUG +#define DCHECK_STREQ(s1, s2) CHECK_STREQ(s1, s2) +#define DCHECK_STRCASEEQ(s1, s2) CHECK_STRCASEEQ(s1, s2) +#define DCHECK_STRNE(s1, s2) CHECK_STRNE(s1, s2) +#define DCHECK_STRCASENE(s1, s2) CHECK_STRCASENE(s1, s2) +#else // ndef NDEBUG +#define DCHECK_STREQ(s1, s2) ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2) +#define DCHECK_STRCASEEQ(s1, s2) ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2) +#define DCHECK_STRNE(s1, s2) ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2) +#define DCHECK_STRCASENE(s1, s2) ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2) +#endif // def NDEBUG + +#endif // ABSL_LOG_CHECK_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/check_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/check_test.cc new file mode 100644 index 0000000000..4ce9d87224 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/check_test.cc @@ -0,0 +1,433 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/log/check.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/log/internal/test_helpers.h" + +namespace { +using ::testing::AllOf; +using ::testing::HasSubstr; +using ::testing::Not; + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +#if GTEST_HAS_DEATH_TEST + +TEST(CHECKDeathTest, TestBasicValues) { + CHECK(true); + + EXPECT_DEATH(CHECK(false), "Check failed: false"); + + int i = 2; + CHECK(i != 3); // NOLINT +} + +#endif // GTEST_HAS_DEATH_TEST + +TEST(CHECKTest, TestLogicExpressions) { + int i = 5; + CHECK(i > 0 && i < 10); + CHECK(i < 0 || i > 3); +} + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +ABSL_CONST_INIT const auto global_var_check = [](int i) { + CHECK(i > 0); // NOLINT + return i + 1; +}(3); + +ABSL_CONST_INIT const auto global_var = [](int i) { + CHECK_GE(i, 0); // NOLINT + return i + 1; +}(global_var_check); +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG + +TEST(CHECKTest, TestPlacementsInCompoundStatements) { + // check placement inside if/else clauses + if (true) CHECK(true); + + if (false) + ; // NOLINT + else + CHECK(true); + + switch (0) + case 0: + CHECK(true); // NOLINT + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + constexpr auto var = [](int i) { + CHECK(i > 0); // NOLINT + return i + 1; + }(global_var); + (void)var; +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG +} + +TEST(CHECKTest, TestBoolConvertible) { + struct Tester { + } tester; + CHECK([&]() { return &tester; }()); +} + +#if GTEST_HAS_DEATH_TEST + +TEST(CHECKDeathTest, TestChecksWithSideeffects) { + int var = 0; + CHECK([&var]() { + ++var; + return true; + }()); + EXPECT_EQ(var, 1); + + EXPECT_DEATH(CHECK([&var]() { + ++var; + return false; + }()) << var, + "Check failed: .* 2"); +} + +#endif // GTEST_HAS_DEATH_TEST + +#if GTEST_HAS_DEATH_TEST + +TEST(CHECKDeachTest, TestOrderOfInvocationsBetweenCheckAndMessage) { + int counter = 0; + + auto GetStr = [&counter]() -> std::string { + return counter++ == 0 ? "" : "non-empty"; + }; + + EXPECT_DEATH(CHECK(!GetStr().empty()) << GetStr(), HasSubstr("non-empty")); +} + +TEST(CHECKTest, TestSecondaryFailure) { + auto FailingRoutine = []() { + CHECK(false) << "Secondary"; + return false; + }; + EXPECT_DEATH(CHECK(FailingRoutine()) << "Primary", + AllOf(HasSubstr("Secondary"), Not(HasSubstr("Primary")))); +} + +TEST(CHECKTest, TestSecondaryFailureInMessage) { + auto MessageGen = []() { + CHECK(false) << "Secondary"; + return "Primary"; + }; + EXPECT_DEATH(CHECK(false) << MessageGen(), + AllOf(HasSubstr("Secondary"), Not(HasSubstr("Primary")))); +} + +#endif // GTEST_HAS_DEATH_TEST + +TEST(CHECKTest, TestBinaryChecksWithPrimitives) { + CHECK_EQ(1, 1); + CHECK_NE(1, 2); + CHECK_GE(1, 1); + CHECK_GE(2, 1); + CHECK_LE(1, 1); + CHECK_LE(1, 2); + CHECK_GT(2, 1); + CHECK_LT(1, 2); +} + +// For testing using CHECK*() on anonymous enums. +enum { CASE_A, CASE_B }; + +TEST(CHECKTest, TestBinaryChecksWithEnumValues) { + // Tests using CHECK*() on anonymous enums. 
+ CHECK_EQ(CASE_A, CASE_A); + CHECK_NE(CASE_A, CASE_B); + CHECK_GE(CASE_A, CASE_A); + CHECK_GE(CASE_B, CASE_A); + CHECK_LE(CASE_A, CASE_A); + CHECK_LE(CASE_A, CASE_B); + CHECK_GT(CASE_B, CASE_A); + CHECK_LT(CASE_A, CASE_B); +} + +TEST(CHECKTest, TestBinaryChecksWithNullptr) { + const void* p_null = nullptr; + const void* p_not_null = &p_null; + CHECK_EQ(p_null, nullptr); + CHECK_EQ(nullptr, p_null); + CHECK_NE(p_not_null, nullptr); + CHECK_NE(nullptr, p_not_null); +} + +#if GTEST_HAS_DEATH_TEST + +// Test logging of various char-typed values by failing CHECK*(). +TEST(CHECKDeathTest, TestComparingCharsValues) { + { + char a = ';'; + char b = 'b'; + EXPECT_DEATH(CHECK_EQ(a, b), "Check failed: a == b \\(';' vs. 'b'\\)"); + b = 1; + EXPECT_DEATH(CHECK_EQ(a, b), + "Check failed: a == b \\(';' vs. char value 1\\)"); + } + { + signed char a = ';'; + signed char b = 'b'; + EXPECT_DEATH(CHECK_EQ(a, b), "Check failed: a == b \\(';' vs. 'b'\\)"); + b = -128; + EXPECT_DEATH(CHECK_EQ(a, b), + "Check failed: a == b \\(';' vs. signed char value -128\\)"); + } + { + unsigned char a = ';'; + unsigned char b = 'b'; + EXPECT_DEATH(CHECK_EQ(a, b), "Check failed: a == b \\(';' vs. 'b'\\)"); + b = 128; + EXPECT_DEATH(CHECK_EQ(a, b), + "Check failed: a == b \\(';' vs. unsigned char value 128\\)"); + } +} + +TEST(CHECKDeathTest, TestNullValuesAreReportedCleanly) { + const char* a = nullptr; + const char* b = nullptr; + EXPECT_DEATH(CHECK_NE(a, b), + "Check failed: a != b \\(\\(null\\) vs. \\(null\\)\\)"); + + a = "xx"; + EXPECT_DEATH(CHECK_EQ(a, b), "Check failed: a == b \\(xx vs. \\(null\\)\\)"); + EXPECT_DEATH(CHECK_EQ(b, a), "Check failed: b == a \\(\\(null\\) vs. xx\\)"); + + std::nullptr_t n{}; + EXPECT_DEATH(CHECK_NE(n, nullptr), + "Check failed: n != nullptr \\(\\(null\\) vs. \\(null\\)\\)"); +} + +#endif // GTEST_HAS_DEATH_TEST + +TEST(CHECKTest, TestSTREQ) { + CHECK_STREQ("this", "this"); + CHECK_STREQ(nullptr, nullptr); + CHECK_STRCASEEQ("this", "tHiS"); + CHECK_STRCASEEQ(nullptr, nullptr); + CHECK_STRNE("this", "tHiS"); + CHECK_STRNE("this", nullptr); + CHECK_STRCASENE("this", "that"); + CHECK_STRCASENE(nullptr, "that"); + CHECK_STREQ((std::string("a") + "b").c_str(), "ab"); + CHECK_STREQ(std::string("test").c_str(), + (std::string("te") + std::string("st")).c_str()); +} + +TEST(CHECKTest, TestComparisonPlacementsInCompoundStatements) { + // check placement inside if/else clauses + if (true) CHECK_EQ(1, 1); + if (true) CHECK_STREQ("c", "c"); + + if (false) + ; // NOLINT + else + CHECK_LE(0, 1); + + if (false) + ; // NOLINT + else + CHECK_STRNE("a", "b"); + + switch (0) + case 0: + CHECK_NE(1, 0); + + switch (0) + case 0: + CHECK_STRCASEEQ("A", "a"); + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + constexpr auto var = [](int i) { + CHECK_GT(i, 0); + return i + 1; + }(global_var); + (void)var; + + // CHECK_STR... checks are not supported in constexpr routines. + // constexpr auto var2 = [](int i) { + // CHECK_STRNE("c", "d"); + // return i + 1; + // }(global_var); + +#if defined(__GNUC__) + int var3 = (({ CHECK_LE(1, 2); }), global_var < 10) ? 1 : 0; + (void)var3; + + int var4 = (({ CHECK_STREQ("a", "a"); }), global_var < 10) ? 
1 : 0; + (void)var4; +#endif // __GNUC__ +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG +} + +TEST(CHECKTest, TestDCHECK) { +#ifdef NDEBUG + DCHECK(1 == 2) << " DCHECK's shouldn't be compiled in normal mode"; +#endif + DCHECK(1 == 1); // NOLINT(readability/check) + DCHECK_EQ(1, 1); + DCHECK_NE(1, 2); + DCHECK_GE(1, 1); + DCHECK_GE(2, 1); + DCHECK_LE(1, 1); + DCHECK_LE(1, 2); + DCHECK_GT(2, 1); + DCHECK_LT(1, 2); + + // Test DCHECK on std::nullptr_t + const void* p_null = nullptr; + const void* p_not_null = &p_null; + DCHECK_EQ(p_null, nullptr); + DCHECK_EQ(nullptr, p_null); + DCHECK_NE(p_not_null, nullptr); + DCHECK_NE(nullptr, p_not_null); +} + +TEST(CHECKTest, TestQCHECK) { + // The tests that QCHECK does the same as CHECK + QCHECK(1 == 1); // NOLINT(readability/check) + QCHECK_EQ(1, 1); + QCHECK_NE(1, 2); + QCHECK_GE(1, 1); + QCHECK_GE(2, 1); + QCHECK_LE(1, 1); + QCHECK_LE(1, 2); + QCHECK_GT(2, 1); + QCHECK_LT(1, 2); + + // Tests using QCHECK*() on anonymous enums. + QCHECK_EQ(CASE_A, CASE_A); + QCHECK_NE(CASE_A, CASE_B); + QCHECK_GE(CASE_A, CASE_A); + QCHECK_GE(CASE_B, CASE_A); + QCHECK_LE(CASE_A, CASE_A); + QCHECK_LE(CASE_A, CASE_B); + QCHECK_GT(CASE_B, CASE_A); + QCHECK_LT(CASE_A, CASE_B); +} + +TEST(CHECKTest, TestQCHECKPlacementsInCompoundStatements) { + // check placement inside if/else clauses + if (true) QCHECK(true); + + if (false) + ; // NOLINT + else + QCHECK(true); + + if (false) + ; // NOLINT + else + QCHECK(true); + + switch (0) + case 0: + QCHECK(true); + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + constexpr auto var = [](int i) { + QCHECK(i > 0); // NOLINT + return i + 1; + }(global_var); + (void)var; + +#if defined(__GNUC__) + int var2 = (({ CHECK_LE(1, 2); }), global_var < 10) ? 1 : 0; + (void)var2; +#endif // __GNUC__ +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG +} + +class ComparableType { + public: + explicit ComparableType(int v) : v_(v) {} + + void MethodWithCheck(int i) { + CHECK_EQ(*this, i); + CHECK_EQ(i, *this); + } + + int Get() const { return v_; } + + private: + friend bool operator==(const ComparableType& lhs, const ComparableType& rhs) { + return lhs.v_ == rhs.v_; + } + friend bool operator!=(const ComparableType& lhs, const ComparableType& rhs) { + return lhs.v_ != rhs.v_; + } + friend bool operator<(const ComparableType& lhs, const ComparableType& rhs) { + return lhs.v_ < rhs.v_; + } + friend bool operator<=(const ComparableType& lhs, const ComparableType& rhs) { + return lhs.v_ <= rhs.v_; + } + friend bool operator>(const ComparableType& lhs, const ComparableType& rhs) { + return lhs.v_ > rhs.v_; + } + friend bool operator>=(const ComparableType& lhs, const ComparableType& rhs) { + return lhs.v_ >= rhs.v_; + } + friend bool operator==(const ComparableType& lhs, int rhs) { + return lhs.v_ == rhs; + } + friend bool operator==(int lhs, const ComparableType& rhs) { + return lhs == rhs.v_; + } + + friend std::ostream& operator<<(std::ostream& out, const ComparableType& v) { + return out << "ComparableType{" << v.Get() << "}"; + } + + int v_; +}; + +TEST(CHECKTest, TestUserDefinedCompOp) { + CHECK_EQ(ComparableType{0}, ComparableType{0}); + CHECK_NE(ComparableType{1}, ComparableType{2}); + CHECK_LT(ComparableType{1}, ComparableType{2}); + CHECK_LE(ComparableType{1}, ComparableType{2}); + CHECK_GT(ComparableType{2}, ComparableType{1}); + CHECK_GE(ComparableType{2}, ComparableType{2}); +} + +TEST(CHECKTest, TestCheckInMethod) { + ComparableType v{1}; + v.MethodWithCheck(1); +} + +TEST(CHECKDeathTest, TestUserDefinedStreaming) { + ComparableType v1{1}; + ComparableType 
v2{2}; + + EXPECT_DEATH( + CHECK_EQ(v1, v2), + HasSubstr( + "Check failed: v1 == v2 (ComparableType{1} vs. ComparableType{2})")); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null.cc new file mode 100644 index 0000000000..19c6a28ed8 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null.cc @@ -0,0 +1,32 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/die_if_null.h" + +#include "absl/base/config.h" +#include "absl/log/log.h" +#include "absl/strings/str_cat.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +void DieBecauseNull(const char* file, int line, const char* exprtext) { + LOG(FATAL).AtLocation(file, line) + << absl::StrCat("Check failed: '", exprtext, "' Must be non-null"); +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null.h new file mode 100644 index 0000000000..127a9ac882 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null.h @@ -0,0 +1,76 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/die_if_null.h +// ----------------------------------------------------------------------------- +// +// This header declares macro `ABSL_DIE_IF_NULL`. + +#ifndef ABSL_LOG_DIE_IF_NULL_H_ +#define ABSL_LOG_DIE_IF_NULL_H_ + +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +// ABSL_DIE_IF_NULL() +// +// `ABSL_DIE_IF_NULL` behaves as `CHECK_NE` against `nullptr` but *also* +// "returns" its argument. It is useful in initializers where statements (like +// `CHECK_NE`) can't be used. Outside initializers, prefer `CHECK` or +// `CHECK_NE`. `ABSL_DIE_IF_NULL` works for both raw pointers and (compatible) +// smart pointers including `std::unique_ptr` and `std::shared_ptr`; more +// generally, it works for any type that can be compared to nullptr_t. For +// types that aren't raw pointers, `ABSL_DIE_IF_NULL` returns a reference to +// its argument, preserving the value category. 
Example: +// +// Foo() : bar_(ABSL_DIE_IF_NULL(MethodReturningUniquePtr())) {} +// +// Use `CHECK(ptr)` or `CHECK(ptr != nullptr)` if the returned pointer is +// unused. +#define ABSL_DIE_IF_NULL(val) \ + ::absl::log_internal::DieIfNull(__FILE__, __LINE__, #val, (val)) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// Crashes the process after logging `exprtext` annotated at the `file` and +// `line` location. Called when `ABSL_DIE_IF_NULL` fails. Calling this function +// generates less code than its implementation would if inlined, for a slight +// code size reduction each time `ABSL_DIE_IF_NULL` is called. +ABSL_ATTRIBUTE_NORETURN ABSL_ATTRIBUTE_NOINLINE void DieBecauseNull( + const char* file, int line, const char* exprtext); + +// Helper for `ABSL_DIE_IF_NULL`. +template +ABSL_MUST_USE_RESULT T DieIfNull(const char* file, int line, + const char* exprtext, T&& t) { + if (ABSL_PREDICT_FALSE(t == nullptr)) { + // Call a non-inline helper function for a small code size improvement. + DieBecauseNull(file, line, exprtext); + } + return std::forward(t); +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_DIE_IF_NULL_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null_test.cc new file mode 100644 index 0000000000..b0aab781c3 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/die_if_null_test.cc @@ -0,0 +1,107 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/die_if_null.h" + +#include + +#include +#include + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/log/internal/test_helpers.h" + +namespace { + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +// TODO(b/69907837): Revisit these tests with the goal of making them less +// convoluted. 
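Picking up the `ABSL_DIE_IF_NULL` usage notes from `die_if_null.h` above: because the macro "returns" its argument, it fits in member-initializer lists, where a `CHECK` statement cannot appear. A small illustrative sketch (the `Widget` class is hypothetical):

```cpp
#include <memory>
#include <utility>

#include "absl/log/die_if_null.h"

// Hypothetical class: the constructor crashes with file/line and the
// expression text if handed a null pointer, otherwise takes ownership.
class Widget {
 public:
  explicit Widget(std::unique_ptr<int> value)
      : value_(ABSL_DIE_IF_NULL(std::move(value))) {}

 private:
  std::unique_ptr<int> value_;
};
```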
+TEST(AbslDieIfNull, Simple) { + int64_t t; + void* ptr = static_cast(&t); + void* ref = ABSL_DIE_IF_NULL(ptr); + ASSERT_EQ(ptr, ref); + + char* t_as_char; + t_as_char = ABSL_DIE_IF_NULL(reinterpret_cast(&t)); + (void)t_as_char; + + unsigned char* t_as_uchar; + t_as_uchar = ABSL_DIE_IF_NULL(reinterpret_cast(&t)); + (void)t_as_uchar; + + int* t_as_int; + t_as_int = ABSL_DIE_IF_NULL(reinterpret_cast(&t)); + (void)t_as_int; + + int64_t* t_as_int64_t; + t_as_int64_t = ABSL_DIE_IF_NULL(reinterpret_cast(&t)); + (void)t_as_int64_t; + + std::unique_ptr sptr(new int64_t); + EXPECT_EQ(sptr.get(), ABSL_DIE_IF_NULL(sptr).get()); + ABSL_DIE_IF_NULL(sptr).reset(); + + int64_t* int_ptr = new int64_t(); + EXPECT_EQ(int_ptr, ABSL_DIE_IF_NULL(std::unique_ptr(int_ptr)).get()); +} + +#if GTEST_HAS_DEATH_TEST +TEST(DeathCheckAbslDieIfNull, Simple) { + void* ptr; + ASSERT_DEATH({ ptr = ABSL_DIE_IF_NULL(nullptr); }, ""); + (void)ptr; + + std::unique_ptr sptr; + ASSERT_DEATH(ptr = ABSL_DIE_IF_NULL(sptr).get(), ""); +} +#endif + +// Ensures that ABSL_DIE_IF_NULL works with C++11's std::unique_ptr and +// std::shared_ptr. +TEST(AbslDieIfNull, DoesNotCompareSmartPointerToNULL) { + std::unique_ptr up(new int); + EXPECT_EQ(&up, &ABSL_DIE_IF_NULL(up)); + ABSL_DIE_IF_NULL(up).reset(); + + std::shared_ptr sp(new int); + EXPECT_EQ(&sp, &ABSL_DIE_IF_NULL(sp)); + ABSL_DIE_IF_NULL(sp).reset(); +} + +// Verifies that ABSL_DIE_IF_NULL returns an rvalue reference if its argument is +// an rvalue reference. +TEST(AbslDieIfNull, PreservesRValues) { + int64_t* ptr = new int64_t(); + auto uptr = ABSL_DIE_IF_NULL(std::unique_ptr(ptr)); + EXPECT_EQ(ptr, uptr.get()); +} + +// Verifies that ABSL_DIE_IF_NULL returns an lvalue if its argument is an +// lvalue. +TEST(AbslDieIfNull, PreservesLValues) { + int64_t array[2] = {0}; + int64_t* a = array + 0; + int64_t* b = array + 1; + using std::swap; + swap(ABSL_DIE_IF_NULL(a), ABSL_DIE_IF_NULL(b)); + EXPECT_EQ(array + 1, a); + EXPECT_EQ(array + 0, b); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags.cc new file mode 100644 index 0000000000..b5308881e4 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags.cc @@ -0,0 +1,112 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/log/internal/flags.h" + +#include + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/flags/flag.h" +#include "absl/flags/marshalling.h" +#include "absl/log/globals.h" +#include "absl/log/internal/config.h" +#include "absl/strings/numbers.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +namespace { + +void SyncLoggingFlags() { + absl::SetFlag(&FLAGS_minloglevel, static_cast(absl::MinLogLevel())); + absl::SetFlag(&FLAGS_log_prefix, absl::ShouldPrependLogPrefix()); +} + +bool RegisterSyncLoggingFlags() { + log_internal::SetLoggingGlobalsListener(&SyncLoggingFlags); + return true; +} + +ABSL_ATTRIBUTE_UNUSED const bool unused = RegisterSyncLoggingFlags(); + +template +T GetFromEnv(const char* varname, T dflt) { + const char* val = ::getenv(varname); + if (val != nullptr) { + std::string err; + ABSL_INTERNAL_CHECK(absl::ParseFlag(val, &dflt, &err), err.c_str()); + } + return dflt; +} + +constexpr absl::LogSeverityAtLeast StderrThresholdDefault() { + return absl::LogSeverityAtLeast::kError; +} + +} // namespace +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +ABSL_FLAG(int, stderrthreshold, + static_cast(absl::log_internal::StderrThresholdDefault()), + "Log messages at or above this threshold level are copied to stderr.") + .OnUpdate([] { + absl::log_internal::RawSetStderrThreshold( + static_cast( + absl::GetFlag(FLAGS_stderrthreshold))); + }); + +ABSL_FLAG(int, minloglevel, static_cast(absl::LogSeverityAtLeast::kInfo), + "Messages logged at a lower level than this don't actually " + "get logged anywhere") + .OnUpdate([] { + absl::log_internal::RawSetMinLogLevel( + static_cast( + absl::GetFlag(FLAGS_minloglevel))); + }); + +ABSL_FLAG(std::string, log_backtrace_at, "", + "Emit a backtrace when logging at file:linenum.") + .OnUpdate([] { + const std::string log_backtrace_at = + absl::GetFlag(FLAGS_log_backtrace_at); + if (log_backtrace_at.empty()) return; + + const size_t last_colon = log_backtrace_at.rfind(':'); + if (last_colon == log_backtrace_at.npos) return; + + const absl::string_view file = + absl::string_view(log_backtrace_at).substr(0, last_colon); + int line; + if (absl::SimpleAtoi( + absl::string_view(log_backtrace_at).substr(last_colon + 1), + &line)) { + absl::SetLogBacktraceLocation(file, line); + } + }); + +ABSL_FLAG(bool, log_prefix, true, + "Prepend the log prefix to the start of each log line") + .OnUpdate([] { + absl::log_internal::RawEnableLogPrefix(absl::GetFlag(FLAGS_log_prefix)); + }); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags.h new file mode 100644 index 0000000000..146cfdd6ca --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags.h @@ -0,0 +1,43 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/flags.h +// ----------------------------------------------------------------------------- +// + +#ifndef ABSL_LOG_FLAGS_H_ +#define ABSL_LOG_FLAGS_H_ + +// The Abseil Logging library supports the following command line flags to +// configure logging behavior at runtime: +// +// --stderrthreshold= +// Log messages at or above this threshold level are copied to stderr. +// +// --minloglevel= +// Messages logged at a lower level than this are discarded and don't actually +// get logged anywhere. +// +// --log_backtrace_at= +// Emit a backtrace (stack trace) when logging at file:linenum. +// +// To use these commandline flags, the //absl/log:flags library must be +// explicitly linked, and absl::ParseCommandLine() must be called before the +// call to absl::InitializeLog(). +// +// To configure the Log library programmatically, use the interfaces defined in +// absl/log/globals.h. + +#endif // ABSL_LOG_FLAGS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags_test.cc new file mode 100644 index 0000000000..a0f6d7630c --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/flags_test.cc @@ -0,0 +1,184 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/flags.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/log_severity.h" +#include "absl/flags/flag.h" +#include "absl/flags/reflection.h" +#include "absl/log/globals.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/internal/test_matchers.h" +#include "absl/log/log.h" +#include "absl/log/scoped_mock_log.h" +#include "absl/strings/str_cat.h" + +namespace { +using ::absl::log_internal::TextMessage; + +using ::testing::HasSubstr; +using ::testing::Not; + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() { + return absl::LogSeverityAtLeast::kError; +} + +class LogFlagsTest : public ::testing::Test { + protected: + absl::FlagSaver flag_saver_; +}; + +// This test is disabled because it adds order dependency to the test suite. +// This order dependency is currently not fixable due to the way the +// stderrthreshold global value is out of sync with the stderrthreshold flag. 
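The tests below exercise the two-way synchronization set up in `flags.cc`: updating a logging flag triggers its `OnUpdate` hook, which writes into the corresponding global, and updating a global triggers the registered listener, which writes back into the flag. A sketch of that round trip (the call sites are illustrative, not part of the patch):

```cpp
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/log/globals.h"
#include "absl/log/internal/flags.h"

void RoundTrip() {
  // Flag -> global: the OnUpdate hook calls RawSetStderrThreshold().
  absl::SetFlag(&FLAGS_stderrthreshold,
                static_cast<int>(absl::LogSeverityAtLeast::kInfo));
  // absl::StderrThreshold() now reports kInfo.

  // Global -> flag: the listener registered in flags.cc syncs the flag back.
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kWarning);
  // absl::GetFlag(FLAGS_minloglevel) now holds the numeric value of kWarning.
}
```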
+TEST_F(LogFlagsTest, DISABLED_StderrKnobsDefault) { + EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); +} + +TEST_F(LogFlagsTest, SetStderrThreshold) { + absl::SetFlag(&FLAGS_stderrthreshold, + static_cast(absl::LogSeverityAtLeast::kInfo)); + + EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kInfo); + + absl::SetFlag(&FLAGS_stderrthreshold, + static_cast(absl::LogSeverityAtLeast::kError)); + + EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError); +} + +TEST_F(LogFlagsTest, SetMinLogLevel) { + absl::SetFlag(&FLAGS_minloglevel, + static_cast(absl::LogSeverityAtLeast::kError)); + + EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError); + + absl::log_internal::ScopedMinLogLevel scoped_min_log_level( + absl::LogSeverityAtLeast::kWarning); + + EXPECT_EQ(absl::GetFlag(FLAGS_minloglevel), + static_cast(absl::LogSeverityAtLeast::kWarning)); +} + +TEST_F(LogFlagsTest, PrependLogPrefix) { + absl::SetFlag(&FLAGS_log_prefix, false); + + EXPECT_EQ(absl::ShouldPrependLogPrefix(), false); + + absl::EnableLogPrefix(true); + + EXPECT_EQ(absl::GetFlag(FLAGS_log_prefix), true); +} + +TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + absl::SetFlag(&FLAGS_log_backtrace_at, ""); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << "hello world"; +} + +TEST_F(LogFlagsTest, BacktraceAtNonsense) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish"); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << "hello world"; +} + +TEST_F(LogFlagsTest, BacktraceAtWrongFile) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO) << "hello world"; }; + absl::SetFlag(&FLAGS_log_backtrace_at, + absl::StrCat("some_other_file.cc:", log_line)); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); + + test_sink.StartCapturingLogs(); + do_log(); +} + +TEST_F(LogFlagsTest, BacktraceAtWrongLine) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO) << "hello world"; }; + absl::SetFlag(&FLAGS_log_backtrace_at, + absl::StrCat("flags_test.cc:", log_line + 1)); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); + + test_sink.StartCapturingLogs(); + do_log(); +} + +TEST_F(LogFlagsTest, BacktraceAtWholeFilename) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO) << "hello world"; }; + absl::SetFlag(&FLAGS_log_backtrace_at, absl::StrCat(__FILE__, ":", log_line)); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); + + test_sink.StartCapturingLogs(); + do_log(); +} + +TEST_F(LogFlagsTest, BacktraceAtNonmatchingSuffix) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO) << "hello world"; 
}; + absl::SetFlag(&FLAGS_log_backtrace_at, + absl::StrCat("flags_test.cc:", log_line, "gibberish")); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); + + test_sink.StartCapturingLogs(); + do_log(); +} + +TEST_F(LogFlagsTest, LogsBacktrace) { + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO) << "hello world"; }; + absl::SetFlag(&FLAGS_log_backtrace_at, + absl::StrCat("flags_test.cc:", log_line)); + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(TextMessage(HasSubstr("(stacktrace:")))); + + test_sink.StartCapturingLogs(); + do_log(); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals.cc new file mode 100644 index 0000000000..6dfe81f006 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals.cc @@ -0,0 +1,148 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/globals.h" + +#include +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/log_severity.h" +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { + +// These atomics represent logging library configuration. +// Integer types are used instead of absl::LogSeverity to ensure that a +// lock-free std::atomic is used when possible. +ABSL_CONST_INIT std::atomic min_log_level{ + static_cast(absl::LogSeverityAtLeast::kInfo)}; +ABSL_CONST_INIT std::atomic stderrthreshold{ + static_cast(absl::LogSeverityAtLeast::kError)}; +// We evaluate this value as a hash comparison to avoid having to +// hold a mutex or make a copy (to access the value of a string-typed flag) in +// very hot codepath. 
+ABSL_CONST_INIT std::atomic log_backtrace_at_hash{0}; +ABSL_CONST_INIT std::atomic prepend_log_prefix{true}; + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES +absl::base_internal::AtomicHook + logging_globals_listener; + +size_t HashSiteForLogBacktraceAt(absl::string_view file, int line) { + return absl::HashOf(file, line); +} + +void TriggerLoggingGlobalsListener() { + auto* listener = logging_globals_listener.Load(); + if (listener != nullptr) listener(); +} + +} // namespace + +namespace log_internal { + +void RawSetMinLogLevel(absl::LogSeverityAtLeast severity) { + min_log_level.store(static_cast(severity), std::memory_order_release); +} + +void RawSetStderrThreshold(absl::LogSeverityAtLeast severity) { + stderrthreshold.store(static_cast(severity), std::memory_order_release); +} + +void RawEnableLogPrefix(bool on_off) { + prepend_log_prefix.store(on_off, std::memory_order_release); +} + +void SetLoggingGlobalsListener(LoggingGlobalsListener l) { + logging_globals_listener.Store(l); +} + +} // namespace log_internal + +absl::LogSeverityAtLeast MinLogLevel() { + return static_cast( + min_log_level.load(std::memory_order_acquire)); +} + +void SetMinLogLevel(absl::LogSeverityAtLeast severity) { + log_internal::RawSetMinLogLevel(severity); + TriggerLoggingGlobalsListener(); +} + +namespace log_internal { + +ScopedMinLogLevel::ScopedMinLogLevel(absl::LogSeverityAtLeast severity) + : saved_severity_(absl::MinLogLevel()) { + absl::SetMinLogLevel(severity); +} +ScopedMinLogLevel::~ScopedMinLogLevel() { + absl::SetMinLogLevel(saved_severity_); +} + +} // namespace log_internal + +absl::LogSeverityAtLeast StderrThreshold() { + return static_cast( + stderrthreshold.load(std::memory_order_acquire)); +} + +void SetStderrThreshold(absl::LogSeverityAtLeast severity) { + log_internal::RawSetStderrThreshold(severity); + TriggerLoggingGlobalsListener(); +} + +ScopedStderrThreshold::ScopedStderrThreshold(absl::LogSeverityAtLeast severity) + : saved_severity_(absl::StderrThreshold()) { + absl::SetStderrThreshold(severity); +} + +ScopedStderrThreshold::~ScopedStderrThreshold() { + absl::SetStderrThreshold(saved_severity_); +} + +namespace log_internal { + +bool ShouldLogBacktraceAt(absl::string_view file, int line) { + const size_t flag_hash = + log_backtrace_at_hash.load(std::memory_order_acquire); + + return flag_hash != 0 && flag_hash == HashSiteForLogBacktraceAt(file, line); +} + +} // namespace log_internal + +void SetLogBacktraceLocation(absl::string_view file, int line) { + log_backtrace_at_hash.store(HashSiteForLogBacktraceAt(file, line), + std::memory_order_release); +} + +bool ShouldPrependLogPrefix() { + return prepend_log_prefix.load(std::memory_order_acquire); +} + +void EnableLogPrefix(bool on_off) { + log_internal::RawEnableLogPrefix(on_off); + TriggerLoggingGlobalsListener(); +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals.h new file mode 100644 index 0000000000..32b87db058 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals.h @@ -0,0 +1,165 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/globals.h +// ----------------------------------------------------------------------------- +// +// This header declares global logging library configuration knobs. + +#ifndef ABSL_LOG_GLOBALS_H_ +#define ABSL_LOG_GLOBALS_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +//------------------------------------------------------------------------------ +// Minimum Log Level +//------------------------------------------------------------------------------ +// +// Messages logged at or above this severity are directed to all registered log +// sinks or skipped otherwise. This parameter can also be modified using +// command line flag --minloglevel. +// See absl/base/log_severity.h for descriptions of severity levels. + +// MinLogLevel() +// +// Returns the value of the Minimum Log Level parameter. +// This function is async-signal-safe. +ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast MinLogLevel(); + +// SetMinLogLevel() +// +// Updates the value of Minimum Log Level parameter. +// This function is async-signal-safe. +void SetMinLogLevel(absl::LogSeverityAtLeast severity); + +namespace log_internal { + +// ScopedMinLogLevel +// +// RAII type used to temporarily update the Min Log Level parameter. +class ScopedMinLogLevel final { + public: + explicit ScopedMinLogLevel(absl::LogSeverityAtLeast severity); + ScopedMinLogLevel(const ScopedMinLogLevel&) = delete; + ScopedMinLogLevel& operator=(const ScopedMinLogLevel&) = delete; + ~ScopedMinLogLevel(); + + private: + absl::LogSeverityAtLeast saved_severity_; +}; + +} // namespace log_internal + +//------------------------------------------------------------------------------ +// Stderr Threshold +//------------------------------------------------------------------------------ +// +// Messages logged at or above this level are directed to stderr in +// addition to other registered log sinks. This parameter can also be modified +// using command line flag --stderrthreshold. +// See absl/base/log_severity.h for descriptions of severity levels. + +// StderrThreshold() +// +// Returns the value of the Stderr Threshold parameter. +// This function is async-signal-safe. +ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast StderrThreshold(); + +// SetStderrThreshold() +// +// Updates the Stderr Threshold parameter. +// This function is async-signal-safe. +void SetStderrThreshold(absl::LogSeverityAtLeast severity); +inline void SetStderrThreshold(absl::LogSeverity severity) { + absl::SetStderrThreshold(static_cast(severity)); +} + +// ScopedStderrThreshold +// +// RAII type used to temporarily update the Stderr Threshold parameter. 
+class ScopedStderrThreshold final { + public: + explicit ScopedStderrThreshold(absl::LogSeverityAtLeast severity); + ScopedStderrThreshold(const ScopedStderrThreshold&) = delete; + ScopedStderrThreshold& operator=(const ScopedStderrThreshold&) = delete; + ~ScopedStderrThreshold(); + + private: + absl::LogSeverityAtLeast saved_severity_; +}; + +//------------------------------------------------------------------------------ +// Log Backtrace At +//------------------------------------------------------------------------------ +// +// Users can request backtrace to be logged at specific locations, specified +// by file and line number. + +// ShouldLogBacktraceAt() +// +// Returns true if we should log a backtrace at the specified location. +namespace log_internal { +ABSL_MUST_USE_RESULT bool ShouldLogBacktraceAt(absl::string_view file, + int line); +} // namespace log_internal + +// SetLogBacktraceLocation() +// +// Sets the location the backtrace should be logged at. +void SetLogBacktraceLocation(absl::string_view file, int line); + +//------------------------------------------------------------------------------ +// Prepend Log Prefix +//------------------------------------------------------------------------------ +// +// This option tells the logging library that every logged message +// should include the prefix (severity, date, time, PID, etc.) + +// ShouldPrependLogPrefix() +// +// Returns the value of the Prepend Log Prefix option. +// This function is async-signal-safe. +ABSL_MUST_USE_RESULT bool ShouldPrependLogPrefix(); + +// EnableLogPrefix() +// +// Updates the value of the Prepend Log Prefix option. +// This function is async-signal-safe. +void EnableLogPrefix(bool on_off); + +namespace log_internal { + +using LoggingGlobalsListener = void (*)(); +void SetLoggingGlobalsListener(LoggingGlobalsListener l); + +// Internal implementation for the setter routines. These are used +// to break circular dependencies between flags and globals. Each "Raw" +// routine corresponds to the non-"Raw" counterpart and used to set the +// configuration parameter directly without calling back to the listener. +void RawSetMinLogLevel(absl::LogSeverityAtLeast severity); +void RawSetStderrThreshold(absl::LogSeverityAtLeast severity); +void RawEnableLogPrefix(bool on_off); + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_GLOBALS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals_test.cc new file mode 100644 index 0000000000..6710c5aabb --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/globals_test.cc @@ -0,0 +1,91 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
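Before the tests, a short illustration of the knobs declared in `globals.h` above; the function bodies and the file/line passed to `SetLogBacktraceLocation` are hypothetical:

```cpp
#include "absl/base/log_severity.h"
#include "absl/log/globals.h"
#include "absl/log/log.h"

void ConfigureLogging() {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  absl::SetStderrThreshold(absl::LogSeverityAtLeast::kWarning);
  absl::EnableLogPrefix(false);                    // drop the severity/time prefix
  absl::SetLogBacktraceLocation("server.cc", 42);  // emit a backtrace at that site
}

void QuietSection() {
  // RAII override; the previous threshold is restored on scope exit.
  absl::ScopedStderrThreshold quiet(absl::LogSeverityAtLeast::kError);
  LOG(INFO) << "still reaches registered sinks, but is not copied to stderr";
}
```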
+ +#include "absl/log/globals.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/log_severity.h" +#include "absl/log/internal/globals.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/log.h" +#include "absl/log/scoped_mock_log.h" + +namespace { + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +constexpr static absl::LogSeverityAtLeast DefaultMinLogLevel() { + return absl::LogSeverityAtLeast::kInfo; +} +constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() { + return absl::LogSeverityAtLeast::kError; +} + +TEST(TestGlobals, MinLogLevel) { + EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel()); + absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError); + EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError); + absl::SetMinLogLevel(DefaultMinLogLevel()); +} + +TEST(TestGlobals, ScopedMinLogLevel) { + EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel()); + { + absl::log_internal::ScopedMinLogLevel scoped_stderr_threshold( + absl::LogSeverityAtLeast::kError); + EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError); + } + EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel()); +} + +TEST(TestGlobals, StderrThreshold) { + EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError); + EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError); + absl::SetStderrThreshold(DefaultStderrThreshold()); +} + +TEST(TestGlobals, ScopedStderrThreshold) { + EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); + { + absl::ScopedStderrThreshold scoped_stderr_threshold( + absl::LogSeverityAtLeast::kError); + EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError); + } + EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); +} + +TEST(TestGlobals, LogBacktraceAt) { + EXPECT_FALSE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111)); + absl::SetLogBacktraceLocation("some_file.cc", 111); + EXPECT_TRUE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111)); + EXPECT_FALSE( + absl::log_internal::ShouldLogBacktraceAt("another_file.cc", 222)); +} + +TEST(TestGlobals, LogPrefix) { + EXPECT_TRUE(absl::ShouldPrependLogPrefix()); + absl::EnableLogPrefix(false); + EXPECT_FALSE(absl::ShouldPrependLogPrefix()); + absl::EnableLogPrefix(true); + EXPECT_TRUE(absl::ShouldPrependLogPrefix()); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/initialize.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/initialize.cc new file mode 100644 index 0000000000..a3f6d6c142 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/initialize.cc @@ -0,0 +1,34 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/log/initialize.h" + +#include "absl/base/config.h" +#include "absl/log/internal/globals.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +void InitializeLog() { + // This comes first since it is used by RAW_LOG. + absl::log_internal::SetTimeZone(absl::LocalTimeZone()); + + // Note that initialization is complete, so logs can now be sent to their + // proper destinations rather than stderr. + log_internal::SetInitialized(); +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/initialize.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/initialize.h new file mode 100644 index 0000000000..f600eb606c --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/initialize.h @@ -0,0 +1,45 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/initialize.h +// ----------------------------------------------------------------------------- +// +// This header declares the Abseil Log initialization routine InitializeLog(). + +#ifndef ABSL_LOG_INITIALIZE_H_ +#define ABSL_LOG_INITIALIZE_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// InitializeLog() +// +// Initializes the Abseil logging library. +// +// Before this function is called, all log messages are directed only to stderr. +// After initialization is finished, log messages are directed to all registered +// `LogSink`s. +// +// It is an error to call this function twice. +// +// There is no corresponding function to shut down the logging library. +void InitializeLog(); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INITIALIZE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/append_truncated.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/append_truncated.h new file mode 100644 index 0000000000..096b7517ed --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/append_truncated.h @@ -0,0 +1,40 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
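Tying `initialize.h` (above) to the usage notes in `flags.h`: a typical start-up sequence parses flags first, then initializes logging exactly once; until `InitializeLog()` runs, messages go only to stderr. A minimal sketch, assuming the `//absl/log:flags` and `//absl/flags:parse` targets are linked in:

```cpp
#include "absl/flags/parse.h"
#include "absl/log/initialize.h"
#include "absl/log/log.h"

int main(int argc, char** argv) {
  absl::ParseCommandLine(argc, argv);  // makes --minloglevel etc. take effect
  absl::InitializeLog();               // must be called at most once
  LOG(INFO) << "logging initialized";
  return 0;
}
```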
+ +#ifndef ABSL_LOG_INTERNAL_APPEND_TRUNCATED_H_ +#define ABSL_LOG_INTERNAL_APPEND_TRUNCATED_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +// Copies into `dst` as many bytes of `src` as will fit, then truncates the +// copied bytes from the front of `dst` and returns the number of bytes written. +inline size_t AppendTruncated(absl::string_view src, absl::Span &dst) { + if (src.size() > dst.size()) src = src.substr(0, dst.size()); + memcpy(dst.data(), src.data(), src.size()); + dst.remove_prefix(src.size()); + return src.size(); +} +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_APPEND_TRUNCATED_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/check_op.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/check_op.cc new file mode 100644 index 0000000000..f4b67647a6 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/check_op.cc @@ -0,0 +1,118 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/check_op.h" + +#include + +#ifdef _MSC_VER +#define strcasecmp _stricmp +#else +#include // for strcasecmp, but msvc does not have this header +#endif + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/str_cat.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +#define ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(x) \ + template std::string* MakeCheckOpString(x, x, const char*) +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(bool); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(int64_t); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(uint64_t); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(float); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(double); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(char); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(unsigned char); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const std::string&); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const absl::string_view&); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const char*); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const signed char*); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const unsigned char*); +ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(const void*); +#undef ABSL_LOGGING_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING + +CheckOpMessageBuilder::CheckOpMessageBuilder(const char* exprtext) { + stream_ << exprtext << " ("; +} + +std::ostream& CheckOpMessageBuilder::ForVar2() { + stream_ << " vs. 
"; + return stream_; +} + +std::string* CheckOpMessageBuilder::NewString() { + stream_ << ")"; + return new std::string(stream_.str()); +} + +void MakeCheckOpValueString(std::ostream& os, const char v) { + if (v >= 32 && v <= 126) { + os << "'" << v << "'"; + } else { + os << "char value " << int{v}; + } +} + +void MakeCheckOpValueString(std::ostream& os, const signed char v) { + if (v >= 32 && v <= 126) { + os << "'" << v << "'"; + } else { + os << "signed char value " << int{v}; + } +} + +void MakeCheckOpValueString(std::ostream& os, const unsigned char v) { + if (v >= 32 && v <= 126) { + os << "'" << v << "'"; + } else { + os << "unsigned char value " << int{v}; + } +} + +void MakeCheckOpValueString(std::ostream& os, const void* p) { + if (p == nullptr) { + os << "(null)"; + } else { + os << p; + } +} + +// Helper functions for string comparisons. +#define DEFINE_CHECK_STROP_IMPL(name, func, expected) \ + std::string* Check##func##expected##Impl(const char* s1, const char* s2, \ + const char* exprtext) { \ + bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2)); \ + if (equal == expected) { \ + return nullptr; \ + } else { \ + return new std::string( \ + absl::StrCat(exprtext, " (", s1, " vs. ", s2, ")")); \ + } \ + } +DEFINE_CHECK_STROP_IMPL(CHECK_STREQ, strcmp, true) +DEFINE_CHECK_STROP_IMPL(CHECK_STRNE, strcmp, false) +DEFINE_CHECK_STROP_IMPL(CHECK_STRCASEEQ, strcasecmp, true) +DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false) +#undef DEFINE_CHECK_STROP_IMPL + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/check_op.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/check_op.h new file mode 100644 index 0000000000..559e5afc88 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/check_op.h @@ -0,0 +1,385 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/check_op.h +// ----------------------------------------------------------------------------- +// +// This file declares helpers routines and macros used to implement `CHECK` +// macros. + +#ifndef ABSL_LOG_INTERNAL_CHECK_OP_H_ +#define ABSL_LOG_INTERNAL_CHECK_OP_H_ + +#include + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/log/internal/nullguard.h" +#include "absl/log/internal/nullstream.h" +#include "absl/log/internal/strip.h" + +// `ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL` wraps string literals that +// should be stripped when `ABSL_MIN_LOG_LEVEL` exceeds `kFatal`. +#ifdef ABSL_MIN_LOG_LEVEL +#define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal) \ + (::absl::LogSeverity::kFatal >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \ + ? 
(literal) \ + : "") +#else +#define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal) (literal) +#endif + +#ifdef NDEBUG +// `NDEBUG` is defined, so `DCHECK_EQ(x, y)` and so on do nothing. However, we +// still want the compiler to parse `x` and `y`, because we don't want to lose +// potentially useful errors and warnings. +#define ABSL_LOG_INTERNAL_DCHECK_NOP(x, y) \ + while (false && ((void)(x), (void)(y), 0)) \ + ::absl::log_internal::NullStream().InternalStream() +#endif + +#define ABSL_LOG_INTERNAL_CHECK_OP(name, op, val1, val2) \ + while ( \ + ::std::string* absl_log_internal_check_op_result ABSL_ATTRIBUTE_UNUSED = \ + ::absl::log_internal::name##Impl( \ + ::absl::log_internal::GetReferenceableValue(val1), \ + ::absl::log_internal::GetReferenceableValue(val2), \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(#val1 " " #op \ + " " #val2))) \ + ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_op_result).InternalStream() +#define ABSL_LOG_INTERNAL_QCHECK_OP(name, op, val1, val2) \ + while (::std::string* absl_log_internal_qcheck_op_result = \ + ::absl::log_internal::name##Impl( \ + ::absl::log_internal::GetReferenceableValue(val1), \ + ::absl::log_internal::GetReferenceableValue(val2), \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(#val1 " " #op \ + " " #val2))) \ + ABSL_LOG_INTERNAL_QCHECK(*absl_log_internal_qcheck_op_result).InternalStream() +#define ABSL_LOG_INTERNAL_CHECK_STROP(func, op, expected, s1, s2) \ + while (::std::string* absl_log_internal_check_strop_result = \ + ::absl::log_internal::Check##func##expected##Impl( \ + (s1), (s2), \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(#s1 " " #op " " #s2))) \ + ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_strop_result) \ + .InternalStream() +#define ABSL_LOG_INTERNAL_QCHECK_STROP(func, op, expected, s1, s2) \ + while (::std::string* absl_log_internal_qcheck_strop_result = \ + ::absl::log_internal::Check##func##expected##Impl( \ + (s1), (s2), \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(#s1 " " #op " " #s2))) \ + ABSL_LOG_INTERNAL_QCHECK(*absl_log_internal_qcheck_strop_result) \ + .InternalStream() +// This one is tricky: +// * We must evaluate `val` exactly once, yet we need to do two things with it: +// evaluate `.ok()` and (sometimes) `.ToString()`. +// * `val` might be an `absl::Status` or some `absl::StatusOr`. +// * `val` might be e.g. `ATemporary().GetStatus()`, which may return a +// reference to a member of `ATemporary` that is only valid until the end of +// the full expression. +// * We don't want this file to depend on `absl::Status` `#include`s or linkage, +// nor do we want to move the definition to status and introduce a dependency +// in the other direction. We can be assured that callers must already have a +// `Status` and the necessary `#include`s and linkage. +// * Callsites should be small and fast (at least when `val.ok()`): one branch, +// minimal stack footprint. +// * In particular, the string concat stuff should be out-of-line and emitted +// in only one TU to save linker input size +// * We want the `val.ok()` check inline so static analyzers and optimizers can +// see it. +// * As usual, no braces so we can stream into the expansion with `operator<<`. +// * Also as usual, it must expand to a single (partial) statement with no +// ambiguous-else problems. 
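Before the definition that follows, a call-site sketch of what this contract buys; `ParsePort` is hypothetical, and `CHECK_OK` is the public wrapper from `check.h` added earlier in this patch:

```cpp
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"

// Hypothetical parser used only for illustration.
absl::StatusOr<int> ParsePort(absl::string_view text) {
  int port = 0;
  if (!absl::SimpleAtoi(text, &port)) {
    return absl::InvalidArgumentError("not a number");
  }
  return port;
}

void Init() {
  absl::StatusOr<int> port = ParsePort("8080");
  // `port` is evaluated once; AsStatus() selects the StatusOr overload, and on
  // failure the fatal message contains "port is OK" plus the streamed extras.
  CHECK_OK(port) << "invalid --port";
}
```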
+#define ABSL_LOG_INTERNAL_CHECK_OK(val) \ + for (::std::pair \ + absl_log_internal_check_ok_goo; \ + absl_log_internal_check_ok_goo.first = \ + ::absl::log_internal::AsStatus(val), \ + absl_log_internal_check_ok_goo.second = \ + ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok()) \ + ? nullptr \ + : ::absl::status_internal::MakeCheckFailString( \ + absl_log_internal_check_ok_goo.first, \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(#val " is OK")), \ + !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());) \ + ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_ok_goo.second) \ + .InternalStream() +#define ABSL_LOG_INTERNAL_QCHECK_OK(val) \ + for (::std::pair \ + absl_log_internal_check_ok_goo; \ + absl_log_internal_check_ok_goo.first = \ + ::absl::log_internal::AsStatus(val), \ + absl_log_internal_check_ok_goo.second = \ + ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok()) \ + ? nullptr \ + : ::absl::status_internal::MakeCheckFailString( \ + absl_log_internal_check_ok_goo.first, \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(#val " is OK")), \ + !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());) \ + ABSL_LOG_INTERNAL_QCHECK(*absl_log_internal_check_ok_goo.second) \ + .InternalStream() + +namespace absl { +ABSL_NAMESPACE_BEGIN + +class Status; +template +class StatusOr; + +namespace status_internal { +std::string* MakeCheckFailString(const absl::Status* status, + const char* prefix); +} // namespace status_internal + +namespace log_internal { + +// Convert a Status or a StatusOr to its underlying status value. +// +// (This implementation does not require a dep on absl::Status to work.) +inline const absl::Status* AsStatus(const absl::Status& s) { return &s; } +template +const absl::Status* AsStatus(const absl::StatusOr& s) { + return &s.status(); +} + +// A helper class for formatting `expr (V1 vs. V2)` in a `CHECK_XX` statement. +// See `MakeCheckOpString` for sample usage. +class CheckOpMessageBuilder final { + public: + // Inserts `exprtext` and ` (` to the stream. + explicit CheckOpMessageBuilder(const char* exprtext); + ~CheckOpMessageBuilder() = default; + // For inserting the first variable. + std::ostream& ForVar1() { return stream_; } + // For inserting the second variable (adds an intermediate ` vs. `). + std::ostream& ForVar2(); + // Get the result (inserts the closing `)`). + std::string* NewString(); + + private: + std::ostringstream stream_; +}; + +// This formats a value for a failing `CHECK_XX` statement. Ordinarily, it uses +// the definition for `operator<<`, with a few special cases below. +template +inline void MakeCheckOpValueString(std::ostream& os, const T& v) { + os << log_internal::NullGuard::Guard(v); +} + +// Overloads for char types provide readable values for unprintable characters. +void MakeCheckOpValueString(std::ostream& os, char v); +void MakeCheckOpValueString(std::ostream& os, signed char v); +void MakeCheckOpValueString(std::ostream& os, unsigned char v); +void MakeCheckOpValueString(std::ostream& os, const void* p); + +namespace detect_specialization { + +// MakeCheckOpString is being specialized for every T and U pair that is being +// passed to the CHECK_op macros. However, there is a lot of redundancy in these +// specializations that creates unnecessary library and binary bloat. +// The number of instantiations tends to be O(n^2) because we have two +// independent inputs. This technique works by reducing `n`. +// +// Most user-defined types being passed to CHECK_op end up being printed as a +// builtin type. 
For example, enums tend to be implicitly converted to its +// underlying type when calling operator<<, and pointers are printed with the +// `const void*` overload. +// To reduce the number of instantiations we coerce these values before calling +// MakeCheckOpString instead of inside it. +// +// To detect if this coercion is needed, we duplicate all the relevant +// operator<< overloads as specified in the standard, just in a different +// namespace. If the call to `stream << value` becomes ambiguous, it means that +// one of these overloads is the one selected by overload resolution. We then +// do overload resolution again just with our overload set to see which one gets +// selected. That tells us which type to coerce to. +// If the augmented call was not ambiguous, it means that none of these were +// selected and we can't coerce the input. +// +// As a secondary step to reduce code duplication, we promote integral types to +// their 64-bit variant. This does not change the printed value, but reduces the +// number of instantiations even further. Promoting an integer is very cheap at +// the call site. +int64_t operator<<(std::ostream&, short value); // NOLINT +int64_t operator<<(std::ostream&, unsigned short value); // NOLINT +int64_t operator<<(std::ostream&, int value); +int64_t operator<<(std::ostream&, unsigned int value); +int64_t operator<<(std::ostream&, long value); // NOLINT +uint64_t operator<<(std::ostream&, unsigned long value); // NOLINT +int64_t operator<<(std::ostream&, long long value); // NOLINT +uint64_t operator<<(std::ostream&, unsigned long long value); // NOLINT +float operator<<(std::ostream&, float value); +double operator<<(std::ostream&, double value); +long double operator<<(std::ostream&, long double value); +bool operator<<(std::ostream&, bool value); +const void* operator<<(std::ostream&, const void* value); +const void* operator<<(std::ostream&, std::nullptr_t); + +// These `char` overloads are specified like this in the standard, so we have to +// write them exactly the same to ensure the call is ambiguous. +// If we wrote it in a different way (eg taking std::ostream instead of the +// template) then one call might have a higher rank than the other and it would +// not be ambiguous. +template +char operator<<(std::basic_ostream&, char); +template +signed char operator<<(std::basic_ostream&, signed char); +template +unsigned char operator<<(std::basic_ostream&, unsigned char); +template +const char* operator<<(std::basic_ostream&, const char*); +template +const signed char* operator<<(std::basic_ostream&, + const signed char*); +template +const unsigned char* operator<<(std::basic_ostream&, + const unsigned char*); + +// This overload triggers when the call is not ambiguous. +// It means that T is being printed with some overload not on this list. +// We keep the value as `const T&`. +template () + << std::declval())> +const T& Detect(int); + +// This overload triggers when the call is ambiguous. +// It means that T is either one from this list or printed as one from this +// list. Eg an enum that decays to `int` for printing. +// We ask the overload set to give us the type we want to convert it to. +template +decltype(detect_specialization::operator<<(std::declval(), + std::declval())) +Detect(char); + +} // namespace detect_specialization + +template +using CheckOpStreamType = decltype(detect_specialization::Detect(0)); + +// Build the error message string. Specify no inlining for code size. 
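+// Editorial example (added comment; not from the upstream source): for
+// `CHECK_EQ(x, y)` with x == 1 and y == 2, the string built here reads
+//   x == y (1 vs. 2)
+// i.e. the stringized expression from the macro followed by both values as
+// rendered by `MakeCheckOpValueString`.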
+template <typename T1, typename T2>
+ABSL_ATTRIBUTE_RETURNS_NONNULL std::string* MakeCheckOpString(
+    T1 v1, T2 v2, const char* exprtext) ABSL_ATTRIBUTE_NOINLINE;
+
+template <typename T1, typename T2>
+std::string* MakeCheckOpString(T1 v1, T2 v2, const char* exprtext) {
+  CheckOpMessageBuilder comb(exprtext);
+  MakeCheckOpValueString(comb.ForVar1(), v1);
+  MakeCheckOpValueString(comb.ForVar2(), v2);
+  return comb.NewString();
+}
+
+// Add a few commonly used instantiations as extern to reduce size of objects
+// files.
+#define ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(x) \
+  extern template std::string* MakeCheckOpString(x, x, const char*)
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(bool);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(int64_t);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(uint64_t);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(float);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(double);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(char);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(unsigned char);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const std::string&);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const absl::string_view&);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const signed char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const unsigned char*);
+ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*);
+#undef ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN
+
+// Helper functions for `ABSL_LOG_INTERNAL_CHECK_OP` macro family.  The
+// `(int, int)` override works around the issue that the compiler will not
+// instantiate the template version of the function on values of unnamed enum
+// type.
+#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op)                        \
+  template <typename T1, typename T2>                                    \
+  inline constexpr ::std::string* name##Impl(const T1& v1, const T2& v2, \
+                                             const char* exprtext) {     \
+    using U1 = CheckOpStreamType<T1>;                                    \
+    using U2 = CheckOpStreamType<T2>;                                    \
+    return ABSL_PREDICT_TRUE(v1 op v2)                                   \
+               ? nullptr                                                 \
+               : MakeCheckOpString<U1, U2>(v1, v2, exprtext);            \
+  }                                                                      \
+  inline constexpr ::std::string* name##Impl(int v1, int v2,             \
+                                             const char* exprtext) {     \
+    return name##Impl<int, int>(v1, v2, exprtext);                       \
+  }
+
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_EQ, ==)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_NE, !=)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_LE, <=)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_LT, <)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_GE, >=)
+ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_GT, >)
+#undef ABSL_LOG_INTERNAL_CHECK_OP_IMPL
+
+std::string* CheckstrcmptrueImpl(const char* s1, const char* s2,
+                                 const char* exprtext);
+std::string* CheckstrcmpfalseImpl(const char* s1, const char* s2,
+                                  const char* exprtext);
+std::string* CheckstrcasecmptrueImpl(const char* s1, const char* s2,
+                                     const char* exprtext);
+std::string* CheckstrcasecmpfalseImpl(const char* s1, const char* s2,
+                                      const char* exprtext);
+
+// `CHECK_EQ` and friends want to pass their arguments by reference, however
+// this winds up exposing lots of cases where people have defined and
+// initialized static const data members but never declared them (i.e. in a .cc
+// file), meaning they are not referenceable.  This function avoids that problem
+// for integers (the most common cases) by overloading for every primitive
+// integer type, even the ones we discourage, and returning them by value.
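+// Editorial example (added comment; not from the upstream source): pre-C++17,
+//   struct Limits { static const int kMax = 100; };  // no definition in a .cc
+//   CHECK_LT(value, Limits::kMax);
+// would bind `kMax` to `const int&`, odr-using it and failing to link; the
+// by-value overloads below sidestep that.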
+template +inline constexpr const T& GetReferenceableValue(const T& t) { + return t; +} +inline constexpr char GetReferenceableValue(char t) { return t; } +inline constexpr unsigned char GetReferenceableValue(unsigned char t) { + return t; +} +inline constexpr signed char GetReferenceableValue(signed char t) { return t; } +inline constexpr short GetReferenceableValue(short t) { return t; } // NOLINT +inline constexpr unsigned short GetReferenceableValue( // NOLINT + unsigned short t) { // NOLINT + return t; +} +inline constexpr int GetReferenceableValue(int t) { return t; } +inline unsigned int GetReferenceableValue(unsigned int t) { return t; } +inline constexpr long GetReferenceableValue(long t) { return t; } // NOLINT +inline constexpr unsigned long GetReferenceableValue( // NOLINT + unsigned long t) { // NOLINT + return t; +} +inline constexpr long long GetReferenceableValue(long long t) { // NOLINT + return t; +} +inline constexpr unsigned long long GetReferenceableValue( // NOLINT + unsigned long long t) { // NOLINT + return t; +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_CHECK_OP_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/conditions.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/conditions.cc new file mode 100644 index 0000000000..a9f4966f5d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/conditions.cc @@ -0,0 +1,83 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/conditions.h" + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/cycleclock.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +namespace { + +// The following code behaves like AtomicStatsCounter::LossyAdd() for +// speed since it is fine to lose occasional updates. +// Returns old value of *counter. 
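+// (Editorial note, not in the upstream source: because this is a relaxed load
+// followed by a plain store rather than a fetch_add, two racing threads can
+// both observe N and both store N + 1, dropping one increment; that is the
+// "lossy" trade-off accepted here.)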
+uint32_t LossyIncrement(std::atomic* counter) { + const uint32_t value = counter->load(std::memory_order_relaxed); + counter->store(value + 1, std::memory_order_relaxed); + return value; +} + +} // namespace + +bool LogEveryNState::ShouldLog(int n) { + return n > 0 && (LossyIncrement(&counter_) % static_cast(n)) == 0; +} + +bool LogFirstNState::ShouldLog(int n) { + const uint32_t counter_value = counter_.load(std::memory_order_relaxed); + if (static_cast(counter_value) < n) { + counter_.store(counter_value + 1, std::memory_order_relaxed); + return true; + } + return false; +} + +bool LogEveryPow2State::ShouldLog() { + const uint32_t new_value = LossyIncrement(&counter_) + 1; + return (new_value & (new_value - 1)) == 0; +} + +bool LogEveryNSecState::ShouldLog(double seconds) { + using absl::base_internal::CycleClock; + LossyIncrement(&counter_); + const int64_t now_cycles = CycleClock::Now(); + int64_t next_cycles = next_log_time_cycles_.load(std::memory_order_relaxed); +#if defined(__myriad2__) + // myriad2 does not have 8-byte compare and exchange. Use a racy version that + // is "good enough" but will over-log in the face of concurrent logging. + if (now_cycles > next_cycles) { + next_log_time_cycles_.store(now_cycles + seconds * CycleClock::Frequency(), + std::memory_order_relaxed); + return true; + } + return false; +#else + do { + if (now_cycles <= next_cycles) return false; + } while (!next_log_time_cycles_.compare_exchange_weak( + next_cycles, now_cycles + seconds * CycleClock::Frequency(), + std::memory_order_relaxed, std::memory_order_relaxed)); + return true; +#endif +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/conditions.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/conditions.h new file mode 100644 index 0000000000..b89f1dfd7b --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/conditions.h @@ -0,0 +1,222 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/conditions.h +// ----------------------------------------------------------------------------- +// +// This file contains implementation of conditional log statements, like LOG_IF +// including all the ABSL_LOG_INTERNAL_..._CONDITION_... macros and +// various condition classes like LogEveryNState. + +#ifndef ABSL_LOG_INTERNAL_CONDITIONS_H_ +#define ABSL_LOG_INTERNAL_CONDITIONS_H_ + +#ifdef _WIN32 +#include +#else +#include +#endif +#include + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/log/internal/voidify.h" + +// `ABSL_LOG_INTERNAL_CONDITION` prefixes another macro that expands to a +// temporary `LogMessage` instantiation followed by zero or more streamed +// expressions. This definition is tricky to read correctly. 
It evaluates to +// either +// +// (void)0; +// +// or +// +// ::absl::log_internal::Voidify() && +// ::absl::log_internal::LogMessage(...) << "the user's message"; +// +// If the condition is evaluable at compile time, as is often the case, it +// compiles away to just one side or the other. +// +// Although this is not used anywhere a statement (e.g. `if`) could not go, +// the ternary expression does a better job avoiding spurious diagnostics +// (dangling else, missing switch case) and preserving noreturn semantics (e.g. +// on `LOG(FATAL)`) without requiring braces. +#define ABSL_LOG_INTERNAL_STATELESS_CONDITION(condition) \ + switch (0) \ + case 0: \ + !(condition) ? (void)0 : ::absl::log_internal::Voidify()&& + +// `ABSL_LOG_INTERNAL_STATEFUL_CONDITION` applies a condition like +// `ABSL_LOG_INTERNAL_CONDITION` but adds to that a series of variable +// declarations, including a local static object which stores the state needed +// to implement the stateful macros like `LOG_EVERY_N`. +// +// `for`-loops are used to declare scoped variables without braces (to permit +// streaming into the macro's expansion) and without the dangling-`else` +// problems/diagnostics that come with `if`. +// +// Two more variables are declared in separate `for`-loops: +// +// * `COUNTER` implements a streamable token whose value when streamed is the +// number of times execution has passed through the macro. +// * A boolean flag is used to prevent any of the `for`-loops from ever actually +// looping. +#define ABSL_LOG_INTERNAL_STATEFUL_CONDITION(condition) \ + for (bool absl_log_internal_stateful_condition_do_log(condition); \ + absl_log_internal_stateful_condition_do_log; \ + absl_log_internal_stateful_condition_do_log = false) \ + ABSL_LOG_INTERNAL_STATEFUL_CONDITION_IMPL +#define ABSL_LOG_INTERNAL_STATEFUL_CONDITION_IMPL(kind, ...) \ + for (static ::absl::log_internal::Log##kind##State \ + absl_log_internal_stateful_condition_state; \ + absl_log_internal_stateful_condition_do_log && \ + absl_log_internal_stateful_condition_state.ShouldLog(__VA_ARGS__); \ + absl_log_internal_stateful_condition_do_log = false) \ + for (const uint32_t COUNTER ABSL_ATTRIBUTE_UNUSED = \ + absl_log_internal_stateful_condition_state.counter(); \ + absl_log_internal_stateful_condition_do_log; \ + absl_log_internal_stateful_condition_do_log = false) + +// `ABSL_LOG_INTERNAL_CONDITION_*` serve to combine any conditions from the +// macro (e.g. `LOG_IF` or `VLOG`) with inherent conditions (e.g. +// `ABSL_MIN_LOG_LEVEL`) into a single boolean expression. We could chain +// ternary operators instead, however some versions of Clang sometimes issue +// spurious diagnostics after such expressions due to a control flow analysis +// bug. +#ifdef ABSL_MIN_LOG_LEVEL +#define ABSL_LOG_INTERNAL_CONDITION_INFO(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + (condition) && ::absl::LogSeverity::kInfo >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) +#define ABSL_LOG_INTERNAL_CONDITION_WARNING(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + (condition) && ::absl::LogSeverity::kWarning >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) +#define ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + (condition) && ::absl::LogSeverity::kError >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) +// NOTE: Use ternary operators instead of short-circuiting to mitigate +// https://bugs.llvm.org/show_bug.cgi?id=51928. 
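+// (Editorial note, not in the upstream source: in the two FATAL variants
+// below, a true condition still reaches `AbortQuietly()` / `ExitQuietly()`
+// even when `ABSL_MIN_LOG_LEVEL` is raised above `kFatal`, so [Q]FATAL keeps
+// its termination semantics while the message itself is compiled out.)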
+#define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + ((condition) \ + ? (::absl::LogSeverity::kFatal >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \ + ? true \ + : (::absl::log_internal::AbortQuietly(), false)) \ + : false)) +// NOTE: Use ternary operators instead of short-circuiting to mitigate +// https://bugs.llvm.org/show_bug.cgi?id=51928. +#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + ((condition) \ + ? (::absl::LogSeverity::kFatal >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \ + ? true \ + : (::absl::log_internal::ExitQuietly(), false)) \ + : false)) + +#define ABSL_LOG_INTERNAL_CONDITION_LEVEL(severity) \ + for (int log_internal_severity_loop = 1; log_internal_severity_loop; \ + log_internal_severity_loop = 0) \ + for (const absl::LogSeverity log_internal_severity = \ + ::absl::NormalizeLogSeverity(severity); \ + log_internal_severity_loop; log_internal_severity_loop = 0) \ + ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL +#define ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + (condition) && \ + (log_internal_severity >= \ + static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) || \ + (log_internal_severity == ::absl::LogSeverity::kFatal && \ + (::absl::log_internal::AbortQuietly(), false)))) +#else // ndef ABSL_MIN_LOG_LEVEL +#define ABSL_LOG_INTERNAL_CONDITION_INFO(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION(condition) +#define ABSL_LOG_INTERNAL_CONDITION_WARNING(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION(condition) +#define ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION(condition) +#define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION(condition) +#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION(condition) +#define ABSL_LOG_INTERNAL_CONDITION_LEVEL(severity) \ + for (int log_internal_severity_loop = 1; log_internal_severity_loop; \ + log_internal_severity_loop = 0) \ + for (const absl::LogSeverity log_internal_severity = \ + ::absl::NormalizeLogSeverity(severity); \ + log_internal_severity_loop; log_internal_severity_loop = 0) \ + ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL +#define ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION(condition) +#endif // ndef ABSL_MIN_LOG_LEVEL + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// Stateful condition class name should be "Log" + name + "State". +class LogEveryNState final { + public: + bool ShouldLog(int n); + uint32_t counter() { return counter_.load(std::memory_order_relaxed); } + + private: + std::atomic counter_{0}; +}; + +class LogFirstNState final { + public: + bool ShouldLog(int n); + uint32_t counter() { return counter_.load(std::memory_order_relaxed); } + + private: + std::atomic counter_{0}; +}; + +class LogEveryPow2State final { + public: + bool ShouldLog(); + uint32_t counter() { return counter_.load(std::memory_order_relaxed); } + + private: + std::atomic counter_{0}; +}; + +class LogEveryNSecState final { + public: + bool ShouldLog(double seconds); + uint32_t counter() { return counter_.load(std::memory_order_relaxed); } + + private: + std::atomic counter_{0}; + // Cycle count according to CycleClock that we should next log at. 
+ std::atomic next_log_time_cycles_{0}; +}; + +// Helper routines to abort the application quietly + +ABSL_ATTRIBUTE_NORETURN inline void AbortQuietly() { abort(); } +ABSL_ATTRIBUTE_NORETURN inline void ExitQuietly() { _exit(1); } +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_CONDITIONS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/config.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/config.h new file mode 100644 index 0000000000..379e9ab974 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/config.h @@ -0,0 +1,45 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/config.h +// ----------------------------------------------------------------------------- +// + +#ifndef ABSL_LOG_INTERNAL_CONFIG_H_ +#define ABSL_LOG_INTERNAL_CONFIG_H_ + +#include "absl/base/config.h" + +#ifdef _WIN32 +#include +#else +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +#ifdef _WIN32 +using Tid = uint32_t; +#else +using Tid = pid_t; +#endif + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_CONFIG_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/flags.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/flags.h new file mode 100644 index 0000000000..0c5e81edee --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/flags.h @@ -0,0 +1,53 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/log_flags.h +// ----------------------------------------------------------------------------- +// +// This header declares set of flags which can be used to configure Abseil +// Logging library behaviour at runtime. + +#ifndef ABSL_LOG_INTERNAL_FLAGS_H_ +#define ABSL_LOG_INTERNAL_FLAGS_H_ + +#include + +#include "absl/flags/declare.h" + +// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +// These flags should not be used in C++ code to access logging library +// configuration knobs. Use interfaces defined in absl/log/globals.h +// instead. 
It is still ok to use these flags on a command line. +// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +// Log messages at this severity or above are sent to stderr in *addition* to +// logfiles. Defaults to `ERROR`. See log_severity.h for numeric values of +// severity levels. +ABSL_DECLARE_FLAG(int, stderrthreshold); + +// Log messages at this severity or above are logged; others are discarded. +// Defaults to `INFO`, i.e. log all severities. See log_severity.h for numeric +// values of severity levels. +ABSL_DECLARE_FLAG(int, minloglevel); + +// If specified in the form file:linenum, any messages logged from a matching +// location will also include a backtrace. +ABSL_DECLARE_FLAG(std::string, log_backtrace_at); + +// If true, the log prefix (severity, date, time, PID, etc.) is prepended to +// each message logged. Defaults to true. +ABSL_DECLARE_FLAG(bool, log_prefix); + +#endif // ABSL_LOG_INTERNAL_FLAGS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/globals.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/globals.cc new file mode 100644 index 0000000000..863b047f33 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/globals.cc @@ -0,0 +1,125 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/globals.h" + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/log_severity.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +namespace { +// Keeps track of whether Logging initialization is finalized. +// Log messages generated before that will go to stderr. +ABSL_CONST_INIT std::atomic logging_initialized(false); + +// The TimeZone used for logging. This may only be set once. +ABSL_CONST_INIT std::atomic timezone_ptr{nullptr}; + +// If true, the logging library will symbolize stack in fatal messages +ABSL_CONST_INIT std::atomic symbolize_stack_trace(true); + +// Specifies maximum number of stack frames to report in fatal messages. +ABSL_CONST_INIT std::atomic max_frames_in_stack_trace(64); + +ABSL_CONST_INIT std::atomic exit_on_dfatal(true); +ABSL_CONST_INIT std::atomic suppress_sigabort_trace(false); +} // namespace + +bool IsInitialized() { + return logging_initialized.load(std::memory_order_acquire); +} + +void SetInitialized() { + logging_initialized.store(true, std::memory_order_release); +} + +void WriteToStderr(absl::string_view message, absl::LogSeverity severity) { + // Avoid using std::cerr from this module since we may get called during + // exit code, and cerr may be partially or fully destroyed by then. 
+ std::fwrite(message.data(), message.size(), 1, stderr); + +#if defined(_WIN64) || defined(_WIN32) || defined(_WIN16) + // C99 requires stderr to not be fully-buffered by default (7.19.3.7), but + // MS CRT buffers it anyway, so we must `fflush` to ensure the string hits + // the console/file before the program dies (and takes the libc buffers + // with it). + // https://docs.microsoft.com/en-us/cpp/c-runtime-library/stream-i-o + if (severity >= absl::LogSeverity::kWarning) { + std::fflush(stderr); + } +#else + // Avoid unused parameter warning in this branch. + (void)severity; +#endif +} + +void SetTimeZone(absl::TimeZone tz) { + absl::TimeZone* expected = nullptr; + absl::TimeZone* new_tz = new absl::TimeZone(tz); + // timezone_ptr can only be set once, otherwise new_tz is leaked. + if (!timezone_ptr.compare_exchange_strong(expected, new_tz, + std::memory_order_release, + std::memory_order_relaxed)) { + ABSL_RAW_LOG(FATAL, + "absl::log_internal::SetTimeZone() has already been called"); + } +} + +const absl::TimeZone* TimeZone() { + return timezone_ptr.load(std::memory_order_acquire); +} + +bool ShouldSymbolizeLogStackTrace() { + return symbolize_stack_trace.load(std::memory_order_acquire); +} + +void EnableSymbolizeLogStackTrace(bool on_off) { + symbolize_stack_trace.store(on_off, std::memory_order_release); +} + +int MaxFramesInLogStackTrace() { + return max_frames_in_stack_trace.load(std::memory_order_acquire); +} + +void SetMaxFramesInLogStackTrace(int max_num_frames) { + max_frames_in_stack_trace.store(max_num_frames, std::memory_order_release); +} + +bool ExitOnDFatal() { return exit_on_dfatal.load(std::memory_order_acquire); } + +void SetExitOnDFatal(bool on_off) { + exit_on_dfatal.store(on_off, std::memory_order_release); +} + +bool SuppressSigabortTrace() { + return suppress_sigabort_trace.load(std::memory_order_acquire); +} + +bool SetSuppressSigabortTrace(bool on_off) { + return suppress_sigabort_trace.exchange(on_off); +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/globals.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/globals.h new file mode 100644 index 0000000000..27bc0d0984 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/globals.h @@ -0,0 +1,101 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/globals.h +// ----------------------------------------------------------------------------- +// +// This header file contains various global objects and static helper routines +// use in logging implementation. 
+ +#ifndef ABSL_LOG_INTERNAL_GLOBALS_H_ +#define ABSL_LOG_INTERNAL_GLOBALS_H_ + +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// IsInitialized returns true if the logging library is initialized. +// This function is async-signal-safe +bool IsInitialized(); + +// SetLoggingInitialized is called once after logging initialization is done. +void SetInitialized(); + +// Unconditionally write a `message` to stderr. If `severity` exceeds kInfo +// we also flush the stderr stream. +void WriteToStderr(absl::string_view message, absl::LogSeverity severity); + +// Set the TimeZone used for human-friendly times (for example, the log message +// prefix) printed by the logging library. This may only be called once. +void SetTimeZone(absl::TimeZone tz); + +// Returns the TimeZone used for human-friendly times (for example, the log +// message prefix) printed by the logging library Returns nullptr prior to +// initialization. +const absl::TimeZone* TimeZone(); + +// Returns true if stack traces emitted by the logging library should be +// symbolized. This function is async-signal-safe. +bool ShouldSymbolizeLogStackTrace(); + +// Enables or disables symbolization of stack traces emitted by the +// logging library. This function is async-signal-safe. +void EnableSymbolizeLogStackTrace(bool on_off); + +// Returns the maximum number of frames that appear in stack traces +// emitted by the logging library. This function is async-signal-safe. +int MaxFramesInLogStackTrace(); + +// Sets the maximum number of frames that appear in stack traces emitted by +// the logging library. This function is async-signal-safe. +void SetMaxFramesInLogStackTrace(int max_num_frames); + +// Determines whether we exit the program for a LOG(DFATAL) message in +// debug mode. It does this by skipping the call to Fail/FailQuietly. +// This is intended for testing only. +// +// This can have some effects on LOG(FATAL) as well. Failure messages +// are always allocated (rather than sharing a buffer), the crash +// reason is not recorded, the "gwq" status message is not updated, +// and the stack trace is not recorded. The LOG(FATAL) *will* still +// exit the program. Since this function is used only in testing, +// these differences are acceptable. +// +// Additionally, LOG(LEVEL(FATAL)) is indistinguishable from LOG(DFATAL) and +// will not terminate the program if SetExitOnDFatal(false) has been called. +bool ExitOnDFatal(); + +// SetExitOnDFatal() sets the ExitOnDFatal() status +void SetExitOnDFatal(bool on_off); + +// Determines if the logging library should suppress logging of stacktraces in +// the `SIGABRT` handler, typically because we just logged a stacktrace as part +// of `LOG(FATAL)` and are about to send ourselves a `SIGABRT` to end the +// program. +bool SuppressSigabortTrace(); + +// Sets the SuppressSigabortTrace() status and returns the previous state. 
+bool SetSuppressSigabortTrace(bool on_off); + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_GLOBALS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_format.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_format.cc new file mode 100644 index 0000000000..b8cd5ac40e --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_format.cc @@ -0,0 +1,203 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/log_format.h" + +#include + +#ifdef _MSC_VER +#include // For timeval +#else +#include +#endif + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/base/optimization.h" +#include "absl/log/internal/append_truncated.h" +#include "absl/log/internal/config.h" +#include "absl/log/internal/globals.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" +#include "absl/time/civil_time.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +namespace { + +// This templated function avoids compiler warnings about tautological +// comparisons when log_internal::Tid is unsigned. It can be replaced with a +// constexpr if once the minimum C++ version Abseil suppports is C++17. +template +inline std::enable_if_t::value> +PutLeadingWhitespace(T tid, char*& p) { + if (tid < 10) *p++ = ' '; + if (tid < 100) *p++ = ' '; + if (tid < 1000) *p++ = ' '; + if (tid < 10000) *p++ = ' '; + if (tid < 100000) *p++ = ' '; + if (tid < 1000000) *p++ = ' '; +} + +template +inline std::enable_if_t::value> +PutLeadingWhitespace(T tid, char*& p) { + if (tid >= 0 && tid < 10) *p++ = ' '; + if (tid > -10 && tid < 100) *p++ = ' '; + if (tid > -100 && tid < 1000) *p++ = ' '; + if (tid > -1000 && tid < 10000) *p++ = ' '; + if (tid > -10000 && tid < 100000) *p++ = ' '; + if (tid > -100000 && tid < 1000000) *p++ = ' '; +} + +// The fields before the filename are all fixed-width except for the thread ID, +// which is of bounded width. +size_t FormatBoundedFields(absl::LogSeverity severity, absl::Time timestamp, + log_internal::Tid tid, absl::Span& buf) { + constexpr size_t kBoundedFieldsMaxLen = + sizeof("SMMDD HH:MM:SS.NNNNNN ") + + (1 + std::numeric_limits::digits10 + 1) - sizeof(""); + if (ABSL_PREDICT_FALSE(buf.size() < kBoundedFieldsMaxLen)) { + // We don't bother trying to truncate these fields if the buffer is too + // short (or almost too short) because it would require doing a lot more + // length checking (slow) and it should never happen. A 15kB buffer should + // be enough for anyone. Instead we mark `buf` full without writing + // anything. 
+ buf.remove_suffix(buf.size()); + return 0; + } + + // We can't call absl::LocalTime(), localtime_r(), or anything else here that + // isn't async-signal-safe. We can only use the time zone if it has already + // been loaded. + const absl::TimeZone* tz = absl::log_internal::TimeZone(); + if (ABSL_PREDICT_FALSE(tz == nullptr)) { + // If a time zone hasn't been set yet because we are logging before the + // logging library has been initialized, we fallback to a simpler, slower + // method. Just report the raw Unix time in seconds. We cram this into the + // normal time format for the benefit of parsers. + auto tv = absl::ToTimeval(timestamp); + int snprintf_result = absl::SNPrintF( + buf.data(), buf.size(), "%c0000 00:00:%02d.%06d %7d ", + absl::LogSeverityName(severity)[0], static_cast(tv.tv_sec), + static_cast(tv.tv_usec), static_cast(tid)); + if (snprintf_result >= 0) { + buf.remove_prefix(static_cast(snprintf_result)); + return static_cast(snprintf_result); + } + return 0; + } + + char* p = buf.data(); + *p++ = absl::LogSeverityName(severity)[0]; + const absl::TimeZone::CivilInfo ci = tz->At(timestamp); + absl::numbers_internal::PutTwoDigits(static_cast(ci.cs.month()), p); + p += 2; + absl::numbers_internal::PutTwoDigits(static_cast(ci.cs.day()), p); + p += 2; + *p++ = ' '; + absl::numbers_internal::PutTwoDigits(static_cast(ci.cs.hour()), p); + p += 2; + *p++ = ':'; + absl::numbers_internal::PutTwoDigits(static_cast(ci.cs.minute()), p); + p += 2; + *p++ = ':'; + absl::numbers_internal::PutTwoDigits(static_cast(ci.cs.second()), p); + p += 2; + *p++ = '.'; + const int64_t usecs = absl::ToInt64Microseconds(ci.subsecond); + absl::numbers_internal::PutTwoDigits(static_cast(usecs / 10000), p); + p += 2; + absl::numbers_internal::PutTwoDigits(static_cast(usecs / 100 % 100), + p); + p += 2; + absl::numbers_internal::PutTwoDigits(static_cast(usecs % 100), p); + p += 2; + *p++ = ' '; + PutLeadingWhitespace(tid, p); + p = absl::numbers_internal::FastIntToBuffer(tid, p); + *p++ = ' '; + const size_t bytes_formatted = static_cast(p - buf.data()); + buf.remove_prefix(bytes_formatted); + return bytes_formatted; +} + +size_t FormatLineNumber(int line, absl::Span& buf) { + constexpr size_t kLineFieldMaxLen = + sizeof(":] ") + (1 + std::numeric_limits::digits10 + 1) - sizeof(""); + if (ABSL_PREDICT_FALSE(buf.size() < kLineFieldMaxLen)) { + // As above, we don't bother trying to truncate this if the buffer is too + // short and it should never happen. + buf.remove_suffix(buf.size()); + return 0; + } + char* p = buf.data(); + *p++ = ':'; + p = absl::numbers_internal::FastIntToBuffer(line, p); + *p++ = ']'; + *p++ = ' '; + const size_t bytes_formatted = static_cast(p - buf.data()); + buf.remove_prefix(bytes_formatted); + return bytes_formatted; +} + +} // namespace + +std::string FormatLogMessage(absl::LogSeverity severity, + absl::CivilSecond civil_second, + absl::Duration subsecond, log_internal::Tid tid, + absl::string_view basename, int line, + PrefixFormat format, absl::string_view message) { + return absl::StrFormat( + "%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] %s%s", + absl::LogSeverityName(severity)[0], civil_second.month(), + civil_second.day(), civil_second.hour(), civil_second.minute(), + civil_second.second(), absl::ToInt64Microseconds(subsecond), tid, + basename, line, format == PrefixFormat::kRaw ? "RAW: " : "", message); +} + +// This method is fairly hot, and the library always passes a huge `buf`, so we +// save some bounds-checking cycles by not trying to do precise truncation. 
+// Truncating at a field boundary is probably a better UX anyway. +// +// The prefix is written in three parts, each of which does a single +// bounds-check and truncation: +// 1. severity, timestamp, and thread ID +// 2. filename +// 3. line number and bracket +size_t FormatLogPrefix(absl::LogSeverity severity, absl::Time timestamp, + log_internal::Tid tid, absl::string_view basename, + int line, PrefixFormat format, absl::Span& buf) { + auto prefix_size = FormatBoundedFields(severity, timestamp, tid, buf); + prefix_size += log_internal::AppendTruncated(basename, buf); + prefix_size += FormatLineNumber(line, buf); + if (format == PrefixFormat::kRaw) + prefix_size += log_internal::AppendTruncated("RAW: ", buf); + return prefix_size; +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_format.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_format.h new file mode 100644 index 0000000000..95a45edf61 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_format.h @@ -0,0 +1,78 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/log_format.h +// ----------------------------------------------------------------------------- +// +// This file declares routines implementing formatting of log message and log +// prefix. + +#ifndef ABSL_LOG_INTERNAL_LOG_FORMAT_H_ +#define ABSL_LOG_INTERNAL_LOG_FORMAT_H_ + +#include + +#include + +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/internal/config.h" +#include "absl/strings/string_view.h" +#include "absl/time/civil_time.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +enum class PrefixFormat { + kNotRaw, + kRaw, +}; + +// Formats log message based on provided data. +std::string FormatLogMessage(absl::LogSeverity severity, + absl::CivilSecond civil_second, + absl::Duration subsecond, log_internal::Tid tid, + absl::string_view basename, int line, + PrefixFormat format, absl::string_view message); + +// Formats various entry metadata into a text string meant for use as a +// prefix on a log message string. Writes into `buf`, advances `buf` to point +// at the remainder of the buffer (i.e. past any written bytes), and returns the +// number of bytes written. +// +// In addition to calling `buf->remove_prefix()` (or the equivalent), this +// function may also do `buf->remove_suffix(buf->size())` in cases where no more +// bytes (i.e. no message data) should be written into the buffer. 
For example, +// if the prefix ought to be: +// I0926 09:00:00.000000 1234567 foo.cc:123] +// `buf` is too small, the function might fill the whole buffer: +// I0926 09:00:00.000000 1234 +// (note the apparrently incorrect thread ID), or it might write less: +// I0926 09:00:00.000000 +// In this case, it might also empty `buf` prior to returning to prevent +// message data from being written into the space where a reader would expect to +// see a thread ID. +size_t FormatLogPrefix(absl::LogSeverity severity, absl::Time timestamp, + log_internal::Tid tid, absl::string_view basename, + int line, PrefixFormat format, absl::Span& buf); + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_LOG_FORMAT_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_message.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_message.cc new file mode 100644 index 0000000000..b73135516d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_message.cc @@ -0,0 +1,583 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/log_message.h" + +#include +#include +#include +#include + +#ifndef _WIN32 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/strerror.h" +#include "absl/base/internal/sysinfo.h" +#include "absl/base/log_severity.h" +#include "absl/container/inlined_vector.h" +#include "absl/debugging/internal/examine_stack.h" +#include "absl/log/globals.h" +#include "absl/log/internal/append_truncated.h" +#include "absl/log/internal/globals.h" +#include "absl/log/internal/log_format.h" +#include "absl/log/internal/log_sink_set.h" +#include "absl/log/internal/proto.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" +#include "absl/log/log_sink_registry.h" +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +extern "C" ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL( + AbslInternalOnFatalLogMessage)(const absl::LogEntry&) { + // Default - Do nothing +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +namespace { +// message `logging.proto.Event` +enum EventTag : uint8_t { + kValue = 7, +}; + +// message `logging.proto.Value` +enum ValueTag : uint8_t { + kString = 1, + kStringLiteral = 6, +}; + +// Decodes a `logging.proto.Value` from `buf` and writes a string representation +// into `dst`. The string representation will be truncated if `dst` is not +// large enough to hold it. Returns false if `dst` has size zero or one (i.e. +// sufficient only for a nul-terminator) and no decoded data could be written. 
+// This function may or may not write a nul-terminator into `dst`, and it may or +// may not truncate the data it writes in order to do make space for that nul +// terminator. In any case, `dst` will be advanced to point at the byte where +// subsequent writes should begin. +bool PrintValue(absl::Span& dst, absl::Span buf) { + if (dst.size() <= 1) return false; + ProtoField field; + while (field.DecodeFrom(&buf)) { + switch (field.tag()) { + case ValueTag::kString: + case ValueTag::kStringLiteral: + if (field.type() == WireType::kLengthDelimited) + if (log_internal::AppendTruncated(field.string_value(), dst) < + field.string_value().size()) + return false; + } + } + return true; +} + +absl::string_view Basename(absl::string_view filepath) { +#ifdef _WIN32 + size_t path = filepath.find_last_of("/\\"); +#else + size_t path = filepath.find_last_of('/'); +#endif + if (path != filepath.npos) filepath.remove_prefix(path + 1); + return filepath; +} + +void WriteToString(const char* data, void* str) { + reinterpret_cast(str)->append(data); +} +void WriteToStream(const char* data, void* os) { + auto* cast_os = static_cast(os); + *cast_os << data; +} +} // namespace + +struct LogMessage::LogMessageData final { + LogMessageData(const char* file, int line, absl::LogSeverity severity, + absl::Time timestamp); + LogMessageData(const LogMessageData&) = delete; + LogMessageData& operator=(const LogMessageData&) = delete; + + // `LogEntry` sent to `LogSink`s; contains metadata. + absl::LogEntry entry; + + // true => this was first fatal msg + bool first_fatal; + // true => all failures should be quiet + bool fail_quietly; + // true => PLOG was requested + bool is_perror; + + // Extra `LogSink`s to log to, in addition to `global_sinks`. + absl::InlinedVector extra_sinks; + // If true, log to `extra_sinks` but not to `global_sinks` or hardcoded + // non-sink targets (e.g. stderr, log files). + bool extra_sinks_only; + + std::ostream manipulated; // ostream with IO manipulators applied + + // A `logging.proto.Event` proto message is built into `encoded_buf`. + std::array encoded_buf; + // `encoded_remaining` is the suffix of `encoded_buf` that has not been filled + // yet. If a datum to be encoded does not fit into `encoded_remaining` and + // cannot be truncated to fit, the size of `encoded_remaining` will be zeroed + // to prevent encoding of any further data. Note that in this case its data() + // pointer will not point past the end of `encoded_buf`. + absl::Span encoded_remaining; + + // A formatted string message is built in `string_buf`. 
+ std::array string_buf; + + void FinalizeEncodingAndFormat(); +}; + +LogMessage::LogMessageData::LogMessageData(const char* file, int line, + absl::LogSeverity severity, + absl::Time timestamp) + : extra_sinks_only(false), + manipulated(nullptr), + encoded_remaining(encoded_buf) { + // Legacy defaults for LOG's ostream: + manipulated.setf(std::ios_base::showbase | std::ios_base::boolalpha); + entry.full_filename_ = file; + entry.base_filename_ = Basename(file); + entry.line_ = line; + entry.prefix_ = absl::ShouldPrependLogPrefix(); + entry.severity_ = absl::NormalizeLogSeverity(severity); + entry.verbose_level_ = absl::LogEntry::kNoVerbosityLevel; + entry.timestamp_ = timestamp; + entry.tid_ = absl::base_internal::GetCachedTID(); +} + +void LogMessage::LogMessageData::FinalizeEncodingAndFormat() { + // Note that `encoded_remaining` may have zero size without pointing past the + // end of `encoded_buf`, so the difference between `data()` pointers is used + // to compute the size of `encoded_data`. + absl::Span encoded_data( + encoded_buf.data(), + static_cast(encoded_remaining.data() - encoded_buf.data())); + // `string_remaining` is the suffix of `string_buf` that has not been filled + // yet. + absl::Span string_remaining(string_buf); + // We may need to write a newline and nul-terminator at the end of the decoded + // string data. Rather than worry about whether those should overwrite the + // end of the string (if the buffer is full) or be appended, we avoid writing + // into the last two bytes so we always have space to append. + string_remaining.remove_suffix(2); + entry.prefix_len_ = + entry.prefix() ? log_internal::FormatLogPrefix( + entry.log_severity(), entry.timestamp(), entry.tid(), + entry.source_basename(), entry.source_line(), + log_internal::ThreadIsLoggingToLogSink() + ? PrefixFormat::kRaw + : PrefixFormat::kNotRaw, + string_remaining) + : 0; + // Decode data from `encoded_buf` until we run out of data or we run out of + // `string_remaining`. + ProtoField field; + while (field.DecodeFrom(&encoded_data)) { + switch (field.tag()) { + case EventTag::kValue: + if (field.type() != WireType::kLengthDelimited) continue; + if (PrintValue(string_remaining, field.bytes_value())) continue; + break; + } + break; + } + auto chars_written = + static_cast(string_remaining.data() - string_buf.data()); + string_buf[chars_written++] = '\n'; + string_buf[chars_written++] = '\0'; + entry.text_message_with_prefix_and_newline_and_nul_ = + absl::MakeSpan(string_buf).subspan(0, chars_written); +} + +LogMessage::LogMessage(const char* file, int line, absl::LogSeverity severity) + : data_(absl::make_unique(file, line, severity, + absl::Now())) { + data_->first_fatal = false; + data_->is_perror = false; + data_->fail_quietly = false; + + // This logs a backtrace even if the location is subsequently changed using + // AtLocation. This quirk, and the behavior when AtLocation is called twice, + // are fixable but probably not worth fixing. 
+ LogBacktraceIfNeeded(); +} + +LogMessage::~LogMessage() { +#ifdef ABSL_MIN_LOG_LEVEL + if (data_->entry.log_severity() < + static_cast(ABSL_MIN_LOG_LEVEL) && + data_->entry.log_severity() < absl::LogSeverity::kFatal) { + return; + } +#endif + Flush(); +} + +LogMessage& LogMessage::AtLocation(absl::string_view file, int line) { + data_->entry.full_filename_ = file; + data_->entry.base_filename_ = Basename(file); + data_->entry.line_ = line; + LogBacktraceIfNeeded(); + return *this; +} + +LogMessage& LogMessage::NoPrefix() { + data_->entry.prefix_ = false; + return *this; +} + +LogMessage& LogMessage::WithVerbosity(int verbose_level) { + if (verbose_level == absl::LogEntry::kNoVerbosityLevel) { + data_->entry.verbose_level_ = absl::LogEntry::kNoVerbosityLevel; + } else { + data_->entry.verbose_level_ = std::max(0, verbose_level); + } + return *this; +} + +LogMessage& LogMessage::WithTimestamp(absl::Time timestamp) { + data_->entry.timestamp_ = timestamp; + return *this; +} + +LogMessage& LogMessage::WithThreadID(absl::LogEntry::tid_t tid) { + data_->entry.tid_ = tid; + return *this; +} + +LogMessage& LogMessage::WithMetadataFrom(const absl::LogEntry& entry) { + data_->entry.full_filename_ = entry.full_filename_; + data_->entry.base_filename_ = entry.base_filename_; + data_->entry.line_ = entry.line_; + data_->entry.prefix_ = entry.prefix_; + data_->entry.severity_ = entry.severity_; + data_->entry.verbose_level_ = entry.verbose_level_; + data_->entry.timestamp_ = entry.timestamp_; + data_->entry.tid_ = entry.tid_; + return *this; +} + +LogMessage& LogMessage::WithPerror() { + data_->is_perror = true; + return *this; +} + +LogMessage& LogMessage::ToSinkAlso(absl::LogSink* sink) { + ABSL_INTERNAL_CHECK(sink, "null LogSink*"); + data_->extra_sinks.push_back(sink); + return *this; +} + +LogMessage& LogMessage::ToSinkOnly(absl::LogSink* sink) { + ABSL_INTERNAL_CHECK(sink, "null LogSink*"); + data_->extra_sinks.clear(); + data_->extra_sinks.push_back(sink); + data_->extra_sinks_only = true; + return *this; +} + +#ifdef __ELF__ +extern "C" void __gcov_dump() ABSL_ATTRIBUTE_WEAK; +extern "C" void __gcov_flush() ABSL_ATTRIBUTE_WEAK; +#endif + +void LogMessage::FailWithoutStackTrace() { + // Now suppress repeated trace logging: + log_internal::SetSuppressSigabortTrace(true); +#if defined _DEBUG && defined COMPILER_MSVC + // When debugging on windows, avoid the obnoxious dialog. + __debugbreak(); +#endif + +#ifdef __ELF__ + // For b/8737634, flush coverage if we are in coverage mode. + if (&__gcov_dump != nullptr) { + __gcov_dump(); + } else if (&__gcov_flush != nullptr) { + __gcov_flush(); + } +#endif + + abort(); +} + +void LogMessage::FailQuietly() { + // _exit. Calling abort() would trigger all sorts of death signal handlers + // and a detailed stack trace. Calling exit() would trigger the onexit + // handlers, including the heap-leak checker, which is guaranteed to fail in + // this case: we probably just new'ed the std::string that we logged. + // Anyway, if you're calling Fail or FailQuietly, you're trying to bail out + // of the program quickly, and it doesn't make much sense for FailQuietly to + // offer different guarantees about exit behavior than Fail does. 
(And as a + // consequence for QCHECK and CHECK to offer different exit behaviors) + _exit(1); +} + +LogMessage& LogMessage::operator<<(const std::string& v) { + return LogString(false, v); +} + +LogMessage& LogMessage::operator<<(absl::string_view v) { + return LogString(false, v); +} +LogMessage& LogMessage::operator<<(std::ostream& (*m)(std::ostream& os)) { + OstreamView view(*data_); + data_->manipulated << m; + return *this; +} +LogMessage& LogMessage::operator<<(std::ios_base& (*m)(std::ios_base& os)) { + OstreamView view(*data_); + data_->manipulated << m; + return *this; +} +template LogMessage& LogMessage::operator<<(const char& v); +template LogMessage& LogMessage::operator<<(const signed char& v); +template LogMessage& LogMessage::operator<<(const unsigned char& v); +template LogMessage& LogMessage::operator<<(const short& v); // NOLINT +template LogMessage& LogMessage::operator<<(const unsigned short& v); // NOLINT +template LogMessage& LogMessage::operator<<(const int& v); +template LogMessage& LogMessage::operator<<(const unsigned int& v); +template LogMessage& LogMessage::operator<<(const long& v); // NOLINT +template LogMessage& LogMessage::operator<<(const unsigned long& v); // NOLINT +template LogMessage& LogMessage::operator<<(const long long& v); // NOLINT +template LogMessage& LogMessage::operator<<( + const unsigned long long& v); // NOLINT +template LogMessage& LogMessage::operator<<(void* const& v); +template LogMessage& LogMessage::operator<<(const void* const& v); +template LogMessage& LogMessage::operator<<(const float& v); +template LogMessage& LogMessage::operator<<(const double& v); +template LogMessage& LogMessage::operator<<(const bool& v); + +void LogMessage::Flush() { + if (data_->entry.log_severity() < absl::MinLogLevel()) + return; + + if (data_->is_perror) { + InternalStream() << ": " << absl::base_internal::StrError(errno_saver_()) + << " [" << errno_saver_() << "]"; + } + + // Have we already seen a fatal message? + ABSL_CONST_INIT static std::atomic seen_fatal(false); + if (data_->entry.log_severity() == absl::LogSeverity::kFatal && + absl::log_internal::ExitOnDFatal()) { + // Exactly one LOG(FATAL) message is responsible for aborting the process, + // even if multiple threads LOG(FATAL) concurrently. + bool expected_seen_fatal = false; + if (seen_fatal.compare_exchange_strong(expected_seen_fatal, true, + std::memory_order_relaxed)) { + data_->first_fatal = true; + } + } + + data_->FinalizeEncodingAndFormat(); + data_->entry.encoding_ = + absl::string_view(data_->encoded_buf.data(), + static_cast(data_->encoded_remaining.data() - + data_->encoded_buf.data())); + SendToLog(); +} + +void LogMessage::SetFailQuietly() { data_->fail_quietly = true; } + +LogMessage::OstreamView::OstreamView(LogMessageData& message_data) + : data_(message_data), encoded_remaining_copy_(data_.encoded_remaining) { + // This constructor sets the `streambuf` up so that streaming into an attached + // ostream encodes string data in-place. To do that, we write appropriate + // headers into the buffer using a copy of the buffer view so that we can + // decide not to keep them later if nothing is ever streamed in. We don't + // know how much data we'll get, but we can use the size of the remaining + // buffer as an upper bound and fill in the right size once we know it. 
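+  // Editorial sketch (added comment; not in the upstream source): after the
+  // two EncodeMessageStart() calls below, the copied view looks roughly like
+  //   [Event::kValue header][Value::kString header][ setp() put area ... ]
+  // and ~OstreamView() back-patches both length fields via
+  // EncodeMessageLength() once the streamed size is known.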
+ message_start_ = + EncodeMessageStart(EventTag::kValue, encoded_remaining_copy_.size(), + &encoded_remaining_copy_); + string_start_ = + EncodeMessageStart(ValueTag::kString, encoded_remaining_copy_.size(), + &encoded_remaining_copy_); + setp(encoded_remaining_copy_.data(), + encoded_remaining_copy_.data() + encoded_remaining_copy_.size()); + data_.manipulated.rdbuf(this); +} + +LogMessage::OstreamView::~OstreamView() { + const absl::Span contents(pbase(), + static_cast(pptr() - pbase())); + encoded_remaining_copy_.remove_prefix(contents.size()); + if (!string_start_.data()) { + // The headers didn't fit; we won't write anything to the buffer, but we + // also need to zero the size of `data_->encoded_remaining` so that no more + // data is encoded. + data_.encoded_remaining.remove_suffix(data_.encoded_remaining.size()); + } else if (!contents.empty()) { + EncodeMessageLength(string_start_, &encoded_remaining_copy_); + EncodeMessageLength(message_start_, &encoded_remaining_copy_); + data_.encoded_remaining = encoded_remaining_copy_; + } + data_.manipulated.rdbuf(nullptr); +} + +std::ostream& LogMessage::OstreamView::stream() { return data_.manipulated; } + +bool LogMessage::IsFatal() const { + return data_->entry.log_severity() == absl::LogSeverity::kFatal && + absl::log_internal::ExitOnDFatal(); +} + +void LogMessage::PrepareToDie() { + // If we log a FATAL message, flush all the log destinations, then toss + // a signal for others to catch. We leave the logs in a state that + // someone else can use them (as long as they flush afterwards) + if (data_->first_fatal) { + // Notify observers about the upcoming fatal error. + ABSL_INTERNAL_C_SYMBOL(AbslInternalOnFatalLogMessage)(data_->entry); + } + + if (!data_->fail_quietly) { + // Log the message first before we start collecting stack trace. + log_internal::LogToSinks(data_->entry, absl::MakeSpan(data_->extra_sinks), + data_->extra_sinks_only); + + // `DumpStackTrace` generates an empty string under MSVC. + // Adding the constant prefix here simplifies testing. + data_->entry.stacktrace_ = "*** Check failure stack trace: ***\n"; + debugging_internal::DumpStackTrace( + 0, log_internal::MaxFramesInLogStackTrace(), + log_internal::ShouldSymbolizeLogStackTrace(), WriteToString, + &data_->entry.stacktrace_); + } +} + +void LogMessage::Die() { + absl::FlushLogSinks(); + + if (data_->fail_quietly) { + FailQuietly(); + } else { + FailWithoutStackTrace(); + } +} + +void LogMessage::SendToLog() { + if (IsFatal()) PrepareToDie(); + // Also log to all registered sinks, even if OnlyLogToStderr() is set. + log_internal::LogToSinks(data_->entry, absl::MakeSpan(data_->extra_sinks), + data_->extra_sinks_only); + if (IsFatal()) Die(); +} + +void LogMessage::LogBacktraceIfNeeded() { + if (!absl::log_internal::IsInitialized()) return; + + if (!absl::log_internal::ShouldLogBacktraceAt(data_->entry.source_basename(), + data_->entry.source_line())) + return; + OstreamView view(*data_); + view.stream() << " (stacktrace:\n"; + debugging_internal::DumpStackTrace( + 1, log_internal::MaxFramesInLogStackTrace(), + log_internal::ShouldSymbolizeLogStackTrace(), WriteToStream, + &view.stream()); + view.stream() << ") "; +} + +// Encodes a partial `logging.proto.Event` containing the specified string data +// into `data_->encoded_remaining`. +LogMessage& LogMessage::LogString(bool literal, absl::string_view str) { + // Don't commit the MessageStart if the String tag_type and length don't fit. 
+ auto encoded_remaining_copy = data_->encoded_remaining; + auto start = EncodeMessageStart( + EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + str.size(), + &encoded_remaining_copy); + if (EncodeStringTruncate( + literal ? ValueTag::kStringLiteral : ValueTag::kString, str, + &encoded_remaining_copy)) { + EncodeMessageLength(start, &encoded_remaining_copy); + data_->encoded_remaining = encoded_remaining_copy; + } + return *this; +} + +LogMessageFatal::LogMessageFatal(const char* file, int line) + : LogMessage(file, line, absl::LogSeverity::kFatal) {} + +LogMessageFatal::LogMessageFatal(const char* file, int line, + absl::string_view failure_msg) + : LogMessage(file, line, absl::LogSeverity::kFatal) { + *this << "Check failed: " << failure_msg << " "; +} + +// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so +// disable msvc's warning about the d'tor never returning. +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable : 4722) +#endif +LogMessageFatal::~LogMessageFatal() { + Flush(); + FailWithoutStackTrace(); +} +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif + +LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line) + : LogMessage(file, line, absl::LogSeverity::kFatal) { + SetFailQuietly(); +} + +LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line, + absl::string_view failure_msg) + : LogMessage(file, line, absl::LogSeverity::kFatal) { + SetFailQuietly(); + *this << "Check failed: " << failure_msg << " "; +} + +// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so +// disable msvc's warning about the d'tor never returning. +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable : 4722) +#endif +LogMessageQuietlyFatal::~LogMessageQuietlyFatal() { + Flush(); + FailQuietly(); +} +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif + +} // namespace log_internal + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_message.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_message.h new file mode 100644 index 0000000000..5914a535f3 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_message.h @@ -0,0 +1,345 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/log_message.h +// ----------------------------------------------------------------------------- +// +// This file declares `class absl::log_internal::LogMessage`. This class more or +// less represents a particular log message. LOG/CHECK macros create a +// temporary instance of `LogMessage` and then stream values to it. 
At the end +// of the LOG/CHECK statement, LogMessage instance goes out of scope and +// `~LogMessage` directs the message to the registered log sinks. +// Heap-allocation of `LogMessage` is unsupported. Construction outside of a +// `LOG` macro is unsupported. + +#ifndef ABSL_LOG_INTERNAL_LOG_MESSAGE_H_ +#define ABSL_LOG_INTERNAL_LOG_MESSAGE_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/errno_saver.h" +#include "absl/base/log_severity.h" +#include "absl/log/internal/nullguard.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" +#include "absl/strings/internal/has_absl_stringify.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +class AsLiteralImpl; + +constexpr int kLogMessageBufferSize = 15000; + +class LogMessage { + public: + // Used for `LOG`. + LogMessage(const char* file, int line, + absl::LogSeverity severity) ABSL_ATTRIBUTE_COLD; + LogMessage(const LogMessage&) = delete; + LogMessage& operator=(const LogMessage&) = delete; + ~LogMessage() ABSL_ATTRIBUTE_COLD; + + // Overrides the location inferred from the callsite. The string pointed to + // by `file` must be valid until the end of the statement. + LogMessage& AtLocation(absl::string_view file, int line); + // Omits the prefix from this line. The prefix includes metadata about the + // logged data such as source code location and timestamp. + LogMessage& NoPrefix(); + // Sets the verbosity field of the logged message as if it was logged by + // `VLOG(verbose_level)`. Unlike `VLOG`, this method does not affect + // evaluation of the statement when the specified `verbose_level` has been + // disabled. The only effect is on `absl::LogSink` implementations which + // make use of the `absl::LogSink::verbosity()` value. The value + // `absl::LogEntry::kNoVerbosityLevel` can be specified to mark the message + // not verbose. + LogMessage& WithVerbosity(int verbose_level); + // Uses the specified timestamp instead of one collected in the constructor. + LogMessage& WithTimestamp(absl::Time timestamp); + // Uses the specified thread ID instead of one collected in the constructor. + LogMessage& WithThreadID(absl::LogEntry::tid_t tid); + // Copies all metadata (but no data) from the specified `absl::LogEntry`. + LogMessage& WithMetadataFrom(const absl::LogEntry& entry); + // Appends to the logged message a colon, a space, a textual description of + // the current value of `errno` (as by strerror(3)), and the numerical value + // of `errno`. + LogMessage& WithPerror(); + // Sends this message to `*sink` in addition to whatever other sinks it would + // otherwise have been sent to. `sink` must not be null. + LogMessage& ToSinkAlso(absl::LogSink* sink); + // Sends this message to `*sink` and no others. `sink` must not be null. + LogMessage& ToSinkOnly(absl::LogSink* sink); + + // Don't call this method from outside this library. + LogMessage& InternalStream() { return *this; } + + // By-value overloads for small, common types let us overlook common failures + // to define globals and static data members (i.e. in a .cc file). 
+ // clang-format off + // The CUDA toolchain cannot handle these <<<'s: + LogMessage& operator<<(char v) { return operator<< (v); } + LogMessage& operator<<(signed char v) { return operator<< (v); } + LogMessage& operator<<(unsigned char v) { + return operator<< (v); + } + LogMessage& operator<<(signed short v) { // NOLINT + return operator<< (v); // NOLINT + } + LogMessage& operator<<(signed int v) { return operator<< (v); } + LogMessage& operator<<(signed long v) { // NOLINT + return operator<< (v); // NOLINT + } + LogMessage& operator<<(signed long long v) { // NOLINT + return operator<< (v); // NOLINT + } + LogMessage& operator<<(unsigned short v) { // NOLINT + return operator<< (v); // NOLINT + } + LogMessage& operator<<(unsigned int v) { + return operator<< (v); + } + LogMessage& operator<<(unsigned long v) { // NOLINT + return operator<< (v); // NOLINT + } + LogMessage& operator<<(unsigned long long v) { // NOLINT + return operator<< (v); // NOLINT + } + LogMessage& operator<<(void* v) { return operator<< (v); } + LogMessage& operator<<(const void* v) { return operator<< (v); } + LogMessage& operator<<(float v) { return operator<< (v); } + LogMessage& operator<<(double v) { return operator<< (v); } + LogMessage& operator<<(bool v) { return operator<< (v); } + // clang-format on + + // These overloads are more efficient since no `ostream` is involved. + LogMessage& operator<<(const std::string& v); + LogMessage& operator<<(absl::string_view v); + + // Handle stream manipulators e.g. std::endl. + LogMessage& operator<<(std::ostream& (*m)(std::ostream& os)); + LogMessage& operator<<(std::ios_base& (*m)(std::ios_base& os)); + + // Literal strings. This allows us to record C string literals as literals in + // the logging.proto.Value. + // + // Allow this overload to be inlined to prevent generating instantiations of + // this template for every value of `SIZE` encountered in each source code + // file. That significantly increases linker input sizes. Inlining is cheap + // because the argument to this overload is almost always a string literal so + // the call to `strlen` can be replaced at compile time. The overload for + // `char[]` below should not be inlined. The compiler typically does not have + // the string at compile time and cannot replace the call to `strlen` so + // inlining it increases the binary size. See the discussion on + // cl/107527369. + template + LogMessage& operator<<(const char (&buf)[SIZE]); + + // This prevents non-const `char[]` arrays from looking like literals. + template + LogMessage& operator<<(char (&buf)[SIZE]) ABSL_ATTRIBUTE_NOINLINE; + + // Types that support `AbslStringify()` are serialized that way. + template ::value, int>::type = 0> + LogMessage& operator<<(const T& v) ABSL_ATTRIBUTE_NOINLINE; + + // Types that don't support `AbslStringify()` but do support streaming into a + // `std::ostream&` are serialized that way. + template ::value, int>::type = 0> + LogMessage& operator<<(const T& v) ABSL_ATTRIBUTE_NOINLINE; + + // Note: We explicitly do not support `operator<<` for non-const references + // because it breaks logging of non-integer bitfield types (i.e., enums). + + protected: + // Call `abort()` or similar to perform `LOG(FATAL)` crash. It is assumed + // that the caller has already generated and written the trace as appropriate. + ABSL_ATTRIBUTE_NORETURN static void FailWithoutStackTrace(); + + // Similar to `FailWithoutStackTrace()`, but without `abort()`. Terminates + // the process with an error exit code. 
+ ABSL_ATTRIBUTE_NORETURN static void FailQuietly(); + + // Dispatches the completed `absl::LogEntry` to applicable `absl::LogSink`s. + // This might as well be inlined into `~LogMessage` except that + // `~LogMessageFatal` needs to call it early. + void Flush(); + + // After this is called, failures are done as quiet as possible for this log + // message. + void SetFailQuietly(); + + private: + struct LogMessageData; // Opaque type containing message state + friend class AsLiteralImpl; + + // This streambuf writes directly into the structured logging buffer so that + // arbitrary types can be encoded as string data (using + // `operator<<(std::ostream &, ...)` without any extra allocation or copying. + // Space is reserved before the data to store the length field, which is + // filled in by `~OstreamView`. + class OstreamView final : public std::streambuf { + public: + explicit OstreamView(LogMessageData& message_data); + ~OstreamView() override; + OstreamView(const OstreamView&) = delete; + OstreamView& operator=(const OstreamView&) = delete; + std::ostream& stream(); + + private: + LogMessageData& data_; + absl::Span encoded_remaining_copy_; + absl::Span message_start_; + absl::Span string_start_; + }; + + // Returns `true` if the message is fatal or enabled debug-fatal. + bool IsFatal() const; + + // Records some tombstone-type data in anticipation of `Die`. + void PrepareToDie(); + void Die(); + + void SendToLog(); + + // Checks `FLAGS_log_backtrace_at` and appends a backtrace if appropriate. + void LogBacktraceIfNeeded(); + + LogMessage& LogString(bool literal, + absl::string_view str) ABSL_ATTRIBUTE_NOINLINE; + + // This should be the first data member so that its initializer captures errno + // before any other initializers alter it (e.g. with calls to new) and so that + // no other destructors run afterward an alter it (e.g. with calls to delete). + absl::base_internal::ErrnoSaver errno_saver_; + + // We keep the data in a separate struct so that each instance of `LogMessage` + // uses less stack space. + std::unique_ptr data_; +}; + +// Helper class so that `AbslStringify()` can modify the LogMessage. +class StringifySink final { + public: + explicit StringifySink(LogMessage& message) : message_(message) {} + + void Append(size_t count, char ch) { message_ << std::string(count, ch); } + + void Append(absl::string_view v) { message_ << v; } + + // For types that implement `AbslStringify` using `absl::Format()`. + friend void AbslFormatFlush(StringifySink* sink, absl::string_view v) { + sink->Append(v); + } + + private: + LogMessage& message_; +}; + +// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE` +template ::value, + int>::type> +LogMessage& LogMessage::operator<<(const T& v) { + StringifySink sink(*this); + // Replace with public API. + AbslStringify(sink, v); + return *this; +} + +// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE` +template ::value, + int>::type> +LogMessage& LogMessage::operator<<(const T& v) { + OstreamView view(*data_); + view.stream() << log_internal::NullGuard().Guard(v); + return *this; +} + +template +LogMessage& LogMessage::operator<<(const char (&buf)[SIZE]) { + const bool literal = true; + return LogString(literal, buf); +} + +// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE` +template +LogMessage& LogMessage::operator<<(char (&buf)[SIZE]) { + const bool literal = false; + return LogString(literal, buf); +} +// We instantiate these specializations in the library's TU to save space in +// other TUs. 
Since the template is marked `ABSL_ATTRIBUTE_NOINLINE` we will be +// emitting a function call either way. +extern template LogMessage& LogMessage::operator<<(const char& v); +extern template LogMessage& LogMessage::operator<<(const signed char& v); +extern template LogMessage& LogMessage::operator<<(const unsigned char& v); +extern template LogMessage& LogMessage::operator<<(const short& v); // NOLINT +extern template LogMessage& LogMessage::operator<<( + const unsigned short& v); // NOLINT +extern template LogMessage& LogMessage::operator<<(const int& v); +extern template LogMessage& LogMessage::operator<<( + const unsigned int& v); // NOLINT +extern template LogMessage& LogMessage::operator<<(const long& v); // NOLINT +extern template LogMessage& LogMessage::operator<<( + const unsigned long& v); // NOLINT +extern template LogMessage& LogMessage::operator<<( + const long long& v); // NOLINT +extern template LogMessage& LogMessage::operator<<( + const unsigned long long& v); // NOLINT +extern template LogMessage& LogMessage::operator<<(void* const& v); +extern template LogMessage& LogMessage::operator<<(const void* const& v); +extern template LogMessage& LogMessage::operator<<(const float& v); +extern template LogMessage& LogMessage::operator<<(const double& v); +extern template LogMessage& LogMessage::operator<<(const bool& v); + +// `LogMessageFatal` ensures the process will exit in failure after logging this +// message. +class LogMessageFatal final : public LogMessage { + public: + LogMessageFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD; + LogMessageFatal(const char* file, int line, + absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD; + ABSL_ATTRIBUTE_NORETURN ~LogMessageFatal(); +}; + +class LogMessageQuietlyFatal final : public LogMessage { + public: + LogMessageQuietlyFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD; + LogMessageQuietlyFatal(const char* file, int line, + absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD; + ABSL_ATTRIBUTE_NORETURN ~LogMessageQuietlyFatal(); +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +extern "C" ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL( + AbslInternalOnFatalLogMessage)(const absl::LogEntry&); + +#endif // ABSL_LOG_INTERNAL_LOG_MESSAGE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_sink_set.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_sink_set.cc new file mode 100644 index 0000000000..f9d030aa22 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_sink_set.cc @@ -0,0 +1,296 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
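The interplay between the `LogMessage` interface above and the sink registry implemented in `log_sink_set.cc` below is easiest to see from the caller's side. The following is a minimal sketch, not part of the patch; it assumes the public Abseil headers (`absl/log/log.h`, `absl/log/log_sink.h`, `absl/log/log_sink_registry.h`, `absl/log/initialize.h`) from the same revision, and the sink name `StdoutSink` is hypothetical.

```cpp
#include <iostream>

#include "absl/log/initialize.h"
#include "absl/log/log.h"
#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"

// Hypothetical sink that mirrors every dispatched entry to stdout.
class StdoutSink : public absl::LogSink {
 public:
  void Send(const absl::LogEntry& entry) override {
    // The formatted text already ends with a newline.
    std::cout << entry.text_message_with_prefix_and_newline();
  }
};

int main() {
  absl::InitializeLog();

  StdoutSink sink;
  absl::AddLogSink(&sink);  // registers the sink in the global set
  LOG(INFO) << "dispatched to every registered sink";

  // Per-message routing uses the LogMessage methods declared above.
  LOG(WARNING).ToSinkAlso(&sink) << "mirrored to one extra sink";

  absl::RemoveLogSink(&sink);  // remove before the sink is destroyed
  return 0;
}
```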
+ +#include "absl/log/internal/log_sink_set.h" + +#ifndef ABSL_HAVE_THREAD_LOCAL +#include +#endif + +#ifdef __ANDROID__ +#include +#endif + +#ifdef _WIN32 +#include +#endif + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/call_once.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/log_severity.h" +#include "absl/base/thread_annotations.h" +#include "absl/cleanup/cleanup.h" +#include "absl/log/globals.h" +#include "absl/log/internal/config.h" +#include "absl/log/internal/globals.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" +#include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +namespace { + +// Returns a mutable reference to a thread-local variable that should be true if +// a globally-registered `LogSink`'s `Send()` is currently being invoked on this +// thread. +bool& ThreadIsLoggingStatus() { +#ifdef ABSL_HAVE_THREAD_LOCAL + ABSL_CONST_INIT thread_local bool thread_is_logging = false; + return thread_is_logging; +#else + ABSL_CONST_INIT static pthread_key_t thread_is_logging_key; + static const bool unused = [] { + if (pthread_key_create(&thread_is_logging_key, [](void* data) { + delete reinterpret_cast(data); + })) { + perror("pthread_key_create failed!"); + abort(); + } + return true; + }(); + (void)unused; // Fixes -wunused-variable warning + bool* thread_is_logging_ptr = + reinterpret_cast(pthread_getspecific(thread_is_logging_key)); + + if (ABSL_PREDICT_FALSE(!thread_is_logging_ptr)) { + thread_is_logging_ptr = new bool{false}; + if (pthread_setspecific(thread_is_logging_key, thread_is_logging_ptr)) { + perror("pthread_setspecific failed"); + abort(); + } + } + return *thread_is_logging_ptr; +#endif +} + +class StderrLogSink final : public LogSink { + public: + ~StderrLogSink() override = default; + + void Send(const absl::LogEntry& entry) override { + if (entry.log_severity() < absl::StderrThreshold() && + absl::log_internal::IsInitialized()) { + return; + } + + ABSL_CONST_INIT static absl::once_flag warn_if_not_initialized; + absl::call_once(warn_if_not_initialized, []() { + if (absl::log_internal::IsInitialized()) return; + const char w[] = + "WARNING: All log messages before absl::InitializeLog() is called" + " are written to STDERR\n"; + absl::log_internal::WriteToStderr(w, absl::LogSeverity::kWarning); + }); + + if (!entry.stacktrace().empty()) { + absl::log_internal::WriteToStderr(entry.stacktrace(), + entry.log_severity()); + } else { + // TODO(b/226937039): do this outside else condition once we avoid + // ReprintFatalMessage + absl::log_internal::WriteToStderr( + entry.text_message_with_prefix_and_newline(), entry.log_severity()); + } + } +}; + +#if defined(__ANDROID__) +class AndroidLogSink final : public LogSink { + public: + ~AndroidLogSink() override = default; + + void Send(const absl::LogEntry& entry) override { + const int level = AndroidLogLevel(entry); + // TODO(b/37587197): make the tag ("native") configurable. 
+ __android_log_write(level, "native", + entry.text_message_with_prefix_and_newline_c_str()); + if (entry.log_severity() == absl::LogSeverity::kFatal) + __android_log_write(ANDROID_LOG_FATAL, "native", "terminating.\n"); + } + + private: + static int AndroidLogLevel(const absl::LogEntry& entry) { + switch (entry.log_severity()) { + case absl::LogSeverity::kFatal: + return ANDROID_LOG_FATAL; + case absl::LogSeverity::kError: + return ANDROID_LOG_ERROR; + case absl::LogSeverity::kWarning: + return ANDROID_LOG_WARN; + default: + if (entry.verbosity() >= 2) return ANDROID_LOG_VERBOSE; + if (entry.verbosity() == 1) return ANDROID_LOG_DEBUG; + return ANDROID_LOG_INFO; + } + } +}; +#endif // !defined(__ANDROID__) + +#if defined(_WIN32) +class WindowsDebuggerLogSink final : public LogSink { + public: + ~WindowsDebuggerLogSink() override = default; + + void Send(const absl::LogEntry& entry) override { + if (entry.log_severity() < absl::StderrThreshold() && + absl::log_internal::IsInitialized()) { + return; + } + ::OutputDebugStringA(entry.text_message_with_prefix_and_newline_c_str()); + } +}; +#endif // !defined(_WIN32) + +class GlobalLogSinkSet final { + public: + GlobalLogSinkSet() { +#if defined(__myriad2__) || defined(__Fuchsia__) + // myriad2 and Fuchsia do not log to stderr by default. +#else + static StderrLogSink* stderr_log_sink = new StderrLogSink; + AddLogSink(stderr_log_sink); +#endif +#ifdef __ANDROID__ + static AndroidLogSink* android_log_sink = new AndroidLogSink; + AddLogSink(android_log_sink); +#endif +#if defined(_WIN32) + static WindowsDebuggerLogSink* debugger_log_sink = + new WindowsDebuggerLogSink; + AddLogSink(debugger_log_sink); +#endif // !defined(_WIN32) + } + + void LogToSinks(const absl::LogEntry& entry, + absl::Span extra_sinks, bool extra_sinks_only) + ABSL_LOCKS_EXCLUDED(guard_) { + SendToSinks(entry, extra_sinks); + + if (!extra_sinks_only) { + if (ThreadIsLoggingToLogSink()) { + absl::log_internal::WriteToStderr( + entry.text_message_with_prefix_and_newline(), entry.log_severity()); + } else { + absl::ReaderMutexLock global_sinks_lock(&guard_); + ThreadIsLoggingStatus() = true; + // Ensure the "thread is logging" status is reverted upon leaving the + // scope even in case of exceptions. + auto status_cleanup = + absl::MakeCleanup([] { ThreadIsLoggingStatus() = false; }); + SendToSinks(entry, absl::MakeSpan(sinks_)); + } + } + } + + void AddLogSink(absl::LogSink* sink) ABSL_LOCKS_EXCLUDED(guard_) { + { + absl::WriterMutexLock global_sinks_lock(&guard_); + auto pos = std::find(sinks_.begin(), sinks_.end(), sink); + if (pos == sinks_.end()) { + sinks_.push_back(sink); + return; + } + } + ABSL_INTERNAL_LOG(FATAL, "Duplicate log sinks are not supported"); + } + + void RemoveLogSink(absl::LogSink* sink) ABSL_LOCKS_EXCLUDED(guard_) { + { + absl::WriterMutexLock global_sinks_lock(&guard_); + auto pos = std::find(sinks_.begin(), sinks_.end(), sink); + if (pos != sinks_.end()) { + sinks_.erase(pos); + return; + } + } + ABSL_INTERNAL_LOG(FATAL, "Mismatched log sink being removed"); + } + + void FlushLogSinks() ABSL_LOCKS_EXCLUDED(guard_) { + if (ThreadIsLoggingToLogSink()) { + // The thread_local condition demonstrates that we're already holding the + // lock in order to iterate over `sinks_` for dispatch. 
The thread-safety + // annotations don't know this, so we use `ABSL_NO_THREAD_SAFETY_ANALYSIS` + guard_.AssertReaderHeld(); + FlushLogSinksLocked(); + } else { + absl::ReaderMutexLock global_sinks_lock(&guard_); + // In case if LogSink::Flush overload decides to log + ThreadIsLoggingStatus() = true; + // Ensure the "thread is logging" status is reverted upon leaving the + // scope even in case of exceptions. + auto status_cleanup = + absl::MakeCleanup([] { ThreadIsLoggingStatus() = false; }); + FlushLogSinksLocked(); + } + } + + private: + void FlushLogSinksLocked() ABSL_SHARED_LOCKS_REQUIRED(guard_) { + for (absl::LogSink* sink : sinks_) { + sink->Flush(); + } + } + + // Helper routine for LogToSinks. + static void SendToSinks(const absl::LogEntry& entry, + absl::Span sinks) { + for (absl::LogSink* sink : sinks) { + sink->Send(entry); + } + } + + using LogSinksSet = std::vector; + absl::Mutex guard_; + LogSinksSet sinks_ ABSL_GUARDED_BY(guard_); +}; + +// Returns reference to the global LogSinks set. +GlobalLogSinkSet& GlobalSinks() { + static GlobalLogSinkSet* global_sinks = new GlobalLogSinkSet; + return *global_sinks; +} + +} // namespace + +bool ThreadIsLoggingToLogSink() { return ThreadIsLoggingStatus(); } + +void LogToSinks(const absl::LogEntry& entry, + absl::Span extra_sinks, bool extra_sinks_only) { + log_internal::GlobalSinks().LogToSinks(entry, extra_sinks, extra_sinks_only); +} + +void AddLogSink(absl::LogSink* sink) { + log_internal::GlobalSinks().AddLogSink(sink); +} + +void RemoveLogSink(absl::LogSink* sink) { + log_internal::GlobalSinks().RemoveLogSink(sink); +} + +void FlushLogSinks() { log_internal::GlobalSinks().FlushLogSinks(); } + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_sink_set.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_sink_set.h new file mode 100644 index 0000000000..88ab073bbd --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/log_sink_set.h @@ -0,0 +1,54 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/log_sink_set.h +// ----------------------------------------------------------------------------- + +#ifndef ABSL_LOG_INTERNAL_LOG_SINK_SET_H_ +#define ABSL_LOG_INTERNAL_LOG_SINK_SET_H_ + +#include "absl/base/config.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// Returns true if a globally-registered `LogSink`'s `Send()` is currently +// being invoked on this thread. +bool ThreadIsLoggingToLogSink(); + +// This function may log to two sets of sinks: +// +// * If `extra_sinks_only` is true, it will dispatch only to `extra_sinks`. 
+//   `LogMessage::ToSinkAlso` and `LogMessage::ToSinkOnly` are used to attach
+//   extra sinks to the entry.
+// * Otherwise it will also log to the global sinks set. This set is managed
+//   by `absl::AddLogSink` and `absl::RemoveLogSink`.
+void LogToSinks(const absl::LogEntry& entry,
+                absl::Span<absl::LogSink*> extra_sinks, bool extra_sinks_only);
+
+// Implementation for operations with log sink set.
+void AddLogSink(absl::LogSink* sink);
+void RemoveLogSink(absl::LogSink* sink);
+void FlushLogSinks();
+
+} // namespace log_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_LOG_INTERNAL_LOG_SINK_SET_H_
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/nullguard.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/nullguard.h
new file mode 100644
index 0000000000..147ca8145c
--- /dev/null
+++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/nullguard.h
@@ -0,0 +1,56 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/nullguard.h
+// -----------------------------------------------------------------------------
+//
+// NullGuard exists such that NullGuard<T>::Guard(v) returns v, unless passed a
+// nullptr_t, or a null char* or const char*, in which case it returns "(null)".
+// This allows streaming NullGuard<T>::Guard(v) to an output stream without
+// hitting undefined behavior for null values.
+
+#ifndef ABSL_LOG_INTERNAL_NULLGUARD_H_
+#define ABSL_LOG_INTERNAL_NULLGUARD_H_
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+template <typename T>
+struct NullGuard final {
+  static const T& Guard(const T& v) { return v; }
+};
+template <>
+struct NullGuard<char*> final {
+  static const char* Guard(const char* v) { return v ? v : "(null)"; }
+};
+template <>
+struct NullGuard<const char*> final {
+  static const char* Guard(const char* v) { return v ? v : "(null)"; }
+};
+template <>
+struct NullGuard<std::nullptr_t> final {
+  static const char* Guard(const std::nullptr_t&) { return "(null)"; }
+};
+
+} // namespace log_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_LOG_INTERNAL_NULLGUARD_H_
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/nullstream.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/nullstream.h
new file mode 100644
index 0000000000..8ed63d5295
--- /dev/null
+++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/nullstream.h
@@ -0,0 +1,134 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/internal/nullstream.h
+// -----------------------------------------------------------------------------
+//
+// Classes `NullStream`, `NullStreamMaybeFatal` and `NullStreamFatal`
+// implement a subset of the `LogMessage` API and are used instead when logging
+// of messages has been disabled.
+
+#ifndef ABSL_LOG_INTERNAL_NULLSTREAM_H_
+#define ABSL_LOG_INTERNAL_NULLSTREAM_H_
+
+#ifdef _WIN32
+#include <cstdlib>
+#else
+#include <unistd.h>
+#endif
+#include <ios>
+#include <ostream>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace log_internal {
+
+// A `NullStream` implements the API of `LogMessage` (a few methods and
+// `operator<<`) but does nothing. All methods are defined inline so the
+// compiler can eliminate the whole instance and discard anything that's
+// streamed in.
+class NullStream {
+ public:
+  NullStream& AtLocation(absl::string_view, int) { return *this; }
+  template <typename SourceLocationType>
+  NullStream& AtLocation(SourceLocationType) {
+    return *this;
+  }
+  NullStream& NoPrefix() { return *this; }
+  NullStream& WithVerbosity(int) { return *this; }
+  template <typename TimeType>
+  NullStream& WithTimestamp(TimeType) {
+    return *this;
+  }
+  template <typename Tid>
+  NullStream& WithThreadID(Tid) {
+    return *this;
+  }
+  template <typename LogEntryType>
+  NullStream& WithMetadataFrom(const LogEntryType&) {
+    return *this;
+  }
+  NullStream& WithPerror() { return *this; }
+  template <typename LogSinkType>
+  NullStream& ToSinkAlso(LogSinkType*) {
+    return *this;
+  }
+  template <typename LogSinkType>
+  NullStream& ToSinkOnly(LogSinkType*) {
+    return *this;
+  }
+  template <typename LogSinkType>
+  NullStream& OutputToSink(LogSinkType*, bool) {
+    return *this;
+  }
+  NullStream& InternalStream() { return *this; }
+};
+template <typename T>
+inline NullStream& operator<<(NullStream& str, const T&) {
+  return str;
+}
+inline NullStream& operator<<(NullStream& str,
+                              std::ostream& (*)(std::ostream& os)) {
+  return str;
+}
+inline NullStream& operator<<(NullStream& str,
+                              std::ios_base& (*)(std::ios_base& os)) {
+  return str;
+}
+
+// `NullStreamMaybeFatal` implements the process termination semantics of
+// `LogMessage`, which is used for `DFATAL` severity and expression-defined
+// severity e.g. `LOG(LEVEL(HowBadIsIt()))`. Like `LogMessage`, it terminates
+// the process when destroyed if the passed-in severity equals `FATAL`.
+class NullStreamMaybeFatal final : public NullStream {
+ public:
+  explicit NullStreamMaybeFatal(absl::LogSeverity severity)
+      : fatal_(severity == absl::LogSeverity::kFatal) {}
+  ~NullStreamMaybeFatal() {
+    if (fatal_) _exit(1);
+  }
+
+ private:
+  bool fatal_;
+};
+
+// `NullStreamFatal` implements the process termination semantics of
+// `LogMessageFatal`, which means it always terminates the process. `DFATAL`
+// and expression-defined severity use `NullStreamMaybeFatal` above.
+class NullStreamFatal final : public NullStream { + public: + NullStreamFatal() {} + // ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so + // disable msvc's warning about the d'tor never returning. +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable : 4722) +#endif + ABSL_ATTRIBUTE_NORETURN ~NullStreamFatal() { _exit(1); } +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_GLOBALS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/proto.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/proto.cc new file mode 100644 index 0000000000..80d78eedab --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/proto.cc @@ -0,0 +1,214 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/proto.h" + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { +namespace { +void EncodeRawVarint(uint64_t value, size_t size, absl::Span *buf) { + for (size_t s = 0; s < size; s++) { + (*buf)[s] = (value & 0x7f) | (s + 1 == size ? 
0 : 0x80); + value >>= 7; + } + buf->remove_prefix(size); +} +constexpr uint64_t MakeTagType(uint64_t tag, WireType type) { + return tag << 3 | static_cast(type); +} +} // namespace + +bool EncodeVarint(uint64_t tag, uint64_t value, absl::Span *buf) { + const uint64_t tag_type = MakeTagType(tag, WireType::kVarint); + const uint64_t tag_type_size = VarintSize(tag_type); + const uint64_t value_size = VarintSize(value); + if (tag_type_size + value_size > buf->size()) { + buf->remove_suffix(buf->size()); + return false; + } + EncodeRawVarint(tag_type, tag_type_size, buf); + EncodeRawVarint(value, value_size, buf); + return true; +} + +bool Encode64Bit(uint64_t tag, uint64_t value, absl::Span *buf) { + const uint64_t tag_type = MakeTagType(tag, WireType::k64Bit); + const uint64_t tag_type_size = VarintSize(tag_type); + if (tag_type_size + sizeof(value) > buf->size()) { + buf->remove_suffix(buf->size()); + return false; + } + EncodeRawVarint(tag_type, tag_type_size, buf); + for (size_t s = 0; s < sizeof(value); s++) { + (*buf)[s] = value & 0xff; + value >>= 8; + } + buf->remove_prefix(sizeof(value)); + return true; +} + +bool Encode32Bit(uint64_t tag, uint32_t value, absl::Span *buf) { + const uint64_t tag_type = MakeTagType(tag, WireType::k32Bit); + const uint64_t tag_type_size = VarintSize(tag_type); + if (tag_type_size + sizeof(value) > buf->size()) { + buf->remove_suffix(buf->size()); + return false; + } + EncodeRawVarint(tag_type, tag_type_size, buf); + for (size_t s = 0; s < sizeof(value); s++) { + (*buf)[s] = value & 0xff; + value >>= 8; + } + buf->remove_prefix(sizeof(value)); + return true; +} + +bool EncodeBytes(uint64_t tag, absl::Span value, + absl::Span *buf) { + const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited); + const uint64_t tag_type_size = VarintSize(tag_type); + uint64_t length = value.size(); + const uint64_t length_size = VarintSize(length); + if (tag_type_size + length_size + value.size() > buf->size()) { + buf->remove_suffix(buf->size()); + return false; + } + EncodeRawVarint(tag_type, tag_type_size, buf); + EncodeRawVarint(length, length_size, buf); + memcpy(buf->data(), value.data(), value.size()); + buf->remove_prefix(value.size()); + return true; +} + +bool EncodeBytesTruncate(uint64_t tag, absl::Span value, + absl::Span *buf) { + const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited); + const uint64_t tag_type_size = VarintSize(tag_type); + uint64_t length = value.size(); + const uint64_t length_size = VarintSize(length); + if (tag_type_size + length_size <= buf->size() && + tag_type_size + length_size + value.size() > buf->size()) { + value.remove_suffix(tag_type_size + length_size + value.size() - + buf->size()); + length = value.size(); + } + if (tag_type_size + length_size + value.size() > buf->size()) { + buf->remove_suffix(buf->size()); + return false; + } + EncodeRawVarint(tag_type, tag_type_size, buf); + EncodeRawVarint(length, length_size, buf); + memcpy(buf->data(), value.data(), value.size()); + buf->remove_prefix(value.size()); + return true; +} + +ABSL_MUST_USE_RESULT absl::Span EncodeMessageStart( + uint64_t tag, uint64_t max_size, absl::Span *buf) { + const uint64_t tag_type = MakeTagType(tag, WireType::kLengthDelimited); + const uint64_t tag_type_size = VarintSize(tag_type); + max_size = std::min(max_size, buf->size()); + const uint64_t length_size = VarintSize(max_size); + if (tag_type_size + length_size > buf->size()) { + buf->remove_suffix(buf->size()); + return absl::Span(); + } + EncodeRawVarint(tag_type, 
tag_type_size, buf); + const absl::Span ret = buf->subspan(0, length_size); + EncodeRawVarint(0, length_size, buf); + return ret; +} + +void EncodeMessageLength(absl::Span msg, const absl::Span *buf) { + if (!msg.data()) return; + const uint64_t length_size = msg.size(); + EncodeRawVarint(buf->data() - msg.data() - length_size, length_size, &msg); +} + +namespace { +uint64_t DecodeVarint(absl::Span *buf) { + uint64_t value = 0; + size_t s = 0; + while (s < buf->size()) { + value |= static_cast(static_cast((*buf)[s]) & 0x7f) + << 7 * s; + if (!((*buf)[s++] & 0x80)) break; + } + buf->remove_prefix(s); + return value; +} + +uint64_t Decode64Bit(absl::Span *buf) { + uint64_t value = 0; + size_t s = 0; + while (s < buf->size()) { + value |= static_cast(static_cast((*buf)[s])) + << 8 * s; + if (++s == sizeof(value)) break; + } + buf->remove_prefix(s); + return value; +} + +uint32_t Decode32Bit(absl::Span *buf) { + uint32_t value = 0; + size_t s = 0; + while (s < buf->size()) { + value |= static_cast(static_cast((*buf)[s])) + << 8 * s; + if (++s == sizeof(value)) break; + } + buf->remove_prefix(s); + return value; +} +} // namespace + +bool ProtoField::DecodeFrom(absl::Span *data) { + if (data->empty()) return false; + const uint64_t tag_type = DecodeVarint(data); + tag_ = tag_type >> 3; + type_ = static_cast(tag_type & 0x07); + switch (type_) { + case WireType::kVarint: + value_ = DecodeVarint(data); + break; + case WireType::k64Bit: + value_ = Decode64Bit(data); + break; + case WireType::kLengthDelimited: { + value_ = DecodeVarint(data); + data_ = data->subspan(0, std::min(value_, data->size())); + data->remove_prefix(data_.size()); + break; + } + case WireType::k32Bit: + value_ = Decode32Bit(data); + break; + } + return true; +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/proto.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/proto.h new file mode 100644 index 0000000000..63c4e98647 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/proto.h @@ -0,0 +1,267 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// ----------------------------------------------------------------------------- +// File: internal/proto.h +// ----------------------------------------------------------------------------- +// +// Declares functions for serializing and deserializing data to and from memory +// buffers in protocol buffer wire format. This library takes no steps to +// ensure that the encoded data matches with any message specification. 
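Because this header only declares free encoding helpers, a short usage sketch may help. It is illustrative only and not part of the patch; it assumes the buffer views taken by these helpers are `absl::Span<char>`, as in the upstream header (the spans' element types were dropped from the declarations in this copy of the diff), and it exercises the two message-encoding styles described in the comments below.

```cpp
#include <array>
#include <cstddef>

#include "absl/log/internal/proto.h"
#include "absl/types/span.h"

namespace li = absl::log_internal;

// Encodes field 1 as a varint and field 2 as a nested message, writing the
// nested contents in place and patching the length afterwards.
void EncodeExample() {
  std::array<char, 64> storage;
  absl::Span<char> buf(storage.data(), storage.size());

  li::EncodeVarint(/*tag=*/1, /*value=*/42, &buf);

  // Reserve the header of the length-delimited field, encode its contents,
  // then finalize the reserved length bytes.
  absl::Span<char> msg = li::EncodeMessageStart(/*tag=*/2, buf.size(), &buf);
  li::EncodeString(/*tag=*/1, "hello", &buf);  // a field inside the submessage
  li::EncodeMessageLength(msg, &buf);

  // The encoders shrink `buf` as they go, so the difference is the byte count.
  const size_t bytes_written = storage.size() - buf.size();
  (void)bytes_written;
}
```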
+ +#ifndef ABSL_LOG_INTERNAL_PROTO_H_ +#define ABSL_LOG_INTERNAL_PROTO_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// absl::Span represents a view into the available space in a mutable +// buffer during encoding. Encoding functions shrink the span as they go so +// that the same view can be passed to a series of Encode functions. If the +// data do not fit, nothing is encoded, the view is set to size zero (so that +// all subsequent encode calls fail), and false is returned. Otherwise true is +// returned. + +// In particular, attempting to encode a series of data into an insufficient +// buffer has consistent and efficient behavior without any caller-side error +// checking. Individual values will be encoded in their entirety or not at all +// (unless one of the `Truncate` functions is used). Once a value is omitted +// because it does not fit, no subsequent values will be encoded to preserve +// ordering; the decoded sequence will be a prefix of the original sequence. + +// There are two ways to encode a message-typed field: +// +// * Construct its contents in a separate buffer and use `EncodeBytes` to copy +// it into the primary buffer with type, tag, and length. +// * Use `EncodeMessageStart` to write type and tag fields and reserve space for +// the length field, then encode the contents directly into the buffer, then +// use `EncodeMessageLength` to write the actual length into the reserved +// bytes. This works fine if the actual length takes fewer bytes to encode +// than were reserved, although you don't get your extra bytes back. +// This approach will always produce a valid encoding, but your protocol may +// require that the whole message field by omitted if the buffer is too small +// to contain all desired subfields. In this case, operate on a copy of the +// buffer view and assign back only if everything fit, i.e. if the last +// `Encode` call returned true. + +// Encodes the specified integer as a varint field and returns true if it fits. +// Used for int32_t, int64_t, uint32_t, uint64_t, bool, and enum field types. +// Consumes up to kMaxVarintSize * 2 bytes (20). +bool EncodeVarint(uint64_t tag, uint64_t value, absl::Span *buf); + +// Encodes the specified integer as a varint field using ZigZag encoding and +// returns true if it fits. +// Used for sint32 and sint64 field types. +// Consumes up to kMaxVarintSize * 2 bytes (20). +inline bool EncodeVarintZigZag(uint64_t tag, int64_t value, + absl::Span *buf) { + if (value < 0) + return EncodeVarint(tag, 2 * static_cast(-(value + 1)) + 1, buf); + return EncodeVarint(tag, 2 * static_cast(value), buf); +} + +// Encodes the specified integer as a 64-bit field and returns true if it fits. +// Used for fixed64 and sfixed64 field types. +// Consumes up to kMaxVarintSize + 8 bytes (18). +bool Encode64Bit(uint64_t tag, uint64_t value, absl::Span *buf); + +// Encodes the specified double as a 64-bit field and returns true if it fits. +// Used for double field type. +// Consumes up to kMaxVarintSize + 8 bytes (18). +inline bool EncodeDouble(uint64_t tag, double value, absl::Span *buf) { + return Encode64Bit(tag, absl::bit_cast(value), buf); +} + +// Encodes the specified integer as a 32-bit field and returns true if it fits. +// Used for fixed32 and sfixed32 field types. 
+// Consumes up to kMaxVarintSize + 4 bytes (14). +bool Encode32Bit(uint64_t tag, uint32_t value, absl::Span *buf); + +// Encodes the specified float as a 32-bit field and returns true if it fits. +// Used for float field type. +// Consumes up to kMaxVarintSize + 4 bytes (14). +inline bool EncodeFloat(uint64_t tag, float value, absl::Span *buf) { + return Encode32Bit(tag, absl::bit_cast(value), buf); +} + +// Encodes the specified bytes as a length-delimited field and returns true if +// they fit. +// Used for string, bytes, message, and packed-repeated field type. +// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()). +bool EncodeBytes(uint64_t tag, absl::Span value, + absl::Span *buf); + +// Encodes as many of the specified bytes as will fit as a length-delimited +// field and returns true as long as the field header (`tag_type` and `length`) +// fits. +// Used for string, bytes, message, and packed-repeated field type. +// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()). +bool EncodeBytesTruncate(uint64_t tag, absl::Span value, + absl::Span *buf); + +// Encodes the specified string as a length-delimited field and returns true if +// it fits. +// Used for string, bytes, message, and packed-repeated field type. +// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()). +inline bool EncodeString(uint64_t tag, absl::string_view value, + absl::Span *buf) { + return EncodeBytes(tag, value, buf); +} + +// Encodes as much of the specified string as will fit as a length-delimited +// field and returns true as long as the field header (`tag_type` and `length`) +// fits. +// Used for string, bytes, message, and packed-repeated field type. +// Consumes up to kMaxVarintSize * 2 + value.size() bytes (20 + value.size()). +inline bool EncodeStringTruncate(uint64_t tag, absl::string_view value, + absl::Span *buf) { + return EncodeBytesTruncate(tag, value, buf); +} + +// Encodes the header for a length-delimited field containing up to `max_size` +// bytes or the number remaining in the buffer, whichever is less. If the +// header fits, a non-nullptr `Span` is returned; this must be passed to +// `EncodeMessageLength` after all contents are encoded to finalize the length +// field. If the header does not fit, a nullptr `Span` is returned which is +// safe to pass to `EncodeMessageLength` but need not be. +// Used for string, bytes, message, and packed-repeated field type. +// Consumes up to kMaxVarintSize * 2 bytes (20). +ABSL_MUST_USE_RESULT absl::Span EncodeMessageStart(uint64_t tag, + uint64_t max_size, + absl::Span *buf); + +// Finalizes the length field in `msg` so that it encompasses all data encoded +// since the call to `EncodeMessageStart` which returned `msg`. Does nothing if +// `msg` is a `nullptr` `Span`. +void EncodeMessageLength(absl::Span msg, const absl::Span *buf); + +enum class WireType : uint64_t { + kVarint = 0, + k64Bit = 1, + kLengthDelimited = 2, + k32Bit = 5, +}; + +constexpr uint64_t VarintSize(uint64_t value) { + return value < 128 ? 1 : 1 + VarintSize(value >> 7); +} +constexpr uint64_t MinVarintSize() { + return VarintSize((std::numeric_limits::min)()); +} +constexpr uint64_t MaxVarintSize() { + return VarintSize((std::numeric_limits::max)()); +} + +constexpr uint64_t MaxVarintForSize(size_t size) { + return size >= 10 ? 
(std::numeric_limits::max)() + : (static_cast(1) << size * 7) - 1; +} + +// `BufferSizeFor` returns a number of bytes guaranteed to be sufficient to +// store encoded fields of the specified WireTypes regardless of tag numbers and +// data values. This only makes sense for `WireType::kLengthDelimited` if you +// add in the length of the contents yourself, e.g. for string and bytes fields +// by adding the lengths of any encoded strings to the return value or for +// submessage fields by enumerating the fields you may encode into their +// contents. +constexpr size_t BufferSizeFor() { return 0; } +template +constexpr size_t BufferSizeFor(WireType type, T... tail) { + // tag_type + data + ... + return MaxVarintSize() + + (type == WireType::kVarint ? MaxVarintSize() : // + type == WireType::k64Bit ? 8 : // + type == WireType::k32Bit ? 4 : MaxVarintSize()) + // + BufferSizeFor(tail...); +} + +// absl::Span represents a view into the un-processed space in a +// buffer during decoding. Decoding functions shrink the span as they go so +// that the same view can be decoded iteratively until all data are processed. +// In general, if the buffer is exhausted but additional bytes are expected by +// the decoder, it will return values as if the additional bytes were zeros. +// Length-delimited fields are an exception - if the encoded length field +// indicates more data bytes than are available in the buffer, the `bytes_value` +// and `string_value` accessors will return truncated views. + +class ProtoField final { + public: + // Consumes bytes from `data` and returns true if there were any bytes to + // decode. + bool DecodeFrom(absl::Span *data); + uint64_t tag() const { return tag_; } + WireType type() const { return type_; } + + // These value accessors will return nonsense if the data were not encoded in + // the corresponding wiretype from the corresponding C++ (or other language) + // type. + + double double_value() const { return absl::bit_cast(value_); } + float float_value() const { + return absl::bit_cast(static_cast(value_)); + } + int32_t int32_value() const { return static_cast(value_); } + int64_t int64_value() const { return static_cast(value_); } + int32_t sint32_value() const { + if (value_ % 2) return static_cast(0 - ((value_ - 1) / 2) - 1); + return static_cast(value_ / 2); + } + int64_t sint64_value() const { + if (value_ % 2) return 0 - ((value_ - 1) / 2) - 1; + return value_ / 2; + } + uint32_t uint32_value() const { return static_cast(value_); } + uint64_t uint64_value() const { return value_; } + bool bool_value() const { return value_ != 0; } + // To decode an enum, call int32_value() and cast to the appropriate type. + // Note that the official C++ proto compiler treats enum fields with values + // that do not correspond to a defined enumerator as unknown fields. + + // To decode fields within a submessage field, call + // `DecodeNextField(field.BytesValue())`. + absl::Span bytes_value() const { return data_; } + absl::string_view string_value() const { + const auto data = bytes_value(); + return absl::string_view(data.data(), data.size()); + } + // Returns the encoded length of a length-delimited field. This equals + // `bytes_value().size()` except when the latter has been truncated due to + // buffer underrun. + uint64_t encoded_length() const { return value_; } + + private: + uint64_t tag_; + WireType type_; + // For `kTypeVarint`, `kType64Bit`, and `kType32Bit`, holds the decoded value. + // For `kTypeLengthDelimited`, holds the decoded length. 
+ uint64_t value_; + absl::Span data_; +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_PROTO_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc new file mode 100644 index 0000000000..763690d1b3 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc @@ -0,0 +1,105 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/log.h" + +namespace { +using ::testing::AllOf; +using ::testing::HasSubstr; + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +MATCHER_P2(HasSubstrTimes, substr, expected_count, "") { + int count = 0; + std::string::size_type pos = 0; + std::string needle(substr); + while ((pos = arg.find(needle, pos)) != std::string::npos) { + ++count; + pos += needle.size(); + } + + return count == expected_count; +} + +TEST(StderrLogSinkDeathTest, InfoMessagesInStderr) { + EXPECT_DEATH_IF_SUPPORTED( + { + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); + LOG(INFO) << "INFO message"; + exit(1); + }, + "INFO message"); +} + +TEST(StderrLogSinkDeathTest, WarningMessagesInStderr) { + EXPECT_DEATH_IF_SUPPORTED( + { + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); + LOG(WARNING) << "WARNING message"; + exit(1); + }, + "WARNING message"); +} + +TEST(StderrLogSinkDeathTest, ErrorMessagesInStderr) { + EXPECT_DEATH_IF_SUPPORTED( + { + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); + LOG(ERROR) << "ERROR message"; + exit(1); + }, + "ERROR message"); +} + +TEST(StderrLogSinkDeathTest, FatalMessagesInStderr) { + char message[] = "FATAL message"; + char stacktrace[] = "*** Check failure stack trace: ***"; + + int expected_count = 1; + + EXPECT_DEATH_IF_SUPPORTED( + { + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); + LOG(FATAL) << message; + }, + AllOf(HasSubstrTimes(message, expected_count), HasSubstr(stacktrace))); +} + +TEST(StderrLogSinkDeathTest, SecondaryFatalMessagesInStderr) { + auto MessageGen = []() -> std::string { + LOG(FATAL) << "Internal failure"; + return "External failure"; + }; + + EXPECT_DEATH_IF_SUPPORTED( + { + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); + LOG(FATAL) << MessageGen(); + }, + "Internal failure"); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/strip.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/strip.h new file mode 100644 index 0000000000..848c38672d --- /dev/null +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/strip.h @@ -0,0 +1,71 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/strip.h +// ----------------------------------------------------------------------------- +// + +#ifndef ABSL_LOG_INTERNAL_STRIP_H_ +#define ABSL_LOG_INTERNAL_STRIP_H_ + +#include "absl/base/log_severity.h" +#include "absl/log/internal/log_message.h" +#include "absl/log/internal/nullstream.h" + +// `ABSL_LOGGING_INTERNAL_LOG_*` evaluates to a temporary `LogMessage` object or +// to a related object with a compatible API but different behavior. This set +// of defines comes in three flavors: vanilla, plus two variants that strip some +// logging in subtly different ways for subtly different reasons (see below). +#if defined(STRIP_LOG) && STRIP_LOG +#define ABSL_LOGGING_INTERNAL_LOG_INFO ::absl::log_internal::NullStream() +#define ABSL_LOGGING_INTERNAL_LOG_WARNING ::absl::log_internal::NullStream() +#define ABSL_LOGGING_INTERNAL_LOG_ERROR ::absl::log_internal::NullStream() +#define ABSL_LOGGING_INTERNAL_LOG_FATAL ::absl::log_internal::NullStreamFatal() +#define ABSL_LOGGING_INTERNAL_LOG_QFATAL ::absl::log_internal::NullStreamFatal() +#define ABSL_LOGGING_INTERNAL_LOG_DFATAL \ + ::absl::log_internal::NullStreamMaybeFatal(::absl::kLogDebugFatal) +#define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \ + ::absl::log_internal::NullStreamMaybeFatal(log_internal_severity) +#define ABSL_LOG_INTERNAL_CHECK(failure_message) ABSL_LOGGING_INTERNAL_LOG_FATAL +#define ABSL_LOG_INTERNAL_QCHECK(failure_message) \ + ABSL_LOGGING_INTERNAL_LOG_QFATAL +#else // !defined(STRIP_LOG) || !STRIP_LOG +#define ABSL_LOGGING_INTERNAL_LOG_INFO \ + ::absl::log_internal::LogMessage(__FILE__, __LINE__, \ + ::absl::LogSeverity::kInfo) +#define ABSL_LOGGING_INTERNAL_LOG_WARNING \ + ::absl::log_internal::LogMessage(__FILE__, __LINE__, \ + ::absl::LogSeverity::kWarning) +#define ABSL_LOGGING_INTERNAL_LOG_ERROR \ + ::absl::log_internal::LogMessage(__FILE__, __LINE__, \ + ::absl::LogSeverity::kError) +#define ABSL_LOGGING_INTERNAL_LOG_FATAL \ + ::absl::log_internal::LogMessageFatal(__FILE__, __LINE__) +#define ABSL_LOGGING_INTERNAL_LOG_QFATAL \ + ::absl::log_internal::LogMessageQuietlyFatal(__FILE__, __LINE__) +#define ABSL_LOGGING_INTERNAL_LOG_DFATAL \ + ::absl::log_internal::LogMessage(__FILE__, __LINE__, ::absl::kLogDebugFatal) +#define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \ + ::absl::log_internal::LogMessage(__FILE__, __LINE__, log_internal_severity) +// These special cases dispatch to special-case constructors that allow us to +// avoid an extra function call and shrink non-LTO binaries by a percent or so. 
+#define ABSL_LOG_INTERNAL_CHECK(failure_message) \ + ::absl::log_internal::LogMessageFatal(__FILE__, __LINE__, failure_message) +#define ABSL_LOG_INTERNAL_QCHECK(failure_message) \ + ::absl::log_internal::LogMessageQuietlyFatal(__FILE__, __LINE__, \ + failure_message) +#endif // !defined(STRIP_LOG) || !STRIP_LOG + +#endif // ABSL_LOG_INTERNAL_STRIP_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/structured.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/structured.h new file mode 100644 index 0000000000..8888e7d716 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/structured.h @@ -0,0 +1,57 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/structured.h +// ----------------------------------------------------------------------------- + +#ifndef ABSL_LOG_INTERNAL_STRUCTURED_H_ +#define ABSL_LOG_INTERNAL_STRUCTURED_H_ + +#include + +#include "absl/base/config.h" +#include "absl/log/internal/log_message.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +class ABSL_MUST_USE_RESULT AsLiteralImpl final { + public: + explicit AsLiteralImpl(absl::string_view str) : str_(str) {} + AsLiteralImpl(const AsLiteralImpl&) = default; + AsLiteralImpl& operator=(const AsLiteralImpl&) = default; + + private: + absl::string_view str_; + + friend std::ostream& operator<<(std::ostream& os, AsLiteralImpl as_literal) { + return os << as_literal.str_; + } + log_internal::LogMessage& AddToMessage(log_internal::LogMessage& m) { + return m.LogString(/* literal = */ true, str_); + } + friend log_internal::LogMessage& operator<<(log_internal::LogMessage& m, + AsLiteralImpl as_literal) { + return as_literal.AddToMessage(m); + } +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_STRUCTURED_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_actions.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_actions.cc new file mode 100644 index 0000000000..bb4af17f9c --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_actions.cc @@ -0,0 +1,74 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/internal/test_actions.h" + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/strings/escaping.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +void WriteToStderrWithFilename::operator()(const absl::LogEntry& entry) const { + std::cerr << message << " (file: " << entry.source_filename() << ")\n"; +} + +void WriteEntryToStderr::operator()(const absl::LogEntry& entry) const { + if (!message.empty()) std::cerr << message << "\n"; + + const std::string source_filename = absl::CHexEscape(entry.source_filename()); + const std::string source_basename = absl::CHexEscape(entry.source_basename()); + const std::string text_message = absl::CHexEscape(entry.text_message()); + const std::string encoded_message = absl::CHexEscape(entry.encoded_message()); + std::string encoded_message_str; + std::cerr << "LogEntry{\n" // + << " source_filename: \"" << source_filename << "\"\n" // + << " source_basename: \"" << source_basename << "\"\n" // + << " source_line: " << entry.source_line() << "\n" // + << " prefix: " << (entry.prefix() ? "true\n" : "false\n") // + << " log_severity: " << entry.log_severity() << "\n" // + << " timestamp: " << entry.timestamp() << "\n" // + << " text_message: \"" << text_message << "\"\n" // + << " verbosity: " << entry.verbosity() << "\n" // + << " encoded_message (raw): \"" << encoded_message << "\"\n" // + << encoded_message_str // + << "}\n"; +} + +void WriteEntryToStderr::operator()(absl::LogSeverity severity, + absl::string_view filename, + absl::string_view log_message) const { + if (!message.empty()) std::cerr << message << "\n"; + const std::string source_filename = absl::CHexEscape(filename); + const std::string text_message = absl::CHexEscape(log_message); + std::cerr << "LogEntry{\n" // + << " source_filename: \"" << source_filename << "\"\n" // + << " log_severity: " << severity << "\n" // + << " text_message: \"" << text_message << "\"\n" // + << "}\n"; +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_actions.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_actions.h new file mode 100644 index 0000000000..649a050521 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_actions.h @@ -0,0 +1,90 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: log/internal/test_actions.h +// ----------------------------------------------------------------------------- +// +// This file declares Googletest's actions used in the Abseil Logging library +// unit tests. + +#ifndef ABSL_LOG_INTERNAL_TEST_ACTIONS_H_ +#define ABSL_LOG_INTERNAL_TEST_ACTIONS_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/log_entry.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// These actions are used by the child process in a death test. +// +// Expectations set in the child cannot cause test failure in the parent +// directly. Instead, the child can use these actions with +// `EXPECT_CALL`/`WillOnce` and `ON_CALL`/`WillByDefault` (for unexpected calls) +// to write messages to stderr that the parent can match against. +struct WriteToStderr final { + explicit WriteToStderr(absl::string_view m) : message(m) {} + std::string message; + + template + void operator()(const Args&...) const { + std::cerr << message << std::endl; + } +}; + +struct WriteToStderrWithFilename final { + explicit WriteToStderrWithFilename(absl::string_view m) : message(m) {} + + std::string message; + + void operator()(const absl::LogEntry& entry) const; +}; + +struct WriteEntryToStderr final { + explicit WriteEntryToStderr(absl::string_view m) : message(m) {} + + std::string message = ""; + + void operator()(const absl::LogEntry& entry) const; + void operator()(absl::LogSeverity, absl::string_view, + absl::string_view) const; +}; + +// See the documentation for `DeathTestValidateExpectations` above. +// `DeathTestExpectedLogging` should be used once in a given death test, and the +// applicable severity level is the one that should be passed to +// `DeathTestValidateExpectations`. +inline WriteEntryToStderr DeathTestExpectedLogging() { + return WriteEntryToStderr{"Mock received expected entry:"}; +} + +// `DeathTestUnexpectedLogging` should be used zero or more times to mark +// messages that should not hit the logs as the process dies. +inline WriteEntryToStderr DeathTestUnexpectedLogging() { + return WriteEntryToStderr{"Mock received unexpected entry:"}; +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_TEST_ACTIONS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_helpers.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_helpers.cc new file mode 100644 index 0000000000..0de5b96b43 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_helpers.cc @@ -0,0 +1,82 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "absl/log/internal/test_helpers.h" + +#ifdef __Fuchsia__ +#include +#endif + +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "absl/log/initialize.h" +#include "absl/log/internal/globals.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// Returns false if the specified severity level is disabled by +// `ABSL_MIN_LOG_LEVEL` or `absl::MinLogLevel()`. +bool LoggingEnabledAt(absl::LogSeverity severity) { + return severity >= kAbslMinLogLevel && severity >= absl::MinLogLevel(); +} + +// ----------------------------------------------------------------------------- +// Googletest Death Test Predicates +// ----------------------------------------------------------------------------- + +#if GTEST_HAS_DEATH_TEST + +bool DiedOfFatal(int exit_status) { +#if defined(_WIN32) + // Depending on NDEBUG and (configuration?) MSVC's abort either results + // in error code 3 (SIGABRT) or error code 0x80000003 (breakpoint + // triggered). + return ::testing::ExitedWithCode(3)(exit_status & 0x7fffffff); +#elif defined(__Fuchsia__) + // The Fuchsia death test implementation kill()'s the process when it detects + // an exception, so it should exit with the corresponding code. See + // FuchsiaDeathTest::Wait(). + return ::testing::ExitedWithCode(ZX_TASK_RETCODE_SYSCALL_KILL)(exit_status); +#elif defined(__ANDROID__) && defined(__aarch64__) + // These are all run under a qemu config that eats died-due-to-signal exit + // statuses. + return true; +#else + return ::testing::KilledBySignal(SIGABRT)(exit_status); +#endif +} + +bool DiedOfQFatal(int exit_status) { + return ::testing::ExitedWithCode(1)(exit_status); +} + +#endif + +// ----------------------------------------------------------------------------- +// Helper for Log inititalization in test +// ----------------------------------------------------------------------------- + +void LogTestEnvironment::SetUp() { + if (!absl::log_internal::IsInitialized()) { + absl::InitializeLog(); + } +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_helpers.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_helpers.h new file mode 100644 index 0000000000..fd06e295a2 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_helpers.h @@ -0,0 +1,71 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/test_helpers.h +// ----------------------------------------------------------------------------- +// +// This file declares testing helpers for the logging library. 
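The predicates declared here are meant to be composed with the actions above and the matchers declared further below; a hedged end-to-end sketch follows (the `MockLogSink` test double is hypothetical and not part of this patch; the expectation uses AtLeast(1) because fatal entries are dispatched to sinks twice, as documented in log_entry.h later in this diff):

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"
    #include "absl/base/log_severity.h"
    #include "absl/log/internal/test_actions.h"
    #include "absl/log/internal/test_helpers.h"
    #include "absl/log/internal/test_matchers.h"
    #include "absl/log/log.h"
    #include "absl/log/log_entry.h"
    #include "absl/log/log_sink.h"
    #include "absl/log/log_sink_registry.h"

    namespace {

    class MockLogSink : public absl::LogSink {  // hypothetical test double
     public:
      MOCK_METHOD(void, Send, (const absl::LogEntry&), (override));
    };

    TEST(FatalLoggingDeathTest, LogsExpectedEntry) {
      EXPECT_EXIT(
          {
            MockLogSink sink;
            // Calls matching no expectation fall through to the default
            // action, which reports them as unexpected.
            ON_CALL(sink, Send(::testing::_))
                .WillByDefault(absl::log_internal::DeathTestUnexpectedLogging());
            EXPECT_CALL(sink, Send(absl::log_internal::LogSeverity(
                                  ::testing::Eq(absl::LogSeverity::kFatal))))
                .Times(::testing::AtLeast(1))
                .WillRepeatedly(absl::log_internal::DeathTestExpectedLogging());
            absl::AddLogSink(&sink);
            LOG(FATAL) << "boom";
          },
          absl::log_internal::DiedOfFatal,
          absl::log_internal::DeathTestValidateExpectations());
    }

    }  // namespace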
+ +#ifndef ABSL_LOG_INTERNAL_TEST_HELPERS_H_ +#define ABSL_LOG_INTERNAL_TEST_HELPERS_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// `ABSL_MIN_LOG_LEVEL` can't be used directly since it is not always defined. +constexpr auto kAbslMinLogLevel = +#ifdef ABSL_MIN_LOG_LEVEL + static_cast(ABSL_MIN_LOG_LEVEL); +#else + absl::LogSeverityAtLeast::kInfo; +#endif + +// Returns false if the specified severity level is disabled by +// `ABSL_MIN_LOG_LEVEL` or `absl::MinLogLevel()`. +bool LoggingEnabledAt(absl::LogSeverity severity); + +// ----------------------------------------------------------------------------- +// Googletest Death Test Predicates +// ----------------------------------------------------------------------------- + +#if GTEST_HAS_DEATH_TEST + +bool DiedOfFatal(int exit_status); +bool DiedOfQFatal(int exit_status); + +#endif + +// ----------------------------------------------------------------------------- +// Helper for Log inititalization in test +// ----------------------------------------------------------------------------- + +class LogTestEnvironment : public ::testing::Environment { + public: + ~LogTestEnvironment() override = default; + + void SetUp() override; +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_TEST_HELPERS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_matchers.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_matchers.cc new file mode 100644 index 0000000000..72ca704e75 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_matchers.cc @@ -0,0 +1,169 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/log/internal/test_matchers.h" + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/strings/string_view.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +::testing::Matcher SourceFilename( + const ::testing::Matcher& source_filename) { + return Property("source_filename", &absl::LogEntry::source_filename, + source_filename); +} + +::testing::Matcher SourceBasename( + const ::testing::Matcher& source_basename) { + return Property("source_basename", &absl::LogEntry::source_basename, + source_basename); +} + +::testing::Matcher SourceLine( + const ::testing::Matcher& source_line) { + return Property("source_line", &absl::LogEntry::source_line, source_line); +} + +::testing::Matcher Prefix( + const ::testing::Matcher& prefix) { + return Property("prefix", &absl::LogEntry::prefix, prefix); +} + +::testing::Matcher LogSeverity( + const ::testing::Matcher& log_severity) { + return Property("log_severity", &absl::LogEntry::log_severity, log_severity); +} + +::testing::Matcher Timestamp( + const ::testing::Matcher& timestamp) { + return Property("timestamp", &absl::LogEntry::timestamp, timestamp); +} + +::testing::Matcher TimestampInMatchWindow() { + return Property("timestamp", &absl::LogEntry::timestamp, + ::testing::AllOf(::testing::Ge(absl::Now()), + ::testing::Truly([](absl::Time arg) { + return arg <= absl::Now(); + }))); +} + +::testing::Matcher ThreadID( + const ::testing::Matcher& tid) { + return Property("tid", &absl::LogEntry::tid, tid); +} + +::testing::Matcher TextMessageWithPrefixAndNewline( + const ::testing::Matcher& + text_message_with_prefix_and_newline) { + return Property("text_message_with_prefix_and_newline", + &absl::LogEntry::text_message_with_prefix_and_newline, + text_message_with_prefix_and_newline); +} + +::testing::Matcher TextMessageWithPrefix( + const ::testing::Matcher& text_message_with_prefix) { + return Property("text_message_with_prefix", + &absl::LogEntry::text_message_with_prefix, + text_message_with_prefix); +} + +::testing::Matcher TextMessage( + const ::testing::Matcher& text_message) { + return Property("text_message", &absl::LogEntry::text_message, text_message); +} + +::testing::Matcher TextPrefix( + const ::testing::Matcher& text_prefix) { + return ResultOf( + [](const absl::LogEntry& entry) { + absl::string_view msg = entry.text_message_with_prefix(); + msg.remove_suffix(entry.text_message().size()); + return msg; + }, + text_prefix); +} + +::testing::Matcher Verbosity( + const ::testing::Matcher& verbosity) { + return Property("verbosity", &absl::LogEntry::verbosity, verbosity); +} + +::testing::Matcher Stacktrace( + const ::testing::Matcher& stacktrace) { + return Property("stacktrace", &absl::LogEntry::stacktrace, stacktrace); +} + +class MatchesOstreamImpl final + : public ::testing::MatcherInterface { + public: + explicit MatchesOstreamImpl(std::string expected) + : expected_(std::move(expected)) {} + bool MatchAndExplain(absl::string_view actual, + ::testing::MatchResultListener*) const override { + return actual == expected_; + } + void DescribeTo(std::ostream* os) const override { + *os << "matches the contents of the ostringstream, which are \"" + << expected_ << "\""; + } + + void DescribeNegationTo(std::ostream* os) const override { + *os << "does not match the contents of the 
ostringstream, which are \"" + << expected_ << "\""; + } + + private: + const std::string expected_; +}; +::testing::Matcher MatchesOstream( + const std::ostringstream& stream) { + return ::testing::MakeMatcher(new MatchesOstreamImpl(stream.str())); +} + +// We need to validate what is and isn't logged as the process dies due to +// `FATAL`, `QFATAL`, `CHECK`, etc., but assertions inside a death test +// subprocess don't directly affect the pass/fail status of the parent process. +// Instead, we use the mock actions `DeathTestExpectedLogging` and +// `DeathTestUnexpectedLogging` to write specific phrases to `stderr` that we +// can validate in the parent process using this matcher. +::testing::Matcher DeathTestValidateExpectations() { + if (log_internal::LoggingEnabledAt(absl::LogSeverity::kFatal)) { + return ::testing::Matcher(::testing::AllOf( + ::testing::HasSubstr("Mock received expected entry"), + Not(::testing::HasSubstr("Mock received unexpected entry")))); + } + // If `FATAL` logging is disabled, neither message should have been written. + return ::testing::Matcher(::testing::AllOf( + Not(::testing::HasSubstr("Mock received expected entry")), + Not(::testing::HasSubstr("Mock received unexpected entry")))); +} + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_matchers.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_matchers.h new file mode 100644 index 0000000000..c18cb6e901 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/test_matchers.h @@ -0,0 +1,88 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/test_matchers.h +// ----------------------------------------------------------------------------- +// +// This file declares Googletest's matchers used in the Abseil Logging library +// unit tests. + +#ifndef ABSL_LOG_INTERNAL_TEST_MATCHERS_H_ +#define ABSL_LOG_INTERNAL_TEST_MATCHERS_H_ + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/log_entry.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +// These matchers correspond to the components of `absl::LogEntry`. +::testing::Matcher SourceFilename( + const ::testing::Matcher& source_filename); +::testing::Matcher SourceBasename( + const ::testing::Matcher& source_basename); +// Be careful with this one; multi-line statements using `__LINE__` evaluate +// differently on different platforms. 
In particular, the MSVC implementation +// of `EXPECT_DEATH` returns the line number of the macro expansion to all lines +// within the code block that's expected to die. +::testing::Matcher SourceLine( + const ::testing::Matcher& source_line); +::testing::Matcher Prefix( + const ::testing::Matcher& prefix); +::testing::Matcher LogSeverity( + const ::testing::Matcher& log_severity); +::testing::Matcher Timestamp( + const ::testing::Matcher& timestamp); +// Matches if the `LogEntry`'s timestamp falls after the instantiation of this +// matcher and before its execution, as is normal when used with EXPECT_CALL. +::testing::Matcher TimestampInMatchWindow(); +::testing::Matcher ThreadID( + const ::testing::Matcher&); +::testing::Matcher TextMessageWithPrefixAndNewline( + const ::testing::Matcher& + text_message_with_prefix_and_newline); +::testing::Matcher TextMessageWithPrefix( + const ::testing::Matcher& text_message_with_prefix); +::testing::Matcher TextMessage( + const ::testing::Matcher& text_message); +::testing::Matcher TextPrefix( + const ::testing::Matcher& text_prefix); +::testing::Matcher Verbosity( + const ::testing::Matcher& verbosity); +::testing::Matcher Stacktrace( + const ::testing::Matcher& stacktrace); +// Behaves as `Eq(stream.str())`, but produces better failure messages. +::testing::Matcher MatchesOstream( + const std::ostringstream& stream); +::testing::Matcher DeathTestValidateExpectations(); + +#define ENCODED_MESSAGE(message_matcher) ::testing::_ + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_TEST_MATCHERS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/voidify.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/voidify.h new file mode 100644 index 0000000000..8f62da20a0 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/internal/voidify.h @@ -0,0 +1,44 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/internal/voidify.h +// ----------------------------------------------------------------------------- +// +// This class is used to explicitly ignore values in the conditional logging +// macros. This avoids compiler warnings like "value computed is not used" and +// "statement has no effect". 
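A minimal sketch of how such a voidify helper is conventionally combined with `?:` and a stream (illustrative only; this is not necessarily the exact macro wiring used elsewhere in this patch):

    #include <iostream>
    #include "absl/log/internal/voidify.h"

    void Demo(bool enabled) {
      // Because `&&` binds more loosely than `<<` but more tightly than `?:`,
      // the whole stream expression is evaluated (when enabled) and then
      // swallowed, so both arms of the conditional have type void and neither
      // triggers "unused value" / "no effect" warnings.
      !enabled ? (void)0
               : absl::log_internal::Voidify() && std::cerr << "message\n";
    }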
+ +#ifndef ABSL_LOG_INTERNAL_VOIDIFY_H_ +#define ABSL_LOG_INTERNAL_VOIDIFY_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +class Voidify final { + public: + // This has to be an operator with a precedence lower than << but higher than + // ?: + template + void operator&&(const T&) const&& {} +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_INTERNAL_VOIDIFY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log.h new file mode 100644 index 0000000000..4cd52041f1 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log.h @@ -0,0 +1,435 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/log.h +// ----------------------------------------------------------------------------- +// +// This header declares a family of LOG macros. +// +// Basic invocation looks like this: +// +// LOG(INFO) << "Found " << num_cookies << " cookies"; +// +// Most `LOG` macros take a severity level argument. The severity levels are +// `INFO`, `WARNING`, `ERROR`, and `FATAL`. They are defined +// in absl/base/log_severity.h. +// * The `FATAL` severity level terminates the program with a stack trace after +// logging its message. Error handlers registered with `RunOnFailure` +// (process_state.h) are run, but exit handlers registered with `atexit(3)` +// are not. +// * The `QFATAL` pseudo-severity level is equivalent to `FATAL` but triggers +// quieter termination messages, e.g. without a full stack trace, and skips +// running registered error handlers. +// Some preprocessor shenanigans are used to ensure that e.g. `LOG(INFO)` has +// the same meaning even if a local symbol or preprocessor macro named `INFO` is +// defined. To specify a severity level using an expression instead of a +// literal, use `LEVEL(expr)`. +// Example: +// +// LOG(LEVEL(stale ? absl::LogSeverity::kWarning : absl::LogSeverity::kInfo)) +// << "Cookies are " << days << " days old"; + +// `LOG` macros evaluate to an unterminated statement. The value at the end of +// the statement supports some chainable methods: +// +// * .AtLocation(absl::string_view file, int line) +// .AtLocation(absl::SourceLocation loc) +// Overrides the location inferred from the callsite. The string pointed to +// by `file` must be valid until the end of the statement. +// * .NoPrefix() +// Omits the prefix from this line. The prefix includes metadata about the +// logged data such as source code location and timestamp. +// * .WithTimestamp(absl::Time timestamp) +// Uses the specified timestamp instead of one collected at the time of +// execution. +// * .WithThreadID(absl::LogEntry::tid_t tid) +// Uses the specified thread ID instead of one collected at the time of +// execution. 
+// * .WithMetadataFrom(const absl::LogEntry &entry) +// Copies all metadata (but no data) from the specified `absl::LogEntry`. +// This can be used to change the severity of a message, but it has some +// limitations: +// * `ABSL_MIN_LOG_LEVEL` is evaluated against the severity passed into +// `LOG` (or the implicit `FATAL` level of `CHECK`). +// * `LOG(FATAL)` and `CHECK` terminate the process unconditionally, even if +// the severity is changed later. +// `.WithMetadataFrom(entry)` should almost always be used in combination +// with `LOG(LEVEL(entry.log_severity()))`. +// * .WithPerror() +// Appends to the logged message a colon, a space, a textual description of +// the current value of `errno` (as by `strerror(3)`), and the numerical +// value of `errno`. +// * .ToSinkAlso(absl::LogSink* sink) +// Sends this message to `*sink` in addition to whatever other sinks it +// would otherwise have been sent to. `sink` must not be null. +// * .ToSinkOnly(absl::LogSink* sink) +// Sends this message to `*sink` and no others. `sink` must not be null. +// +// No interfaces in this header are async-signal-safe; their use in signal +// handlers is unsupported and may deadlock your program or eat your lunch. +// +// Many logging statements are inherently conditional. For example, +// `LOG_IF(INFO, !foo)` does nothing if `foo` is true. Even seemingly +// unconditional statements like `LOG(INFO)` might be disabled at +// compile-time to minimize binary size or for security reasons. +// +// * Except for the condition in a `CHECK` or `QCHECK` statement, programs must +// not rely on evaluation of expressions anywhere in logging statements for +// correctness. For example, this is ok: +// +// CHECK((fp = fopen("config.ini", "r")) != nullptr); +// +// But this is probably not ok: +// +// LOG(INFO) << "Server status: " << StartServerAndReturnStatusString(); +// +// The example below is bad too; the `i++` in the `LOG_IF` condition might +// not be evaluated, resulting in an infinite loop: +// +// for (int i = 0; i < 1000000;) +// LOG_IF(INFO, i++ % 1000 == 0) << "Still working..."; +// +// * Except where otherwise noted, conditions which cause a statement not to log +// also cause expressions not to be evaluated. Programs may rely on this for +// performance reasons, e.g. by streaming the result of an expensive function +// call into a `DLOG` or `LOG_EVERY_N` statement. +// * Care has been taken to ensure that expressions are parsed by the compiler +// even if they are never evaluated. This means that syntax errors will be +// caught and variables will be considered used for the purposes of +// unused-variable diagnostics. For example, this statement won't compile +// even if `INFO`-level logging has been compiled out: +// +// int number_of_cakes = 40; +// LOG(INFO) << "Number of cakes: " << number_of_cake; // Note the typo! +// +// Similarly, this won't produce unused-variable compiler diagnostics even +// if `INFO`-level logging is compiled out: +// +// { +// char fox_line1[] = "Hatee-hatee-hatee-ho!"; +// LOG_IF(ERROR, false) << "The fox says " << fox_line1; +// char fox_line2[] = "A-oo-oo-oo-ooo!"; +// LOG(INFO) << "The fox also says " << fox_line2; +// } +// +// This error-checking is not perfect; for example, symbols that have been +// declared but not defined may not produce link errors if used in logging +// statements that compile away. 
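Putting the pieces above together, a usage sketch of the documented mutator methods and the `LEVEL()` form (illustrative only; `audit_sink` is a caller-supplied non-null `absl::LogSink*`):

    #include "absl/base/log_severity.h"
    #include "absl/log/log.h"
    #include "absl/log/log_sink.h"

    void Report(absl::LogSink* audit_sink, bool stale, int days) {
      // Severity chosen at run time, prefix suppressed, errno text appended,
      // and the entry additionally sent to `audit_sink`.
      LOG(LEVEL(stale ? absl::LogSeverity::kWarning : absl::LogSeverity::kInfo))
              .NoPrefix()
              .WithPerror()
              .ToSinkAlso(audit_sink)
          << "Cookies are " << days << " days old";
    }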
+// +// Expressions streamed into these macros are formatted using `operator<<` just +// as they would be if streamed into a `std::ostream`, however it should be +// noted that their actual type is unspecified. +// +// To implement a custom formatting operator for a type you own, there are two +// options: `AbslStringify()` or `std::ostream& operator<<(std::ostream&, ...)`. +// It is recommended that users make their types loggable through +// `AbslStringify()` as it is a universal stringification extension that also +// enables `absl::StrFormat` and `absl::StrCat` support. If both +// `AbslStringify()` and `std::ostream& operator<<(std::ostream&, ...)` are +// defined, `AbslStringify()` will be used. +// +// To use the `AbslStringify()` API, define a friend function template in your +// type's namespace with the following signature: +// +// template +// void AbslStringify(Sink& sink, const UserDefinedType& value); +// +// `Sink` has the same interface as `absl::FormatSink`, but without +// `PutPaddedString()`. +// +// Example: +// +// struct Point { +// template +// friend void AbslStringify(Sink& sink, const Point& p) { +// absl::Format(&sink, "(%v, %v)", p.x, p.y); +// } +// +// int x; +// int y; +// }; +// +// To use `std::ostream& operator<<(std::ostream&, ...)`, define +// `std::ostream& operator<<(std::ostream&, ...)` in your type's namespace (for +// ADL) just as you would to stream it to `std::cout`. +// +// Currently `AbslStringify()` ignores output manipulators but this is not +// guaranteed behavior and may be subject to change in the future. If you would +// like guaranteed behavior regarding output manipulators, please use +// `std::ostream& operator<<(std::ostream&, ...)` to make custom types loggable +// instead. +// +// Those macros that support streaming honor output manipulators and `fmtflag` +// changes that output data (e.g. `std::ends`) or control formatting of data +// (e.g. `std::hex` and `std::fixed`), however flushing such a stream is +// ignored. The message produced by a log statement is sent to registered +// `absl::LogSink` instances at the end of the statement; those sinks are +// responsible for their own flushing (e.g. to disk) semantics. +// +// Flag settings are not carried over from one `LOG` statement to the next; this +// is a bit different than e.g. `std::cout`: +// +// LOG(INFO) << std::hex << 0xdeadbeef; // logs "0xdeadbeef" +// LOG(INFO) << 0xdeadbeef; // logs "3735928559" + +#ifndef ABSL_LOG_LOG_H_ +#define ABSL_LOG_LOG_H_ + +#include "absl/log/internal/conditions.h" +#include "absl/log/internal/log_message.h" +#include "absl/log/internal/strip.h" + +// LOG() +// +// `LOG` takes a single argument which is a severity level. Data streamed in +// comprise the logged message. +// Example: +// +// LOG(INFO) << "Found " << num_cookies << " cookies"; +#define LOG(severity) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, true) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +// PLOG() +// +// `PLOG` behaves like `LOG` except that a description of the current state of +// `errno` is appended to the streamed message. +#define PLOG(severity) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, true) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +// DLOG() +// +// `DLOG` behaves like `LOG` in debug mode (i.e. `#ifndef NDEBUG`). Otherwise +// it compiles away and does nothing. Note that `DLOG(FATAL)` does not +// terminate the program if `NDEBUG` is defined. 
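As a small usage sketch of the `PLOG` and `DLOG` behavior described above (illustrative only; the file path is hypothetical):

    #include <fcntl.h>
    #include <unistd.h>
    #include "absl/log/log.h"

    void LoadConfig() {
      int fd = open("/etc/example.conf", O_RDONLY);
      if (fd < 0) {
        // Appends a textual description of errno (as by strerror(3)) and its
        // numeric value to the streamed message.
        PLOG(ERROR) << "open() failed";
        return;
      }
      // Compiled in only when NDEBUG is not defined; the expression is still
      // parsed (so typos are caught) even when it compiles away.
      DLOG(INFO) << "config fd: " << fd;
      close(fd);
    }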
+#ifndef NDEBUG +#define DLOG(severity) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, true) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#else +#define DLOG(severity) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, false) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#endif + +// `LOG_IF` and friends add a second argument which specifies a condition. If +// the condition is false, nothing is logged. +// Example: +// +// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies"; +#define LOG_IF(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, condition) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#define PLOG_IF(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, condition) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#ifndef NDEBUG +#define DLOG_IF(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, condition) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#else +#define DLOG_IF(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATELESS, false && (condition)) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#endif + +// LOG_EVERY_N +// +// An instance of `LOG_EVERY_N` increments a hidden zero-initialized counter +// every time execution passes through it and logs the specified message when +// the counter's value is a multiple of `n`, doing nothing otherwise. Each +// instance has its own counter. The counter's value can be logged by streaming +// the symbol `COUNTER`. `LOG_EVERY_N` is thread-safe. +// Example: +// +// LOG_EVERY_N(WARNING, 1000) << "Got a packet with a bad CRC (" << COUNTER +// << " total)"; +#define LOG_EVERY_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(EveryN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +// LOG_FIRST_N +// +// `LOG_FIRST_N` behaves like `LOG_EVERY_N` except that the specified message is +// logged when the counter's value is less than `n`. `LOG_FIRST_N` is +// thread-safe. +#define LOG_FIRST_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(FirstN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +// LOG_EVERY_POW_2 +// +// `LOG_EVERY_POW_2` behaves like `LOG_EVERY_N` except that the specified +// message is logged when the counter's value is a power of 2. +// `LOG_EVERY_POW_2` is thread-safe. +#define LOG_EVERY_POW_2(severity) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(EveryPow2) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +// LOG_EVERY_N_SEC +// +// An instance of `LOG_EVERY_N_SEC` uses a hidden state variable to log the +// specified message at most once every `n_seconds`. A hidden counter of +// executions (whether a message is logged or not) is also maintained and can be +// logged by streaming the symbol `COUNTER`. `LOG_EVERY_N_SEC` is thread-safe. 
+// Example: +// +// LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far"; +#define LOG_EVERY_N_SEC(severity, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(EveryNSec, n_seconds) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define PLOG_EVERY_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(EveryN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#define PLOG_FIRST_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(FirstN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#define PLOG_EVERY_POW_2(severity) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(EveryPow2) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#define PLOG_EVERY_N_SEC(severity, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, true)(EveryNSec, n_seconds) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#ifndef NDEBUG +#define DLOG_EVERY_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \ + (EveryN, n) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_FIRST_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \ + (FirstN, n) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_EVERY_POW_2(severity) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \ + (EveryPow2) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_EVERY_N_SEC(severity, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \ + (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#else // def NDEBUG +#define DLOG_EVERY_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \ + (EveryN, n) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_FIRST_N(severity, n) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \ + (FirstN, n) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_EVERY_POW_2(severity) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \ + (EveryPow2) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_EVERY_N_SEC(severity, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \ + (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#endif // def NDEBUG + +// `LOG_IF_EVERY_N` and friends behave as the corresponding `LOG_EVERY_N` +// but neither increment a counter nor log a message if condition is false (as +// `LOG_IF`). 
+// Example: +// +// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER +// << "th big cookie"; +#define LOG_IF_EVERY_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define LOG_IF_FIRST_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(FirstN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define LOG_IF_EVERY_POW_2(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryPow2) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define LOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryNSec, \ + n_seconds) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define PLOG_IF_EVERY_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#define PLOG_IF_FIRST_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(FirstN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#define PLOG_IF_EVERY_POW_2(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryPow2) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#define PLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryNSec, \ + n_seconds) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() \ + .WithPerror() + +#ifndef NDEBUG +#define DLOG_IF_EVERY_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_IF_FIRST_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(FirstN, n) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_IF_EVERY_POW_2(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryPow2) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, condition)(EveryNSec, \ + n_seconds) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#else // def NDEBUG +#define DLOG_IF_EVERY_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, false && (condition))( \ + EveryN, n) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_IF_FIRST_N(severity, condition, n) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, false && (condition))( \ + FirstN, n) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_IF_EVERY_POW_2(severity, condition) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, false && (condition))( \ + EveryPow2) ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() + +#define DLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \ + ABSL_LOG_INTERNAL_CONDITION_##severity(STATEFUL, false && (condition))( \ + EveryNSec, n_seconds) \ + ABSL_LOGGING_INTERNAL_LOG_##severity.InternalStream() +#endif // def NDEBUG + +#endif // ABSL_LOG_LOG_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_benchmark.cc new file 
mode 100644 index 0000000000..45d9a5d6fd --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_benchmark.cc @@ -0,0 +1,97 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/attributes.h" +#include "absl/base/log_severity.h" +#include "absl/flags/flag.h" +#include "absl/log/check.h" +#include "absl/log/globals.h" +#include "absl/log/log.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" +#include "absl/log/log_sink_registry.h" +#include "benchmark/benchmark.h" + +namespace { + +class NullLogSink : public absl::LogSink { + public: + NullLogSink() { absl::AddLogSink(this); } + + ~NullLogSink() override { absl::RemoveLogSink(this); } + + void Send(const absl::LogEntry&) override {} +}; + +constexpr int x = -1; + +void BM_SuccessfulBinaryCheck(benchmark::State& state) { + int n = 0; + while (state.KeepRunningBatch(8)) { + CHECK_GE(n, x); + CHECK_GE(n, x); + CHECK_GE(n, x); + CHECK_GE(n, x); + CHECK_GE(n, x); + CHECK_GE(n, x); + CHECK_GE(n, x); + CHECK_GE(n, x); + ++n; + } + benchmark::DoNotOptimize(n); +} +BENCHMARK(BM_SuccessfulBinaryCheck); + +static void BM_SuccessfulUnaryCheck(benchmark::State& state) { + int n = 0; + while (state.KeepRunningBatch(8)) { + CHECK(n >= x); + CHECK(n >= x); + CHECK(n >= x); + CHECK(n >= x); + CHECK(n >= x); + CHECK(n >= x); + CHECK(n >= x); + CHECK(n >= x); + ++n; + } + benchmark::DoNotOptimize(n); +} +BENCHMARK(BM_SuccessfulUnaryCheck); + +static void BM_DisabledLogOverhead(benchmark::State& state) { + absl::ScopedStderrThreshold disable_stderr_logging( + absl::LogSeverityAtLeast::kInfinity); + absl::log_internal::ScopedMinLogLevel scoped_min_log_level( + absl::LogSeverityAtLeast::kInfinity); + for (auto _ : state) { + LOG(INFO); + } +} +BENCHMARK(BM_DisabledLogOverhead); + +static void BM_EnabledLogOverhead(benchmark::State& state) { + absl::ScopedStderrThreshold stderr_logging( + absl::LogSeverityAtLeast::kInfinity); + absl::log_internal::ScopedMinLogLevel scoped_min_log_level( + absl::LogSeverityAtLeast::kInfo); + ABSL_ATTRIBUTE_UNUSED NullLogSink null_sink; + for (auto _ : state) { + LOG(INFO); + } +} +BENCHMARK(BM_EnabledLogOverhead); + +} // namespace + diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_disable.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry.cc similarity index 63% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_disable.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry.cc index 924d6e3d54..19c3b3f1be 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/debugging/leak_check_disable.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry.cc @@ -1,4 +1,5 @@ -// Copyright 2017 The Abseil Authors. +// +// Copyright 2022 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,9 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Disable LeakSanitizer when this file is linked in. -// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h -extern "C" int __lsan_is_turned_off(); -extern "C" int __lsan_is_turned_off() { - return 1; -} +#include "absl/log/log_entry.h" + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr int LogEntry::kNoVerbosityLevel; +constexpr int LogEntry::kNoVerboseLevel; +#endif + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry.h new file mode 100644 index 0000000000..9e4ae8eb76 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry.h @@ -0,0 +1,220 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/log_entry.h +// ----------------------------------------------------------------------------- +// +// This header declares `class absl::LogEntry`, which represents a log record as +// passed to `LogSink::Send`. Data returned by pointer or by reference or by +// `absl::string_view` must be copied if they are needed after the lifetime of +// the `absl::LogEntry`. + +#ifndef ABSL_LOG_LOG_ENTRY_H_ +#define ABSL_LOG_LOG_ENTRY_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/internal/config.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace log_internal { +// Test only friend. +class LogEntryTestPeer; +class LogMessage; +} // namespace log_internal + +// LogEntry +// +// Represents a single entry in a log, i.e., one `LOG` statement or failed +// `CHECK`. +// +// `LogEntry` is thread-compatible. +class LogEntry final { + public: + using tid_t = log_internal::Tid; + + // For non-verbose log entries, `verbosity()` returns `kNoVerbosityLevel`. + static constexpr int kNoVerbosityLevel = -1; + static constexpr int kNoVerboseLevel = -1; // TO BE removed + + // Pass `LogEntry` by reference, and do not store it as its state does not + // outlive the call to `LogSink::Send()`. + LogEntry(const LogEntry&) = delete; + LogEntry& operator=(const LogEntry&) = delete; + + // Source file and line where the log message occurred. Taken from `__FILE__` + // and `__LINE__` unless overridden by `LOG(...).AtLocation(...)`. 
+ // + // Take special care not to use the values returned by `source_filename()` and + // `source_basename()` after the lifetime of the entry. This is always + // incorrect, but it will often work in practice because they usually point + // into a statically allocated character array obtained from `__FILE__`. + // Statements like `LOG(INFO).AtLocation(std::string(...), ...)` will expose + // the bug. If you need the data later, you must copy them. + absl::string_view source_filename() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return full_filename_; + } + absl::string_view source_basename() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return base_filename_; + } + int source_line() const { return line_; } + + // LogEntry::prefix() + // + // True unless the metadata prefix was suppressed once by + // `LOG(...).NoPrefix()` or globally by `absl::EnableLogPrefix(false)`. + // Implies `text_message_with_prefix() == text_message()`. + bool prefix() const { return prefix_; } + + // LogEntry::log_severity() + // + // Returns this entry's severity. For `LOG`, taken from the first argument; + // for `CHECK`, always `absl::LogSeverity::kFatal`. + absl::LogSeverity log_severity() const { return severity_; } + + // LogEntry::verbosity() + // + // Returns this entry's verbosity, or `kNoVerbosityLevel` for a non-verbose + // entry. Verbosity control is not available outside of Google yet. + int verbosity() const { return verbose_level_; } + + // LogEntry::timestamp() + // + // Returns the time at which this entry was written. Captured during + // evaluation of `LOG`, but can be overridden by + // `LOG(...).WithTimestamp(...)`. + // + // Take care not to rely on timestamps increasing monotonically, or even to + // rely on timestamps having any particular relationship with reality (since + // they can be overridden). + absl::Time timestamp() const { return timestamp_; } + + // LogEntry::tid() + // + // Returns the ID of the thread that wrote this entry. Captured during + // evaluation of `LOG`, but can be overridden by `LOG(...).WithThreadID(...)`. + // + // Take care not to *rely* on reported thread IDs as they can be overridden as + // specified above. + tid_t tid() const { return tid_; } + + // Text-formatted version of the log message. An underlying buffer holds + // these contiguous data: + // + // * A prefix formed by formatting metadata (timestamp, filename, line number, + // etc.) + // The prefix may be empty - see `LogEntry::prefix()` - and may rarely be + // truncated if the metadata are very long. + // * The streamed data + // The data may be empty if nothing was streamed, or may be truncated to fit + // the buffer. + // * A newline + // * A nul terminator + // + // The newline and nul terminator will be present even if the prefix and/or + // data are truncated. + // + // These methods give access to the most commonly useful substrings of the + // buffer's contents. Other combinations can be obtained with substring + // arithmetic. + // + // The buffer does not outlive the entry; if you need the data later, you must + // copy them. 
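A concrete reading of the buffer layout just described, using the accessors defined below (the prefix text is hypothetical; only the substring relationships are the point):

    // Suppose the underlying buffer holds
    //   "<prefix> hello world\n\0"
    // where "<prefix> " stands for whatever metadata prefix was formatted. Then:
    //   text_message_with_prefix_and_newline() == "<prefix> hello world\n"
    //   text_message_with_prefix()             == "<prefix> hello world"
    //   text_message_with_newline()            == "hello world\n"
    //   text_message()                         == "hello world"
    //   text_message_with_prefix_and_newline_c_str() additionally exposes the
    //   trailing NUL for C APIs.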
+ absl::string_view text_message_with_prefix_and_newline() const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return absl::string_view( + text_message_with_prefix_and_newline_and_nul_.data(), + text_message_with_prefix_and_newline_and_nul_.size() - 1); + } + absl::string_view text_message_with_prefix() const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return absl::string_view( + text_message_with_prefix_and_newline_and_nul_.data(), + text_message_with_prefix_and_newline_and_nul_.size() - 2); + } + absl::string_view text_message_with_newline() const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return absl::string_view( + text_message_with_prefix_and_newline_and_nul_.data() + prefix_len_, + text_message_with_prefix_and_newline_and_nul_.size() - prefix_len_ - 1); + } + absl::string_view text_message() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return absl::string_view( + text_message_with_prefix_and_newline_and_nul_.data() + prefix_len_, + text_message_with_prefix_and_newline_and_nul_.size() - prefix_len_ - 2); + } + const char* text_message_with_prefix_and_newline_c_str() const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return text_message_with_prefix_and_newline_and_nul_.data(); + } + + // Returns a serialized protobuf holding the operands streamed into this + // log message. The message definition is not yet published. + // + // The buffer does not outlive the entry; if you need the data later, you must + // copy them. + absl::string_view encoded_message() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return encoding_; + } + + // LogEntry::stacktrace() + // + // Optional stacktrace, e.g. for `FATAL` logs and failed `CHECK`s. + // + // Fatal entries are dispatched to each sink twice: first with all data and + // metadata but no stacktrace, and then with the stacktrace. This is done + // because stacktrace collection is sometimes slow and fallible, and it's + // critical to log enough information to diagnose the failure even if the + // stacktrace collection hangs. + // + // The buffer does not outlive the entry; if you need the data later, you must + // copy them. + absl::string_view stacktrace() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return stacktrace_; + } + + private: + LogEntry() = default; + + absl::string_view full_filename_; + absl::string_view base_filename_; + int line_; + bool prefix_; + absl::LogSeverity severity_; + int verbose_level_; // >=0 for `VLOG`, etc.; otherwise `kNoVerbosityLevel`. + absl::Time timestamp_; + tid_t tid_; + absl::Span text_message_with_prefix_and_newline_and_nul_; + size_t prefix_len_; + absl::string_view encoding_; + std::string stacktrace_; + + friend class log_internal::LogEntryTestPeer; + friend class log_internal::LogMessage; +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_LOG_ENTRY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry_test.cc new file mode 100644 index 0000000000..d9bfa1f49d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_entry_test.cc @@ -0,0 +1,468 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/log_entry.h" + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/internal/append_truncated.h" +#include "absl/log/internal/log_format.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" +#include "absl/time/civil_time.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +namespace { +using ::absl::log_internal::LogEntryTestPeer; +using ::testing::Eq; +using ::testing::IsTrue; +using ::testing::StartsWith; +using ::testing::StrEq; + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); +} // namespace + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace log_internal { + +class LogEntryTestPeer { + public: + LogEntryTestPeer(absl::string_view base_filename, int line, bool prefix, + absl::LogSeverity severity, absl::string_view timestamp, + absl::LogEntry::tid_t tid, PrefixFormat format, + absl::string_view text_message) + : format_{format}, buf_(15000, '\0') { + entry_.base_filename_ = base_filename; + entry_.line_ = line; + entry_.prefix_ = prefix; + entry_.severity_ = severity; + std::string time_err; + EXPECT_THAT( + absl::ParseTime("%Y-%m-%d%ET%H:%M:%E*S", timestamp, + absl::LocalTimeZone(), &entry_.timestamp_, &time_err), + IsTrue()) + << "Failed to parse time " << timestamp << ": " << time_err; + entry_.tid_ = tid; + std::pair timestamp_bits = + absl::StrSplit(timestamp, absl::ByChar('.')); + EXPECT_THAT(absl::ParseCivilTime(timestamp_bits.first, &ci_.cs), IsTrue()) + << "Failed to parse time " << timestamp_bits.first; + timestamp_bits.second.resize(9, '0'); + int64_t nanos = 0; + EXPECT_THAT(absl::SimpleAtoi(timestamp_bits.second, &nanos), IsTrue()) + << "Failed to parse time " << timestamp_bits.first; + ci_.subsecond = absl::Nanoseconds(nanos); + + absl::Span view = absl::MakeSpan(buf_); + view.remove_suffix(2); + entry_.prefix_len_ = + entry_.prefix_ + ? 
log_internal::FormatLogPrefix( + entry_.log_severity(), entry_.timestamp(), entry_.tid(), + entry_.source_basename(), entry_.source_line(), format_, view) + : 0; + + EXPECT_THAT(entry_.prefix_len_, + Eq(static_cast<size_t>(view.data() - buf_.data()))); + log_internal::AppendTruncated(text_message, view); + view = absl::Span<char>(view.data(), view.size() + 2); + view[0] = '\n'; + view[1] = '\0'; + view.remove_prefix(2); + buf_.resize(static_cast<size_t>(view.data() - buf_.data())); + entry_.text_message_with_prefix_and_newline_and_nul_ = absl::MakeSpan(buf_); + } + LogEntryTestPeer(const LogEntryTestPeer&) = delete; + LogEntryTestPeer& operator=(const LogEntryTestPeer&) = delete; + + std::string FormatLogMessage() const { + return log_internal::FormatLogMessage( + entry_.log_severity(), ci_.cs, ci_.subsecond, entry_.tid(), + entry_.source_basename(), entry_.source_line(), format_, + entry_.text_message()); + } + std::string FormatPrefixIntoSizedBuffer(size_t sz) { + std::string str(sz, '\0'); + absl::Span<char> buf(&str[0], str.size()); + const size_t prefix_size = log_internal::FormatLogPrefix( + entry_.log_severity(), entry_.timestamp(), entry_.tid(), + entry_.source_basename(), entry_.source_line(), format_, buf); + EXPECT_THAT(prefix_size, Eq(static_cast<size_t>(buf.data() - str.data()))); + str.resize(prefix_size); + return str; + } + const absl::LogEntry& entry() const { return entry_; } + + private: + absl::LogEntry entry_; + PrefixFormat format_; + absl::TimeZone::CivilInfo ci_; + std::vector<char> buf_; +}; + +} // namespace log_internal +ABSL_NAMESPACE_END +} // namespace absl + +namespace { +constexpr bool kUsePrefix = true, kNoPrefix = false; + +TEST(LogEntryTest, Baseline) { + LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05.6789", 451, + absl::log_internal::PrefixFormat::kNotRaw, + "hello world"); + EXPECT_THAT(entry.FormatLogMessage(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] ")); + for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20; + sz != std::numeric_limits<size_t>::max(); sz--) + EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n")); + EXPECT_THAT(entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world")); + EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); +} + +TEST(LogEntryTest, NoPrefix) { + LogEntryTestPeer entry("foo.cc", 1234, kNoPrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05.6789", 451, + absl::log_internal::PrefixFormat::kNotRaw, + "hello world"); + EXPECT_THAT(entry.FormatLogMessage(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world")); + // These methods are not responsible for honoring `prefix()`.
+ EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] ")); + for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), + Eq("hello world\n")); + EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("hello world\n")); + EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("hello world")); + EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); +} + +TEST(LogEntryTest, EmptyFields) { + LogEntryTestPeer entry("", 0, kUsePrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05", 0, + absl::log_internal::PrefixFormat::kNotRaw, ""); + const std::string format_message = entry.FormatLogMessage(); + EXPECT_THAT(format_message, Eq("I0102 03:04:05.000000 0 :0] ")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq(format_message)); + for (size_t sz = format_message.size() + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT(format_message, + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.000000 0 :0] \n")); + EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.000000 0 :0] \n")); + EXPECT_THAT(entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.000000 0 :0] ")); + EXPECT_THAT(entry.entry().text_message(), Eq("")); +} + +TEST(LogEntryTest, NegativeFields) { + // When Abseil's minimum C++ version is C++17, this conditional can be + // converted to a constexpr if and the static_cast below removed. 
+ if (std::is_signed::value) { + LogEntryTestPeer entry( + "foo.cc", -1234, kUsePrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05.6789", static_cast(-451), + absl::log_internal::PrefixFormat::kNotRaw, "hello world"); + EXPECT_THAT(entry.FormatLogMessage(), + Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] ")); + for (size_t sz = + strlen("I0102 03:04:05.678900 -451 foo.cc:-1234] ") + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT("I0102 03:04:05.678900 -451 foo.cc:-1234] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n")); + EXPECT_THAT(entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world")); + EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); + } else { + LogEntryTestPeer entry("foo.cc", -1234, kUsePrefix, + absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789", + 451, absl::log_internal::PrefixFormat::kNotRaw, + "hello world"); + EXPECT_THAT(entry.FormatLogMessage(), + Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678900 451 foo.cc:-1234] ")); + for (size_t sz = + strlen("I0102 03:04:05.678900 451 foo.cc:-1234] ") + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:-1234] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n")); + EXPECT_THAT(entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world")); + EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); + } +} + +TEST(LogEntryTest, LongFields) { + LogEntryTestPeer entry( + "I am the very model of a modern Major-General / " + "I've information vegetable, animal, and mineral.", + 2147483647, kUsePrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05.678967896789", 2147483647, + absl::log_internal::PrefixFormat::kNotRaw, + "I know the kings of England, and I quote the fights historical / " + "From Marathon to Waterloo, in order categorical."); + EXPECT_THAT(entry.FormatLogMessage(), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:2147483647] ")); + for (size_t sz = + strlen("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:2147483647] ") + + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT( + "I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General 
/ I've information vegetable, animal, " + "and mineral.:2147483647] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.\n")); + EXPECT_THAT(entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.")); + EXPECT_THAT( + entry.entry().text_message(), + Eq("I know the kings of England, and I quote the fights historical / " + "From Marathon to Waterloo, in order categorical.")); +} + +TEST(LogEntryTest, LongNegativeFields) { + // When Abseil's minimum C++ version is C++17, this conditional can be + // converted to a constexpr if and the static_cast below removed. + if (std::is_signed::value) { + LogEntryTestPeer entry( + "I am the very model of a modern Major-General / " + "I've information vegetable, animal, and mineral.", + -2147483647, kUsePrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05.678967896789", + static_cast(-2147483647), + absl::log_internal::PrefixFormat::kNotRaw, + "I know the kings of England, and I quote the fights historical / " + "From Marathon to Waterloo, in order categorical."); + EXPECT_THAT( + entry.FormatLogMessage(), + Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] ")); + for (size_t sz = + strlen( + "I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] ") + + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT( + "I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern 
Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.")); + EXPECT_THAT( + entry.entry().text_message(), + Eq("I know the kings of England, and I quote the fights historical / " + "From Marathon to Waterloo, in order categorical.")); + } else { + LogEntryTestPeer entry( + "I am the very model of a modern Major-General / " + "I've information vegetable, animal, and mineral.", + -2147483647, kUsePrefix, absl::LogSeverity::kInfo, + "2020-01-02T03:04:05.678967896789", 2147483647, + absl::log_internal::PrefixFormat::kNotRaw, + "I know the kings of England, and I quote the fights historical / " + "From Marathon to Waterloo, in order categorical."); + EXPECT_THAT( + entry.FormatLogMessage(), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] ")); + for (size_t sz = + strlen( + "I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] ") + + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT( + "I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " + "modern Major-General / I've information vegetable, animal, " + "and mineral.:-2147483647] I know the kings of England, and I " + "quote the fights historical / From Marathon to Waterloo, in " + "order categorical.")); + EXPECT_THAT( + entry.entry().text_message(), + Eq("I know the kings of England, and I quote the fights historical / " + "From Marathon to Waterloo, in order categorical.")); + } +} + +TEST(LogEntryTest, Raw) { + LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo, + 
"2020-01-02T03:04:05.6789", 451, + absl::log_internal::PrefixFormat::kRaw, "hello world"); + EXPECT_THAT( + entry.FormatLogMessage(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world")); + EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ")); + for (size_t sz = + strlen("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ") + 20; + sz != std::numeric_limits::max(); sz--) + EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ", + StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); + + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix_and_newline_c_str(), + StrEq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n")); + EXPECT_THAT( + entry.entry().text_message_with_prefix(), + Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world")); + EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_format_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_format_test.cc new file mode 100644 index 0000000000..cf087ca859 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_format_test.cc @@ -0,0 +1,1678 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __ANDROID__ +#include +#endif +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/log/internal/test_matchers.h" +#include "absl/log/log.h" +#include "absl/log/scoped_mock_log.h" +#include "absl/strings/match.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" + +namespace { +using ::absl::log_internal::MatchesOstream; +using ::absl::log_internal::TextMessage; +using ::absl::log_internal::TextPrefix; + +using ::testing::AllOf; +using ::testing::AnyOf; +using ::testing::Each; +using ::testing::ElementsAre; +using ::testing::Eq; +using ::testing::Ge; +using ::testing::IsEmpty; +using ::testing::Le; +using ::testing::ResultOf; +using ::testing::SizeIs; +using ::testing::Truly; +using ::testing::Types; + +// Some aspects of formatting streamed data (e.g. pointer handling) are +// implementation-defined. Others are buggy in supported implementations. +// These tests validate that the formatting matches that performed by a +// `std::ostream` and also that the result is one of a list of expected formats. 
+ +std::ostringstream ComparisonStream() { + std::ostringstream str; + str.setf(std::ios_base::showbase | std::ios_base::boolalpha | + std::ios_base::internal); + return str; +} + +TEST(LogFormatTest, NoMessage) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int log_line = __LINE__ + 1; + auto do_log = [] { LOG(INFO); }; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(ComparisonStream())), + TextPrefix(Truly([=](absl::string_view msg) { + return absl::EndsWith( + msg, absl::StrCat(" log_format_test.cc:", log_line, "] ")); + })), + TextMessage(IsEmpty()), ENCODED_MESSAGE(EqualsProto(R"pb()pb"))))); + + test_sink.StartCapturingLogs(); + do_log(); +} + +template +class CharLogFormatTest : public testing::Test {}; +using CharTypes = Types; +TYPED_TEST_SUITE(CharLogFormatTest, CharTypes); + +TYPED_TEST(CharLogFormatTest, Printable) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = 'x'; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("x")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "x" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(CharLogFormatTest, Unprintable) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + constexpr auto value = static_cast(0xeeu); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("\xee")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "\xee" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +template +class UnsignedIntLogFormatTest : public testing::Test {}; +using UnsignedIntTypes = Types; // NOLINT +TYPED_TEST_SUITE(UnsignedIntLogFormatTest, UnsignedIntTypes); + +TYPED_TEST(UnsignedIntLogFormatTest, Positive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = 224; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("224")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(UnsignedIntLogFormatTest, BitfieldPositive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const struct { + TypeParam bits : 6; + } value{42}; + auto comparison_stream = ComparisonStream(); + comparison_stream << value.bits; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("42")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "42" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value.bits; +} + +template +class SignedIntLogFormatTest : public testing::Test {}; +using SignedIntTypes = + Types; // NOLINT +TYPED_TEST_SUITE(SignedIntLogFormatTest, SignedIntTypes); + +TYPED_TEST(SignedIntLogFormatTest, Positive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = 224; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("224")), + 
ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(SignedIntLogFormatTest, Negative) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = -112; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-112")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "-112" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(SignedIntLogFormatTest, BitfieldPositive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const struct { + TypeParam bits : 6; + } value{21}; + auto comparison_stream = ComparisonStream(); + comparison_stream << value.bits; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("21")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "21" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value.bits; +} + +TYPED_TEST(SignedIntLogFormatTest, BitfieldNegative) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const struct { + TypeParam bits : 6; + } value{-21}; + auto comparison_stream = ComparisonStream(); + comparison_stream << value.bits; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-21")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "-21" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value.bits; +} + +// Ignore these test cases on GCC due to "is too small to hold all values ..." +// warning. +#if !defined(__GNUC__) || defined(__clang__) +// The implementation may choose a signed or unsigned integer type to represent +// this enum, so it may be tested by either `UnsignedEnumLogFormatTest` or +// `SignedEnumLogFormatTest`. 
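Illustrative aside (not part of the patch): as the comment above says, the underlying type of the unfixed enum below is implementation-chosen, so which typed suite exercises it is decided at compile time from that type's signedness. A minimal sketch of that check, using a hypothetical `SampleEnum`:

#include <type_traits>

// Mirrors the std::conditional selection in the test: if the compiler picks a
// signed underlying type, the enum is routed to the signed test suite
// instead of the unsigned one.
enum SampleEnum { kSampleZero = 0 };
constexpr bool kSampleUnderlyingIsSigned =
    std::is_signed<std::underlying_type<SampleEnum>::type>::value;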
+enum MyUnsignedEnum { + MyUnsignedEnum_ZERO = 0, + MyUnsignedEnum_FORTY_TWO = 42, + MyUnsignedEnum_TWO_HUNDRED_TWENTY_FOUR = 224, +}; +enum MyUnsignedIntEnum : unsigned int { + MyUnsignedIntEnum_ZERO = 0, + MyUnsignedIntEnum_FORTY_TWO = 42, + MyUnsignedIntEnum_TWO_HUNDRED_TWENTY_FOUR = 224, +}; + +template +class UnsignedEnumLogFormatTest : public testing::Test {}; +using UnsignedEnumTypes = std::conditional< + std::is_signed::type>::value, + Types, Types>::type; +TYPED_TEST_SUITE(UnsignedEnumLogFormatTest, UnsignedEnumTypes); + +TYPED_TEST(UnsignedEnumLogFormatTest, Positive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = static_cast(224); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("224")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(UnsignedEnumLogFormatTest, BitfieldPositive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const struct { + TypeParam bits : 6; + } value{static_cast(42)}; + auto comparison_stream = ComparisonStream(); + comparison_stream << value.bits; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("42")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "42" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value.bits; +} + +enum MySignedEnum { + MySignedEnum_NEGATIVE_ONE_HUNDRED_TWELVE = -112, + MySignedEnum_NEGATIVE_TWENTY_ONE = -21, + MySignedEnum_ZERO = 0, + MySignedEnum_TWENTY_ONE = 21, + MySignedEnum_TWO_HUNDRED_TWENTY_FOUR = 224, +}; +enum MySignedIntEnum : signed int { + MySignedIntEnum_NEGATIVE_ONE_HUNDRED_TWELVE = -112, + MySignedIntEnum_NEGATIVE_TWENTY_ONE = -21, + MySignedIntEnum_ZERO = 0, + MySignedIntEnum_TWENTY_ONE = 21, + MySignedIntEnum_TWO_HUNDRED_TWENTY_FOUR = 224, +}; + +template +class SignedEnumLogFormatTest : public testing::Test {}; +using SignedEnumTypes = std::conditional< + std::is_signed::type>::value, + Types, + Types>::type; +TYPED_TEST_SUITE(SignedEnumLogFormatTest, SignedEnumTypes); + +TYPED_TEST(SignedEnumLogFormatTest, Positive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = static_cast(224); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("224")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(SignedEnumLogFormatTest, Negative) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = static_cast(-112); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-112")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "-112" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(SignedEnumLogFormatTest, BitfieldPositive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const struct { + TypeParam bits : 6; + } value{static_cast(21)}; + auto comparison_stream = ComparisonStream(); + comparison_stream << value.bits; + + 
EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("21")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "21" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value.bits; +} + +TYPED_TEST(SignedEnumLogFormatTest, BitfieldNegative) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const struct { + TypeParam bits : 6; + } value{static_cast(-21)}; + auto comparison_stream = ComparisonStream(); + comparison_stream << value.bits; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-21")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "-21" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value.bits; +} +#endif + +TEST(FloatLogFormatTest, Positive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const float value = 6.02e23f; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("6.02e+23")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "6.02e+23" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(FloatLogFormatTest, Negative) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const float value = -6.02e23f; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-6.02e+23")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "-6.02e+23" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(FloatLogFormatTest, NegativeExponent) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const float value = 6.02e-23f; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("6.02e-23")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "6.02e-23" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(DoubleLogFormatTest, Positive) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 6.02e23; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("6.02e+23")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "6.02e+23" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(DoubleLogFormatTest, Negative) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = -6.02e23; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-6.02e+23")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "-6.02e+23" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(DoubleLogFormatTest, NegativeExponent) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 6.02e-23; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + 
TextMessage(Eq("6.02e-23")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "6.02e-23" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +template +class FloatingPointLogFormatTest : public testing::Test {}; +using FloatingPointTypes = Types; +TYPED_TEST_SUITE(FloatingPointLogFormatTest, FloatingPointTypes); + +TYPED_TEST(FloatingPointLogFormatTest, Zero) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = 0.0; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("0")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "0" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(FloatingPointLogFormatTest, Integer) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = 1.0; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("1")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "1" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(FloatingPointLogFormatTest, Infinity) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = std::numeric_limits::infinity(); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("inf"), Eq("Inf"))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "inf" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(FloatingPointLogFormatTest, NegativeInfinity) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = -std::numeric_limits::infinity(); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("-inf"), Eq("-Inf"))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "-inf" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(FloatingPointLogFormatTest, NaN) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = std::numeric_limits::quiet_NaN(); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("nan"), Eq("NaN"))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "nan" })pb"))))); + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(FloatingPointLogFormatTest, NegativeNaN) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = + std::copysign(std::numeric_limits::quiet_NaN(), -1.0); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("-nan"), Eq("nan"), Eq("NaN"), Eq("-nan(ind)"))), + ENCODED_MESSAGE( + AnyOf(EqualsProto(R"pb(value { str: "-nan" })pb"), + EqualsProto(R"pb(value { str: "nan" })pb"), + EqualsProto(R"pb(value { str: "-nan(ind)" })pb")))))); + 
test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +template +class VoidPtrLogFormatTest : public testing::Test {}; +using VoidPtrTypes = Types; +TYPED_TEST_SUITE(VoidPtrLogFormatTest, VoidPtrTypes); + +TYPED_TEST(VoidPtrLogFormatTest, Null) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = nullptr; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("(nil)"), Eq("0"), Eq("0x0"), + Eq("00000000"), Eq("0000000000000000")))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(VoidPtrLogFormatTest, NonNull) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = reinterpret_cast(0xdeadbeefULL); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(comparison_stream)), + TextMessage( + AnyOf(Eq("0xdeadbeef"), Eq("DEADBEEF"), Eq("00000000DEADBEEF"))), + ENCODED_MESSAGE(AnyOf( + EqualsProto(R"pb(value { str: "0xdeadbeef" })pb"), + EqualsProto(R"pb(value { str: "00000000DEADBEEF" })pb")))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +template +class VolatileVoidPtrLogFormatTest : public testing::Test {}; +using VolatileVoidPtrTypes = Types; +TYPED_TEST_SUITE(VolatileVoidPtrLogFormatTest, VolatileVoidPtrTypes); + +TYPED_TEST(VolatileVoidPtrLogFormatTest, Null) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = nullptr; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("false")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "false" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(VolatileVoidPtrLogFormatTest, NonNull) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const TypeParam value = reinterpret_cast(0xdeadbeefLL); + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("true")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "true" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +template +class CharPtrLogFormatTest : public testing::Test {}; +using CharPtrTypes = Types; +TYPED_TEST_SUITE(CharPtrLogFormatTest, CharPtrTypes); + +TYPED_TEST(CharPtrLogFormatTest, Null) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + // Streaming `([cv] char *)nullptr` into a `std::ostream` is UB, and some C++ + // standard library implementations choose to crash. We take measures to log + // something useful instead of crashing, even when that differs from the + // standard library in use (and thus the behavior of `std::ostream`). + const TypeParam value = nullptr; + + EXPECT_CALL( + test_sink, + Send(AllOf( + // `MatchesOstream` deliberately omitted since we deliberately differ. 
+ TextMessage(Eq("(null)")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(null)" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(CharPtrLogFormatTest, NonNull) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + char data[] = "value"; + const TypeParam value = data; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("value")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "value" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(BoolLogFormatTest, True) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const bool value = true; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("true")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "true" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(BoolLogFormatTest, False) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const bool value = false; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("false")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "false" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(LogFormatTest, StringLiteral) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + auto comparison_stream = ComparisonStream(); + comparison_stream << "value"; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("value")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + literal: "value" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << "value"; +} + +TEST(LogFormatTest, CharArray) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + char value[] = "value"; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("value")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "value" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +class CustomClass {}; +std::ostream& operator<<(std::ostream& os, const CustomClass&) { + return os << "CustomClass{}"; +} + +TEST(LogFormatTest, Custom) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + CustomClass value; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("CustomClass{}")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "CustomClass{}" + })pb"))))); + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +class CustomClassNonCopyable { + public: + CustomClassNonCopyable() = default; + CustomClassNonCopyable(const CustomClassNonCopyable&) = delete; + CustomClassNonCopyable& operator=(const CustomClassNonCopyable&) = delete; +}; +std::ostream& operator<<(std::ostream& os, const CustomClassNonCopyable&) { + return os << "CustomClassNonCopyable{}"; +} + +TEST(LogFormatTest, CustomNonCopyable) { + absl::ScopedMockLog 
test_sink(absl::MockLogDefault::kDisallowUnexpected); + + CustomClassNonCopyable value; + auto comparison_stream = ComparisonStream(); + comparison_stream << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("CustomClassNonCopyable{}")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "CustomClassNonCopyable{}" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +struct Point { + template + friend void AbslStringify(Sink& sink, const Point& p) { + absl::Format(&sink, "(%d, %d)", p.x, p.y); + } + + int x = 10; + int y = 20; +}; + +TEST(LogFormatTest, AbslStringifyExample) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + Point p; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << p; +} + +struct PointWithAbslStringifiyAndOstream { + template + friend void AbslStringify(Sink& sink, + const PointWithAbslStringifiyAndOstream& p) { + absl::Format(&sink, "(%d, %d)", p.x, p.y); + } + + int x = 10; + int y = 20; +}; + +ABSL_ATTRIBUTE_UNUSED std::ostream& operator<<( + std::ostream& os, const PointWithAbslStringifiyAndOstream&) { + return os << "Default to AbslStringify()"; +} + +TEST(LogFormatTest, CustomWithAbslStringifyAndOstream) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + PointWithAbslStringifiyAndOstream p; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << p; +} + +struct PointStreamsNothing { + template + friend void AbslStringify(Sink&, const PointStreamsNothing&) {} + + int x = 10; + int y = 20; +}; + +TEST(LogFormatTest, AbslStringifyStreamsNothing) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + PointStreamsNothing p; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(Eq("77")), TextMessage(Eq(absl::StrCat(p, 77))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << p << 77; +} + +struct PointMultipleAppend { + template + friend void AbslStringify(Sink& sink, const PointMultipleAppend& p) { + sink.Append("("); + sink.Append(absl::StrCat(p.x, ", ", p.y, ")")); + } + + int x = 10; + int y = 20; +}; + +TEST(LogFormatTest, AbslStringifyMultipleAppend) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + PointMultipleAppend p; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(" } + value { str: "10, 20)" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << p; +} + +TEST(ManipulatorLogFormatTest, BoolAlphaTrue) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const bool value = true; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::noboolalpha << value << " " // + << std::boolalpha << value << " " // + << std::noboolalpha << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("1 true 1")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "1" } + value { literal: " " } + value { str: "true" } + value { literal: " " } 
+ value { str: "1" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::noboolalpha << value << " " // + << std::boolalpha << value << " " // + << std::noboolalpha << value; +} + +TEST(ManipulatorLogFormatTest, BoolAlphaFalse) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const bool value = false; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::noboolalpha << value << " " // + << std::boolalpha << value << " " // + << std::noboolalpha << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("0 false 0")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "0" } + value { literal: " " } + value { str: "false" } + value { literal: " " } + value { str: "0" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::noboolalpha << value << " " // + << std::boolalpha << value << " " // + << std::noboolalpha << value; +} + +TEST(ManipulatorLogFormatTest, ShowPoint) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 77.0; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::noshowpoint << value << " " // + << std::showpoint << value << " " // + << std::noshowpoint << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77 77.0000 77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" } + value { literal: " " } + value { str: "77.0000" } + value { literal: " " } + value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::noshowpoint << value << " " // + << std::showpoint << value << " " // + << std::noshowpoint << value; +} + +TEST(ManipulatorLogFormatTest, ShowPos) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::noshowpos << value << " " // + << std::showpos << value << " " // + << std::noshowpos << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77 +77 77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" } + value { literal: " " } + value { str: "+77" } + value { literal: " " } + value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::noshowpos << value << " " // + << std::showpos << value << " " // + << std::noshowpos << value; +} + +TEST(ManipulatorLogFormatTest, UppercaseFloat) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 7.7e7; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::nouppercase << value << " " // + << std::uppercase << value << " " // + << std::nouppercase << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("7.7e+07 7.7E+07 7.7e+07")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "7.7e+07" } + value { literal: " " } + value { str: "7.7E+07" } + value { literal: " " } + value { str: "7.7e+07" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::nouppercase << value << " " // + << std::uppercase << value << " " // + << std::nouppercase << value; +} + +TEST(ManipulatorLogFormatTest, Hex) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 0x77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::hex << value; + 
+ EXPECT_CALL( + test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("0x77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "0x77" + })pb"))))); + test_sink.StartCapturingLogs(); + LOG(INFO) << std::hex << value; +} + +TEST(ManipulatorLogFormatTest, Oct) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 077; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::oct << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("077")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "077" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::oct << value; +} + +TEST(ManipulatorLogFormatTest, Dec) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::hex << std::dec << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::hex << std::dec << value; +} + +TEST(ManipulatorLogFormatTest, ShowbaseHex) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 0x77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::hex // + << std::noshowbase << value << " " // + << std::showbase << value << " " // + << std::noshowbase << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77 0x77 77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" } + value { literal: " " } + value { str: "0x77" } + value { literal: " " } + value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::hex // + << std::noshowbase << value << " " // + << std::showbase << value << " " // + << std::noshowbase << value; +} + +TEST(ManipulatorLogFormatTest, ShowbaseOct) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 077; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::oct // + << std::noshowbase << value << " " // + << std::showbase << value << " " // + << std::noshowbase << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77 077 77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" } + value { literal: " " } + value { str: "077" } + value { literal: " " } + value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::oct // + << std::noshowbase << value << " " // + << std::showbase << value << " " // + << std::noshowbase << value; +} + +TEST(ManipulatorLogFormatTest, UppercaseHex) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 0xbeef; + auto comparison_stream = ComparisonStream(); + comparison_stream // + << std::hex // + << std::nouppercase << value << " " // + << std::uppercase << value << " " // + << std::nouppercase << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("0xbeef 0XBEEF 0xbeef")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "0xbeef" } + value { literal: " " } + value { str: "0XBEEF" } + value { literal: " " } + value { str: "0xbeef" })pb"))))); + + 
test_sink.StartCapturingLogs(); + LOG(INFO) << std::hex // + << std::nouppercase << value << " " // + << std::uppercase << value << " " // + << std::nouppercase << value; +} + +TEST(ManipulatorLogFormatTest, FixedFloat) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 7.7e7; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::fixed << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77000000.000000")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "77000000.000000" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::fixed << value; +} + +TEST(ManipulatorLogFormatTest, ScientificFloat) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 7.7e7; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::scientific << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("7.700000e+07")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "7.700000e+07" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::scientific << value; +} + +#if defined(__BIONIC__) && (!defined(__ANDROID_API__) || __ANDROID_API__ < 22) +// Bionic doesn't support `%a` until API 22, so this prints 'a' even if the +// C++ standard library implements it correctly (by forwarding to printf). +#elif defined(__GLIBCXX__) && __cplusplus < 201402L +// libstdc++ shipped C++11 support without `std::hexfloat`. +#else +TEST(ManipulatorLogFormatTest, FixedAndScientificFloat) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 7.7e7; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::setiosflags(std::ios_base::scientific | + std::ios_base::fixed) + << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"), + Eq("0x1.25bb500000000p+26"))), + ENCODED_MESSAGE( + AnyOf(EqualsProto(R"pb(value { str: "0x1.25bb5p+26" })pb"), + EqualsProto(R"pb(value { + str: "0x1.25bb500000000p+26" + })pb")))))); + + test_sink.StartCapturingLogs(); + + // This combination should mean the same thing as `std::hexfloat`. + LOG(INFO) << std::setiosflags(std::ios_base::scientific | + std::ios_base::fixed) + << value; +} +#endif + +#if defined(__BIONIC__) && (!defined(__ANDROID_API__) || __ANDROID_API__ < 22) +// Bionic doesn't support `%a` until API 22, so this prints 'a' even if the C++ +// standard library supports `std::hexfloat` (by forwarding to printf). +#elif defined(__GLIBCXX__) && __cplusplus < 201402L +// libstdc++ shipped C++11 support without `std::hexfloat`. 
+#else +TEST(ManipulatorLogFormatTest, HexfloatFloat) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 7.7e7; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::hexfloat << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"), + Eq("0x1.25bb500000000p+26"))), + ENCODED_MESSAGE( + AnyOf(EqualsProto(R"pb(value { str: "0x1.25bb5p+26" })pb"), + EqualsProto(R"pb(value { + str: "0x1.25bb500000000p+26" + })pb")))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::hexfloat << value; +} +#endif + +TEST(ManipulatorLogFormatTest, DefaultFloatFloat) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 7.7e7; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::hexfloat << std::defaultfloat << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("7.7e+07")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "7.7e+07" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::hexfloat << std::defaultfloat << value; +} + +TEST(ManipulatorLogFormatTest, Ends) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + auto comparison_stream = ComparisonStream(); + comparison_stream << std::ends; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq(absl::string_view("\0", 1))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "\0" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::ends; +} + +TEST(ManipulatorLogFormatTest, Endl) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + auto comparison_stream = ComparisonStream(); + comparison_stream << std::endl; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("\n")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "\n" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::endl; +} + +TEST(ManipulatorLogFormatTest, SetIosFlags) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 0x77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::resetiosflags(std::ios_base::basefield) + << std::setiosflags(std::ios_base::hex) << value << " " // + << std::resetiosflags(std::ios_base::basefield) + << std::setiosflags(std::ios_base::dec) << value; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("0x77 119")), + // `std::setiosflags` and `std::resetiosflags` aren't manipulators. + // We're unable to distinguish their return type(s) from arbitrary + // user-defined types and thus don't suppress the empty str value. 
+ ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "0x77" } + value { literal: " " } + value { str: "119" } + )pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::resetiosflags(std::ios_base::basefield) + << std::setiosflags(std::ios_base::hex) << value << " " // + << std::resetiosflags(std::ios_base::basefield) + << std::setiosflags(std::ios_base::dec) << value; +} + +TEST(ManipulatorLogFormatTest, SetBase) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 0x77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::setbase(16) << value << " " // + << std::setbase(0) << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("0x77 119")), + // `std::setbase` isn't a manipulator. We're unable to + // distinguish its return type from arbitrary user-defined + // types and thus don't suppress the empty str value. + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "0x77" } + value { literal: " " } + value { str: "119" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::setbase(16) << value << " " // + << std::setbase(0) << value; +} + +TEST(ManipulatorLogFormatTest, SetPrecision) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 6.022140857e23; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::setprecision(4) << value; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("6.022e+23")), + // `std::setprecision` isn't a manipulator. We're unable to + // distinguish its return type from arbitrary user-defined + // types and thus don't suppress the empty str value. + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "6.022e+23" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::setprecision(4) << value; +} + +TEST(ManipulatorLogFormatTest, SetPrecisionOverflow) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const double value = 6.022140857e23; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::setprecision(200) << value; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("602214085700000015187968")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "602214085700000015187968" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::setprecision(200) << value; +} + +TEST(ManipulatorLogFormatTest, SetW) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::setw(8) << value; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq(" 77")), + // `std::setw` isn't a manipulator. We're unable to + // distinguish its return type from arbitrary user-defined + // types and thus don't suppress the empty str value. 
+ ENCODED_MESSAGE(EqualsProto(R"pb(value { str: " 77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::setw(8) << value; +} + +TEST(ManipulatorLogFormatTest, Left) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = -77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::left << std::setw(8) << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("-77 ")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "-77 " + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::left << std::setw(8) << value; +} + +TEST(ManipulatorLogFormatTest, Right) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = -77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::right << std::setw(8) << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq(" -77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: " -77" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::right << std::setw(8) << value; +} + +TEST(ManipulatorLogFormatTest, Internal) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = -77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::internal << std::setw(8) << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("- 77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "- 77" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::internal << std::setw(8) << value; +} + +TEST(ManipulatorLogFormatTest, SetFill) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + const int value = 77; + auto comparison_stream = ComparisonStream(); + comparison_stream << std::setfill('0') << std::setw(8) << value; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("00000077")), + // `std::setfill` isn't a manipulator. We're + // unable to distinguish its return + // type from arbitrary user-defined types and + // thus don't suppress the empty str value. 
+ ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "00000077" + })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::setfill('0') << std::setw(8) << value; +} + +class FromCustomClass {}; +std::ostream& operator<<(std::ostream& os, const FromCustomClass&) { + return os << "FromCustomClass{}" << std::hex; +} + +TEST(ManipulatorLogFormatTest, FromCustom) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + FromCustomClass value; + auto comparison_stream = ComparisonStream(); + comparison_stream << value << " " << 0x77; + + EXPECT_CALL(test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("FromCustomClass{} 0x77")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { str: "FromCustomClass{}" } + value { literal: " " } + value { str: "0x77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value << " " << 0x77; +} + +class StreamsNothing {}; +std::ostream& operator<<(std::ostream& os, const StreamsNothing&) { return os; } + +TEST(ManipulatorLogFormatTest, CustomClassStreamsNothing) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + StreamsNothing value; + auto comparison_stream = ComparisonStream(); + comparison_stream << value << 77; + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(MatchesOstream(comparison_stream)), + TextMessage(Eq("77")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value << 77; +} + +struct PointPercentV { + template + friend void AbslStringify(Sink& sink, const PointPercentV& p) { + absl::Format(&sink, "(%v, %v)", p.x, p.y); + } + + int x = 10; + int y = 20; +}; + +TEST(ManipulatorLogFormatTest, IOManipsDoNotAffectAbslStringify) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + PointPercentV p; + + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::hex << p; +} + +TEST(StructuredLoggingOverflowTest, TruncatesStrings) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + // This message is too long and should be truncated to some unspecified size + // no greater than the buffer size but not too much less either. It should be + // truncated rather than discarded. + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(AllOf( + SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256), + Le(absl::log_internal::kLogMessageBufferSize))), + Each(Eq('x')))), + ENCODED_MESSAGE(ResultOf( + [](const logging::proto::Event& e) { return e.value(); }, + ElementsAre(ResultOf( + [](const logging::proto::Value& v) { + return std::string(v.str()); + }, + AllOf(SizeIs(AllOf( + Ge(absl::log_internal::kLogMessageBufferSize - 256), + Le(absl::log_internal::kLogMessageBufferSize))), + Each(Eq('x')))))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x'); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_macro_hygiene_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_macro_hygiene_test.cc new file mode 100644 index 0000000000..ab6461f5eb --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_macro_hygiene_test.cc @@ -0,0 +1,171 @@ +// +// Copyright 2022 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/log_severity.h" +#include "absl/log/log.h" +#include "absl/log/scoped_mock_log.h" + +namespace { +using ::testing::_; +using ::testing::Eq; + +namespace not_absl { + +class Dummy { + public: + Dummy() {} + + private: + Dummy(const Dummy&) = delete; + Dummy& operator=(const Dummy&) = delete; +}; + +// This line tests that local definitions of INFO, WARNING, ERROR, and +// etc don't shadow the global ones used by the logging macros. If +// they do, the LOG() calls in the tests won't compile, catching the +// bug. +const Dummy INFO, WARNING, ERROR, FATAL, NUM_SEVERITIES; + +// These makes sure that the uses of same-named types in the +// implementation of the logging macros are fully qualified. +class string {}; +class vector {}; +class LogMessage {}; +class LogMessageFatal {}; +class LogMessageQuietlyFatal {}; +class LogMessageVoidify {}; +class LogSink {}; +class NullStream {}; +class NullStreamFatal {}; + +} // namespace not_absl + +using namespace not_absl; // NOLINT + +// Tests for LOG(LEVEL(()). + +TEST(LogHygieneTest, WorksForQualifiedSeverity) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + ::testing::InSequence seq; + EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "To INFO")); + EXPECT_CALL(test_sink, Log(absl::LogSeverity::kWarning, _, "To WARNING")); + EXPECT_CALL(test_sink, Log(absl::LogSeverity::kError, _, "To ERROR")); + + test_sink.StartCapturingLogs(); + // Note that LOG(LEVEL()) expects the severity as a run-time + // expression (as opposed to a compile-time constant). Hence we + // test that :: is allowed before INFO, etc. 
+  LOG(LEVEL(absl::LogSeverity::kInfo)) << "To INFO";
+  LOG(LEVEL(absl::LogSeverity::kWarning)) << "To WARNING";
+  LOG(LEVEL(absl::LogSeverity::kError)) << "To ERROR";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeINFOSymbol) {
+  const double INFO ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << "Hello world";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeWARNINGSymbol) {
+  const double WARNING ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kWarning, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(WARNING) << "Hello world";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeERRORSymbol) {
+  const double ERROR ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kError, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(ERROR) << "Hello world";
+}
+
+TEST(LogHygieneTest, WorksWithAlternativeLEVELSymbol) {
+  const double LEVEL ABSL_ATTRIBUTE_UNUSED = 7.77;
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kError, _, "Hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(LEVEL(absl::LogSeverity::kError)) << "Hello world";
+}
+
+#define INFO Bogus
+#ifdef NDEBUG
+constexpr bool IsOptimized = false;
+#else
+constexpr bool IsOptimized = true;
+#endif
+
+TEST(LogHygieneTest, WorksWithINFODefined) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "Hello world"))
+      .Times(2 + (IsOptimized ? 2 : 0));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << "Hello world";
+  LOG_IF(INFO, true) << "Hello world";
+
+  DLOG(INFO) << "Hello world";
+  DLOG_IF(INFO, true) << "Hello world";
+}
+
+#undef INFO
+
+TEST(LogHygieneTest, ExpressionEvaluationInLEVELSeverity) {
+  auto i = static_cast<int>(absl::LogSeverity::kInfo);
+  LOG(LEVEL(++i)) << "hello world";  // NOLINT
+  EXPECT_THAT(i, Eq(static_cast<int>(absl::LogSeverity::kInfo) + 1));
+}
+
+TEST(LogHygieneTest, ExpressionEvaluationInStreamedMessage) {
+  int i = 0;
+  LOG(INFO) << ++i;
+  EXPECT_THAT(i, 1);
+  LOG_IF(INFO, false) << ++i;
+  EXPECT_THAT(i, 1);
+}
+
+// Tests that macros are usable in unbraced switch statements.
+// -----------------------------------------------------------
+
+class UnbracedSwitchCompileTest {
+  static void Log() {
+    switch (0) {
+      case 0:
+        LOG(INFO);
+        break;
+      default:
+        break;
+    }
+  }
+};
+
+}  // namespace
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_modifier_methods_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_modifier_methods_test.cc
new file mode 100644
index 0000000000..42e13b1ba2
--- /dev/null
+++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_modifier_methods_test.cc
@@ -0,0 +1,233 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <errno.h>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/log/internal/test_actions.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/log_sink.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "absl/time/time.h"
+
+namespace {
+#if GTEST_HAS_DEATH_TEST
+using ::absl::log_internal::DeathTestExpectedLogging;
+using ::absl::log_internal::DeathTestUnexpectedLogging;
+using ::absl::log_internal::DeathTestValidateExpectations;
+using ::absl::log_internal::DiedOfQFatal;
+#endif
+using ::absl::log_internal::LogSeverity;
+using ::absl::log_internal::Prefix;
+using ::absl::log_internal::SourceBasename;
+using ::absl::log_internal::SourceFilename;
+using ::absl::log_internal::SourceLine;
+using ::absl::log_internal::Stacktrace;
+using ::absl::log_internal::TextMessage;
+using ::absl::log_internal::TextMessageWithPrefix;
+using ::absl::log_internal::TextMessageWithPrefixAndNewline;
+using ::absl::log_internal::TextPrefix;
+using ::absl::log_internal::ThreadID;
+using ::absl::log_internal::Timestamp;
+using ::absl::log_internal::Verbosity;
+
+using ::testing::AllOf;
+using ::testing::AnyNumber;
+using ::testing::AnyOf;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::IsFalse;
+using ::testing::Truly;
+
+TEST(TailCallsModifiesTest, AtLocationFileLine) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          // The metadata should change:
+          SourceFilename(Eq("/my/very/very/very_long_source_file.cc")),
+          SourceBasename(Eq("very_long_source_file.cc")), SourceLine(Eq(777)),
+          // The logged line should change too, even though the prefix must
+          // grow to fit the new metadata.
+ TextMessageWithPrefix(Truly([](absl::string_view msg) { + return absl::EndsWith(msg, + " very_long_source_file.cc:777] hello world"); + }))))); + + test_sink.StartCapturingLogs(); + LOG(INFO).AtLocation("/my/very/very/very_long_source_file.cc", 777) + << "hello world"; +} + +TEST(TailCallsModifiesTest, NoPrefix) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(AllOf(Prefix(IsFalse()), TextPrefix(IsEmpty()), + TextMessageWithPrefix(Eq("hello world"))))); + + test_sink.StartCapturingLogs(); + LOG(INFO).NoPrefix() << "hello world"; +} + +TEST(TailCallsModifiesTest, NoPrefixNoMessageNoShirtNoShoesNoService) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, + Send(AllOf(Prefix(IsFalse()), TextPrefix(IsEmpty()), + TextMessageWithPrefix(IsEmpty()), + TextMessageWithPrefixAndNewline(Eq("\n"))))); + test_sink.StartCapturingLogs(); + LOG(INFO).NoPrefix(); +} + +TEST(TailCallsModifiesTest, WithVerbosity) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(Verbosity(Eq(2)))); + + test_sink.StartCapturingLogs(); + LOG(INFO).WithVerbosity(2) << "hello world"; +} + +TEST(TailCallsModifiesTest, WithVerbosityNoVerbosity) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, + Send(Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)))); + + test_sink.StartCapturingLogs(); + LOG(INFO).WithVerbosity(2).WithVerbosity(absl::LogEntry::kNoVerbosityLevel) + << "hello world"; +} + +TEST(TailCallsModifiesTest, WithTimestamp) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(Timestamp(Eq(absl::UnixEpoch())))); + + test_sink.StartCapturingLogs(); + LOG(INFO).WithTimestamp(absl::UnixEpoch()) << "hello world"; +} + +TEST(TailCallsModifiesTest, WithThreadID) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, + Send(AllOf(ThreadID(Eq(absl::LogEntry::tid_t{1234}))))); + + test_sink.StartCapturingLogs(); + LOG(INFO).WithThreadID(1234) << "hello world"; +} + +TEST(TailCallsModifiesTest, WithMetadataFrom) { + class ForwardingLogSink : public absl::LogSink { + public: + void Send(const absl::LogEntry &entry) override { + LOG(LEVEL(entry.log_severity())).WithMetadataFrom(entry) + << "forwarded: " << entry.text_message(); + } + } forwarding_sink; + + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("fake/file")), SourceBasename(Eq("file")), + SourceLine(Eq(123)), Prefix(IsFalse()), + LogSeverity(Eq(absl::LogSeverity::kWarning)), + Timestamp(Eq(absl::UnixEpoch())), + ThreadID(Eq(absl::LogEntry::tid_t{456})), + TextMessage(Eq("forwarded: hello world")), Verbosity(Eq(7)), + ENCODED_MESSAGE( + EqualsProto(R"pb(value { literal: "forwarded: " } + value { str: "hello world" })pb"))))); + + test_sink.StartCapturingLogs(); + LOG(WARNING) + .AtLocation("fake/file", 123) + .NoPrefix() + .WithTimestamp(absl::UnixEpoch()) + .WithThreadID(456) + .WithVerbosity(7) + .ToSinkOnly(&forwarding_sink) + << "hello world"; +} + +TEST(TailCallsModifiesTest, WithPerror) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL( + test_sink, + Send(AllOf(TextMessage(AnyOf(Eq("hello world: Bad file number [9]"), + Eq("hello world: Bad file descriptor [9]"), + Eq("hello world: Bad file descriptor [8]"))), 
+ ENCODED_MESSAGE( + AnyOf(EqualsProto(R"pb(value { literal: "hello world" } + value { literal: ": " } + value { str: "Bad file number" } + value { literal: " [" } + value { str: "9" } + value { literal: "]" })pb"), + EqualsProto(R"pb(value { literal: "hello world" } + value { literal: ": " } + value { str: "Bad file descriptor" } + value { literal: " [" } + value { str: "9" } + value { literal: "]" })pb"), + EqualsProto(R"pb(value { literal: "hello world" } + value { literal: ": " } + value { str: "Bad file descriptor" } + value { literal: " [" } + value { str: "8" } + value { literal: "]" })pb")))))); + + test_sink.StartCapturingLogs(); + errno = EBADF; + LOG(INFO).WithPerror() << "hello world"; +} + +#if GTEST_HAS_DEATH_TEST +TEST(ModifierMethodDeathTest, ToSinkOnlyQFatal) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink( + absl::MockLogDefault::kDisallowUnexpected); + + auto do_log = [&test_sink] { + LOG(QFATAL).ToSinkOnly(&test_sink.UseAsLocalSink()) << "hello world"; + }; + + EXPECT_CALL(test_sink, Send) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("hello world")), + Stacktrace(IsEmpty())))) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + do_log(); + }, + DiedOfQFatal, DeathTestValidateExpectations()); +} +#endif + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink.cc new file mode 100644 index 0000000000..01d7ca8251 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink.cc @@ -0,0 +1,23 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/log_sink.h" + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +void LogSink::KeyFunction() const {} +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink.h new file mode 100644 index 0000000000..9bfa6f8624 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink.h @@ -0,0 +1,64 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: log/log_sink.h +// ----------------------------------------------------------------------------- +// +// This header declares the interface class `absl::LogSink`. + +#ifndef ABSL_LOG_LOG_SINK_H_ +#define ABSL_LOG_LOG_SINK_H_ + +#include "absl/base/config.h" +#include "absl/log/log_entry.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::LogSink +// +// `absl::LogSink` is an interface which can be extended to intercept and +// process particular messages (with `LOG.ToSinkOnly()` or +// `LOG.ToSinkAlso()`) or all messages (if registered with +// `absl::AddLogSink`). Implementations must be thread-safe, and should take +// care not to take any locks that might be held by the `LOG` caller. +class LogSink { + public: + virtual ~LogSink() = default; + + // LogSink::Send() + // + // `Send` is called synchronously during the log statement. + // + // It is safe to use `LOG` within an implementation of `Send`. `ToSinkOnly` + // and `ToSinkAlso` are safe in general but can be used to create an infinite + // loop if you try. + virtual void Send(const absl::LogEntry& entry) = 0; + + // LogSink::Flush() + // + // Sinks that buffer messages should override this method to flush the buffer + // and return. + virtual void Flush() {} + + private: + // https://lld.llvm.org/missingkeyfunction.html#missing-key-function + virtual void KeyFunction() const final; // NOLINT(readability/inheritance) +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_LOG_SINK_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink_registry.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink_registry.h new file mode 100644 index 0000000000..bf76cceeae --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink_registry.h @@ -0,0 +1,61 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/log_sink_registry.h +// ----------------------------------------------------------------------------- +// +// This header declares APIs to operate on global set of registered log sinks. + +#ifndef ABSL_LOG_LOG_SINK_REGISTRY_H_ +#define ABSL_LOG_LOG_SINK_REGISTRY_H_ + +#include "absl/base/config.h" +#include "absl/log/internal/log_sink_set.h" +#include "absl/log/log_sink.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// AddLogSink(), RemoveLogSink() +// +// Adds or removes a `absl::LogSink` as a consumer of logging data. +// +// These functions are thread-safe. +// +// It is an error to attempt to add a sink that's already registered or to +// attempt to remove one that isn't. +// +// To avoid unbounded recursion, dispatch to registered `absl::LogSink`s is +// disabled per-thread while running the `Send()` method of registered +// `absl::LogSink`s. 
Affected messages are dispatched to a special internal +// sink instead which writes them to `stderr`. +// +// Do not call these inside `absl::LogSink::Send`. +inline void AddLogSink(absl::LogSink* sink) { log_internal::AddLogSink(sink); } +inline void RemoveLogSink(absl::LogSink* sink) { + log_internal::RemoveLogSink(sink); +} + +// FlushLogSinks() +// +// Calls `absl::LogSink::Flush` on all registered sinks. +// +// Do not call this inside `absl::LogSink::Send`. +inline void FlushLogSinks() { log_internal::FlushLogSinks(); } + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_LOG_SINK_REGISTRY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink_test.cc new file mode 100644 index 0000000000..8903da7266 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_sink_test.cc @@ -0,0 +1,419 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/log_sink.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/log/internal/test_actions.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/internal/test_matchers.h" +#include "absl/log/log.h" +#include "absl/log/log_sink_registry.h" +#include "absl/log/scoped_mock_log.h" +#include "absl/strings/string_view.h" + +namespace { + +using ::absl::log_internal::DeathTestExpectedLogging; +using ::absl::log_internal::DeathTestUnexpectedLogging; +using ::absl::log_internal::DeathTestValidateExpectations; +using ::absl::log_internal::DiedOfFatal; +using ::testing::_; +using ::testing::AnyNumber; +using ::testing::HasSubstr; +using ::testing::InSequence; + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +// Tests for global log sink registration. 
+// --------------------------------------- + +TEST(LogSinkRegistryTest, AddLogSink) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + InSequence s; + EXPECT_CALL(test_sink, Log(_, _, "hello world")).Times(0); + EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, __FILE__, "Test : 42")); + EXPECT_CALL(test_sink, + Log(absl::LogSeverity::kWarning, __FILE__, "Danger ahead")); + EXPECT_CALL(test_sink, + Log(absl::LogSeverity::kError, __FILE__, "This is an error")); + + LOG(INFO) << "hello world"; + test_sink.StartCapturingLogs(); + + LOG(INFO) << "Test : " << 42; + LOG(WARNING) << "Danger" << ' ' << "ahead"; + LOG(ERROR) << "This is an error"; + + test_sink.StopCapturingLogs(); + LOG(INFO) << "Goodby world"; +} + +TEST(LogSinkRegistryTest, MultipleLogSinks) { + absl::ScopedMockLog test_sink1(absl::MockLogDefault::kDisallowUnexpected); + absl::ScopedMockLog test_sink2(absl::MockLogDefault::kDisallowUnexpected); + + ::testing::InSequence seq; + EXPECT_CALL(test_sink1, Log(absl::LogSeverity::kInfo, _, "First")).Times(1); + EXPECT_CALL(test_sink2, Log(absl::LogSeverity::kInfo, _, "First")).Times(0); + + EXPECT_CALL(test_sink1, Log(absl::LogSeverity::kInfo, _, "Second")).Times(1); + EXPECT_CALL(test_sink2, Log(absl::LogSeverity::kInfo, _, "Second")).Times(1); + + EXPECT_CALL(test_sink1, Log(absl::LogSeverity::kInfo, _, "Third")).Times(0); + EXPECT_CALL(test_sink2, Log(absl::LogSeverity::kInfo, _, "Third")).Times(1); + + LOG(INFO) << "Before first"; + + test_sink1.StartCapturingLogs(); + LOG(INFO) << "First"; + + test_sink2.StartCapturingLogs(); + LOG(INFO) << "Second"; + + test_sink1.StopCapturingLogs(); + LOG(INFO) << "Third"; + + test_sink2.StopCapturingLogs(); + LOG(INFO) << "Fourth"; +} + +TEST(LogSinkRegistrationDeathTest, DuplicateSinkRegistration) { + ASSERT_DEATH_IF_SUPPORTED( + { + absl::ScopedMockLog sink; + sink.StartCapturingLogs(); + absl::AddLogSink(&sink.UseAsLocalSink()); + }, + HasSubstr("Duplicate log sinks")); +} + +TEST(LogSinkRegistrationDeathTest, MismatchSinkRemoval) { + ASSERT_DEATH_IF_SUPPORTED( + { + absl::ScopedMockLog sink; + absl::RemoveLogSink(&sink.UseAsLocalSink()); + }, + HasSubstr("Mismatched log sink")); +} + +// Tests for log sink semantic. +// --------------------------------------- + +TEST(LogSinkTest, FlushSinks) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Flush()).Times(2); + + test_sink.StartCapturingLogs(); + + absl::FlushLogSinks(); + absl::FlushLogSinks(); +} + +TEST(LogSinkDeathTest, DeathInSend) { + class FatalSendSink : public absl::LogSink { + public: + void Send(const absl::LogEntry&) override { LOG(FATAL) << "goodbye world"; } + }; + + FatalSendSink sink; + EXPECT_EXIT({ LOG(INFO).ToSinkAlso(&sink) << "hello world"; }, DiedOfFatal, + _); +} + +// Tests for explicit log sink redirection. 
+// ---------------------------------------
+
+TEST(LogSinkTest, ToSinkAlso) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog another_sink(absl::MockLogDefault::kDisallowUnexpected);
+  EXPECT_CALL(test_sink, Log(_, _, "hello world"));
+  EXPECT_CALL(another_sink, Log(_, _, "hello world"));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO).ToSinkAlso(&another_sink.UseAsLocalSink()) << "hello world";
+}
+
+TEST(LogSinkTest, ToSinkOnly) {
+  absl::ScopedMockLog another_sink(absl::MockLogDefault::kDisallowUnexpected);
+  EXPECT_CALL(another_sink, Log(_, _, "hello world"));
+  LOG(INFO).ToSinkOnly(&another_sink.UseAsLocalSink()) << "hello world";
+}
+
+TEST(LogSinkTest, ToManySinks) {
+  absl::ScopedMockLog sink1(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink2(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink3(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink4(absl::MockLogDefault::kDisallowUnexpected);
+  absl::ScopedMockLog sink5(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(sink3, Log(_, _, "hello world"));
+  EXPECT_CALL(sink4, Log(_, _, "hello world"));
+  EXPECT_CALL(sink5, Log(_, _, "hello world"));
+
+  LOG(INFO)
+      .ToSinkAlso(&sink1.UseAsLocalSink())
+      .ToSinkAlso(&sink2.UseAsLocalSink())
+      .ToSinkOnly(&sink3.UseAsLocalSink())
+      .ToSinkAlso(&sink4.UseAsLocalSink())
+      .ToSinkAlso(&sink5.UseAsLocalSink())
+      << "hello world";
+}
+
+class ReentrancyTest : public ::testing::Test {
+ protected:
+  ReentrancyTest() = default;
+  enum class LogMode : int { kNormal, kToSinkAlso, kToSinkOnly };
+
+  class ReentrantSendLogSink : public absl::LogSink {
+   public:
+    explicit ReentrantSendLogSink(absl::LogSeverity severity,
+                                  absl::LogSink* sink, LogMode mode)
+        : severity_(severity), sink_(sink), mode_(mode) {}
+    explicit ReentrantSendLogSink(absl::LogSeverity severity)
+        : ReentrantSendLogSink(severity, nullptr, LogMode::kNormal) {}
+
+    void Send(const absl::LogEntry&) override {
+      switch (mode_) {
+        case LogMode::kNormal:
+          LOG(LEVEL(severity_)) << "The log is coming from *inside the sink*.";
+          break;
+        case LogMode::kToSinkAlso:
+          LOG(LEVEL(severity_)).ToSinkAlso(sink_)
+              << "The log is coming from *inside the sink*.";
+          break;
+        case LogMode::kToSinkOnly:
+          LOG(LEVEL(severity_)).ToSinkOnly(sink_)
+              << "The log is coming from *inside the sink*.";
+          break;
+        default:
+          ABSL_RAW_LOG(FATAL, "Invalid mode %d.\n", static_cast<int>(mode_));
+      }
+    }
+
+   private:
+    absl::LogSeverity severity_;
+    absl::LogSink* sink_;
+    LogMode mode_;
+  };
+
+  static absl::string_view LogAndReturn(absl::LogSeverity severity,
+                                        absl::string_view to_log,
+                                        absl::string_view to_return) {
+    LOG(LEVEL(severity)) << to_log;
+    return to_return;
+  }
+};
+
+TEST_F(ReentrancyTest, LogFunctionThatLogs) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  InSequence seq;
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "hello"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "world"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kWarning, _, "danger"));
+  EXPECT_CALL(test_sink, Log(absl::LogSeverity::kInfo, _, "here"));
+
+  test_sink.StartCapturingLogs();
+  LOG(INFO) << LogAndReturn(absl::LogSeverity::kInfo, "hello", "world");
+  LOG(INFO) << LogAndReturn(absl::LogSeverity::kWarning, "danger", "here");
+}
+
+TEST_F(ReentrancyTest, RegisteredLogSinkThatLogsInSend) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+  ReentrantSendLogSink
renentrant_sink(absl::LogSeverity::kInfo); + EXPECT_CALL(test_sink, Log(_, _, "hello world")); + + test_sink.StartCapturingLogs(); + absl::AddLogSink(&renentrant_sink); + LOG(INFO) << "hello world"; + absl::RemoveLogSink(&renentrant_sink); +} + +TEST_F(ReentrancyTest, AlsoLogSinkThatLogsInSend) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo); + EXPECT_CALL(test_sink, Log(_, _, "hello world")); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")); + + test_sink.StartCapturingLogs(); + LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world"; +} + +TEST_F(ReentrancyTest, RegisteredAlsoLogSinkThatLogsInSend) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo); + EXPECT_CALL(test_sink, Log(_, _, "hello world")); + // We only call into the test_log sink once with this message, since the + // second time log statement is run we are in "ThreadIsLogging" mode and all + // the log statements are redirected into stderr. + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")); + + test_sink.StartCapturingLogs(); + absl::AddLogSink(&reentrant_sink); + LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world"; + absl::RemoveLogSink(&reentrant_sink); +} + +TEST_F(ReentrancyTest, OnlyLogSinkThatLogsInSend) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")); + + test_sink.StartCapturingLogs(); + LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world"; +} + +TEST_F(ReentrancyTest, RegisteredOnlyLogSinkThatLogsInSend) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kInfo); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")); + + test_sink.StartCapturingLogs(); + absl::AddLogSink(&reentrant_sink); + LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world"; + absl::RemoveLogSink(&reentrant_sink); +} + +using ReentrancyDeathTest = ReentrancyTest; + +TEST_F(ReentrancyDeathTest, LogFunctionThatLogsFatal) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + EXPECT_CALL(test_sink, Log(_, _, "hello")) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + LOG(INFO) << LogAndReturn(absl::LogSeverity::kFatal, "hello", "world"); + }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +TEST_F(ReentrancyDeathTest, RegisteredLogSinkThatLogsFatalInSend) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal); + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + EXPECT_CALL(test_sink, Log(_, _, "hello world")) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + absl::AddLogSink(&reentrant_sink); + LOG(INFO) << "hello world"; + // No need to call RemoveLogSink - process is dead at this point. 
+ }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +TEST_F(ReentrancyDeathTest, AlsoLogSinkThatLogsFatalInSend) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal); + + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + EXPECT_CALL(test_sink, Log(_, _, "hello world")) + .WillOnce(DeathTestExpectedLogging()); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world"; + }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +TEST_F(ReentrancyDeathTest, RegisteredAlsoLogSinkThatLogsFatalInSend) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal); + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + EXPECT_CALL(test_sink, Log(_, _, "hello world")) + .WillOnce(DeathTestExpectedLogging()); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + absl::AddLogSink(&reentrant_sink); + LOG(INFO).ToSinkAlso(&reentrant_sink) << "hello world"; + // No need to call RemoveLogSink - process is dead at this point. + }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +TEST_F(ReentrancyDeathTest, OnlyLogSinkThatLogsFatalInSend) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal); + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world"; + }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +TEST_F(ReentrancyDeathTest, RegisteredOnlyLogSinkThatLogsFatalInSend) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + ReentrantSendLogSink reentrant_sink(absl::LogSeverity::kFatal); + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + EXPECT_CALL(test_sink, + Log(_, _, "The log is coming from *inside the sink*.")) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + absl::AddLogSink(&reentrant_sink); + LOG(INFO).ToSinkOnly(&reentrant_sink) << "hello world"; + // No need to call RemoveLogSink - process is dead at this point. + }, + DiedOfFatal, DeathTestValidateExpectations()); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_streamer.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_streamer.h new file mode 100644 index 0000000000..20327455dd --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_streamer.h @@ -0,0 +1,171 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: log/log_streamer.h
+// -----------------------------------------------------------------------------
+//
+// This header declares the class `LogStreamer` and convenience functions to
+// construct LogStreamer objects with different associated log severity levels.
+
+#ifndef ABSL_LOG_LOG_STREAMER_H_
+#define ABSL_LOG_LOG_STREAMER_H_
+
+#include <ios>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/log.h"
+#include "absl/strings/internal/ostringstream.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// LogStreamer
+//
+// Although you can stream into `LOG(INFO)`, you can't pass it into a function
+// that takes a `std::ostream` parameter. `LogStreamer::stream()` provides a
+// `std::ostream` that buffers everything that's streamed in. The buffer's
+// contents are logged as if by `LOG` when the `LogStreamer` is destroyed.
+// If nothing is streamed in, an empty message is logged. If the specified
+// severity is `absl::LogSeverity::kFatal`, the program will be terminated when
+// the `LogStreamer` is destroyed regardless of whether any data were streamed
+// in.
+//
+// Factory functions corresponding to the `absl::LogSeverity` enumerators
+// are provided for convenience; if the desired severity is variable, invoke the
+// constructor directly.
+//
+// LogStreamer is movable, but not copyable.
+//
+// Examples:
+//
+//   ShaveYakAndWriteToStream(
+//       yak, absl::LogInfoStreamer(__FILE__, __LINE__).stream());
+//
+//   {
+//     // This logs a single line containing data streamed by all three function
+//     // calls.
+//     absl::LogStreamer streamer(absl::LogSeverity::kInfo, __FILE__, __LINE__);
+//     ShaveYakAndWriteToStream(yak1, streamer.stream());
+//     streamer.stream() << " ";
+//     ShaveYakAndWriteToStream(yak2, streamer.stream());
+//     streamer.stream() << " ";
+//     ShaveYakAndWriteToStreamPointer(yak3, &streamer.stream());
+//   }
+class LogStreamer final {
+ public:
+  // LogStreamer::LogStreamer()
+  //
+  // Creates a LogStreamer with a given `severity` that will log a message
+  // attributed to the given `file` and `line`.
+  explicit LogStreamer(absl::LogSeverity severity, absl::string_view file,
+                       int line)
+      : severity_(severity),
+        line_(line),
+        file_(file),
+        stream_(absl::in_place, &buf_) {
+    // To match `LOG`'s defaults:
+    stream_->setf(std::ios_base::showbase | std::ios_base::boolalpha);
+  }
+
+  // A moved-from `absl::LogStreamer` does not `LOG` when destroyed,
+  // and a program that streams into one has undefined behavior.
+  LogStreamer(LogStreamer&& that) noexcept
+      : severity_(that.severity_),
+        line_(that.line_),
+        file_(std::move(that.file_)),
+        buf_(std::move(that.buf_)),
+        stream_(std::move(that.stream_)) {
+    if (stream_.has_value()) stream_->str(&buf_);
+    that.stream_.reset();
+  }
+  LogStreamer& operator=(LogStreamer&& that) {
+    LOG_IF(LEVEL(severity_), stream_).AtLocation(file_, line_) << buf_;
+    severity_ = that.severity_;
+    file_ = std::move(that.file_);
+    line_ = that.line_;
+    buf_ = std::move(that.buf_);
+    stream_ = std::move(that.stream_);
+    if (stream_.has_value()) stream_->str(&buf_);
+    that.stream_.reset();
+    return *this;
+  }
+
+  // LogStreamer::~LogStreamer()
+  //
+  // Logs this LogStreamer's buffered content as if by LOG.
+  ~LogStreamer() {
+    LOG_IF(LEVEL(severity_), stream_.has_value()).AtLocation(file_, line_)
+        << buf_;
+  }
+
+  // LogStreamer::stream()
+  //
+  // Returns the `std::ostream` to use to write into this LogStreamer's internal
+  // buffer.
+  std::ostream& stream() { return *stream_; }
+
+ private:
+  absl::LogSeverity severity_;
+  int line_;
+  std::string file_;
+  std::string buf_;
+  // A disengaged `stream_` indicates a moved-from `LogStreamer` that should not
+  // `LOG` upon destruction.
+  absl::optional<absl::strings_internal::OStringStream> stream_;
+};
+
+// LogInfoStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kInfo.
+inline LogStreamer LogInfoStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kInfo, file, line);
+}
+
+// LogWarningStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kWarning.
+inline LogStreamer LogWarningStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kWarning, file, line);
+}
+
+// LogErrorStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kError.
+inline LogStreamer LogErrorStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kError, file, line);
+}
+
+// LogFatalStreamer()
+//
+// Returns a LogStreamer that writes at level LogSeverity::kFatal.
+//
+// The program will be terminated when this `LogStreamer` is destroyed,
+// regardless of whether any data were streamed in.
+inline LogStreamer LogFatalStreamer(absl::string_view file, int line) {
+  return absl::LogStreamer(absl::LogSeverity::kFatal, file, line);
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOG_LOG_STREAMER_H_
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_streamer_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_streamer_test.cc
new file mode 100644
index 0000000000..328d70d085
--- /dev/null
+++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/log_streamer_test.cc
@@ -0,0 +1,365 @@
+//
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
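// A minimal usage sketch, based only on the `LogStreamer` documentation above:
// a helper that writes to any `std::ostream` can be handed a LOG record via
// `absl::LogInfoStreamer`; the buffered text is emitted as a single INFO line
// when the temporary streamer is destroyed at the end of the full expression.
// The helper names (`DescribeYak`, `LogYak`) are illustrative only, not part
// of the Abseil API or of this patch.

#include <ostream>

#include "absl/log/log_streamer.h"

namespace yak_example {

// Writes a description to any ostream; knows nothing about logging.
void DescribeYak(int id, std::ostream& os) { os << "yak #" << id << " shaved"; }

// Routes that description into the Abseil log, attributed to this file/line.
inline void LogYak(int id) {
  // Behaves like: LOG(INFO) << "yak #" << id << " shaved";
  DescribeYak(id, absl::LogInfoStreamer(__FILE__, __LINE__).stream());
}

}  // namespace yak_example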
+
+#include "absl/log/log_streamer.h"
+
+#include <ios>
+#include <iostream>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/log_severity.h"
+#include "absl/log/internal/test_actions.h"
+#include "absl/log/internal/test_helpers.h"
+#include "absl/log/internal/test_matchers.h"
+#include "absl/log/log.h"
+#include "absl/log/scoped_mock_log.h"
+#include "absl/strings/string_view.h"
+
+namespace {
+using ::absl::log_internal::DeathTestExpectedLogging;
+using ::absl::log_internal::DeathTestUnexpectedLogging;
+using ::absl::log_internal::DeathTestValidateExpectations;
+#if GTEST_HAS_DEATH_TEST
+using ::absl::log_internal::DiedOfFatal;
+#endif
+using ::absl::log_internal::LogSeverity;
+using ::absl::log_internal::Prefix;
+using ::absl::log_internal::SourceFilename;
+using ::absl::log_internal::SourceLine;
+using ::absl::log_internal::Stacktrace;
+using ::absl::log_internal::TextMessage;
+using ::absl::log_internal::ThreadID;
+using ::absl::log_internal::TimestampInMatchWindow;
+using ::testing::AnyNumber;
+using ::testing::Eq;
+using ::testing::HasSubstr;
+using ::testing::IsEmpty;
+using ::testing::IsTrue;
+
+auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
+    new absl::log_internal::LogTestEnvironment);
+
+void WriteToStream(absl::string_view data, std::ostream* os) {
+  *os << "WriteToStream: " << data;
+}
+void WriteToStreamRef(absl::string_view data, std::ostream& os) {
+  os << "WriteToStreamRef: " << data;
+}
+
+TEST(LogStreamerTest, LogInfoStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kInfo)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty())))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream("foo", &absl::LogInfoStreamer("path/file.cc", 1234).stream());
+}
+
+TEST(LogStreamerTest, LogWarningStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kWarning)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty())))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream("foo",
+                &absl::LogWarningStreamer("path/file.cc", 1234).stream());
+}
+
+TEST(LogStreamerTest, LogErrorStreamer) {
+  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
+                 TimestampInMatchWindow(),
+                 ThreadID(Eq(absl::base_internal::GetTID())),
+                 TextMessage(Eq("WriteToStream: foo")),
+                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
+                                                    str: "WriteToStream: foo"
+                                                  })pb")),
+                 Stacktrace(IsEmpty())))));
+
+  test_sink.StartCapturingLogs();
+  WriteToStream("foo", &absl::LogErrorStreamer("path/file.cc", 1234).stream());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(LogStreamerDeathTest, LogFatalStreamer) {
+  EXPECT_EXIT(
+      {
+
absl::ScopedMockLog test_sink; + + EXPECT_CALL(test_sink, Send) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + EXPECT_CALL( + test_sink, + Send(AllOf( + SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("WriteToStream: foo")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "WriteToStream: foo" + })pb"))))) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + WriteToStream("foo", + &absl::LogFatalStreamer("path/file.cc", 1234).stream()); + }, + DiedOfFatal, DeathTestValidateExpectations()); +} +#endif + +TEST(LogStreamerTest, LogStreamer) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("WriteToStream: foo")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "WriteToStream: foo" + })pb")), + Stacktrace(IsEmpty())))); + + test_sink.StartCapturingLogs(); + WriteToStream( + "foo", &absl::LogStreamer(absl::LogSeverity::kError, "path/file.cc", 1234) + .stream()); +} + +#if GTEST_HAS_DEATH_TEST +TEST(LogStreamerDeathTest, LogStreamer) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + + EXPECT_CALL(test_sink, Send) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + EXPECT_CALL( + test_sink, + Send(AllOf( + SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)), + TimestampInMatchWindow(), + ThreadID(Eq(absl::base_internal::GetTID())), + TextMessage(Eq("WriteToStream: foo")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "WriteToStream: foo" + })pb"))))) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + WriteToStream("foo", &absl::LogStreamer(absl::LogSeverity::kFatal, + "path/file.cc", 1234) + .stream()); + }, + DiedOfFatal, DeathTestValidateExpectations()); +} +#endif + +TEST(LogStreamerTest, PassedByReference) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + TextMessage(Eq("WriteToStreamRef: foo")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "WriteToStreamRef: foo" + })pb")), + Stacktrace(IsEmpty())))); + + test_sink.StartCapturingLogs(); + WriteToStreamRef("foo", absl::LogInfoStreamer("path/file.cc", 1234).stream()); +} + +TEST(LogStreamerTest, StoredAsLocal) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + auto streamer = absl::LogInfoStreamer("path/file.cc", 1234); + WriteToStream("foo", &streamer.stream()); + streamer.stream() << " "; + WriteToStreamRef("bar", streamer.stream()); + + // The call should happen when `streamer` goes out of scope; if it + // happened before this `EXPECT_CALL` the call would be unexpected and the + // test would fail. 
+ EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + TextMessage(Eq("WriteToStream: foo WriteToStreamRef: bar")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { + str: "WriteToStream: foo WriteToStreamRef: bar" + })pb")), + Stacktrace(IsEmpty())))); + + test_sink.StartCapturingLogs(); +} + +#if GTEST_HAS_DEATH_TEST +TEST(LogStreamerDeathTest, StoredAsLocal) { + EXPECT_EXIT( + { + // This is fatal when it goes out of scope, but not until then: + auto streamer = absl::LogFatalStreamer("path/file.cc", 1234); + std::cerr << "I'm still alive" << std::endl; + WriteToStream("foo", &streamer.stream()); + }, + DiedOfFatal, HasSubstr("I'm still alive")); +} +#endif + +TEST(LogStreamerTest, LogsEmptyLine) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Send(AllOf(SourceFilename(Eq("path/file.cc")), + SourceLine(Eq(1234)), TextMessage(Eq("")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "" + })pb")), + Stacktrace(IsEmpty())))); + + test_sink.StartCapturingLogs(); + absl::LogInfoStreamer("path/file.cc", 1234); +} + +#if GTEST_HAS_DEATH_TEST +TEST(LogStreamerDeathTest, LogsEmptyLine) { + EXPECT_EXIT( + { + absl::ScopedMockLog test_sink; + + EXPECT_CALL(test_sink, Log) + .Times(AnyNumber()) + .WillRepeatedly(DeathTestUnexpectedLogging()); + + EXPECT_CALL( + test_sink, + Send(AllOf( + SourceFilename(Eq("path/file.cc")), TextMessage(Eq("")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "" })pb"))))) + .WillOnce(DeathTestExpectedLogging()); + + test_sink.StartCapturingLogs(); + // This is fatal even though it's never used: + auto streamer = absl::LogFatalStreamer("path/file.cc", 1234); + }, + DiedOfFatal, DeathTestValidateExpectations()); +} +#endif + +TEST(LogStreamerTest, MoveConstruction) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + LogSeverity(Eq(absl::LogSeverity::kInfo)), + TextMessage(Eq("hello 0x10 world 0x10")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "hello 0x10 world 0x10" + })pb")), + Stacktrace(IsEmpty())))); + + test_sink.StartCapturingLogs(); + auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234); + streamer1.stream() << "hello " << std::hex << 16; + absl::LogStreamer streamer2(std::move(streamer1)); + streamer2.stream() << " world " << 16; +} + +TEST(LogStreamerTest, MoveAssignment) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + testing::InSequence seq; + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("path/file2.cc")), SourceLine(Eq(5678)), + LogSeverity(Eq(absl::LogSeverity::kWarning)), + TextMessage(Eq("something else")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "something else" + })pb")), + Stacktrace(IsEmpty())))); + EXPECT_CALL( + test_sink, + Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), + LogSeverity(Eq(absl::LogSeverity::kInfo)), + TextMessage(Eq("hello 0x10 world 0x10")), + ENCODED_MESSAGE(EqualsProto(R"pb(value { + str: "hello 0x10 world 0x10" + })pb")), + Stacktrace(IsEmpty())))); + + test_sink.StartCapturingLogs(); + auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234); + streamer1.stream() << "hello " << std::hex << 16; + auto streamer2 = absl::LogWarningStreamer("path/file2.cc", 5678); + streamer2.stream() << "something else"; + streamer2 = std::move(streamer1); + streamer2.stream() << " world " << 16; +} + +TEST(LogStreamerTest, 
CorrectDefaultFlags) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + // The `boolalpha` and `showbase` flags should be set by default, to match + // `LOG`. + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("false0xdeadbeef"))))) + .Times(2); + + test_sink.StartCapturingLogs(); + absl::LogInfoStreamer("path/file.cc", 1234).stream() + << false << std::hex << 0xdeadbeef; + LOG(INFO) << false << std::hex << 0xdeadbeef; +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log.cc new file mode 100644 index 0000000000..4ebc0a9f98 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log.cc @@ -0,0 +1,86 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/scoped_mock_log.h" + +#include +#include + +#include "gmock/gmock.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" +#include "absl/log/log_sink_registry.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +ScopedMockLog::ScopedMockLog(MockLogDefault default_exp) + : sink_(this), is_capturing_logs_(false) { + if (default_exp == MockLogDefault::kIgnoreUnexpected) { + // Ignore all calls to Log we did not set expectations for. + EXPECT_CALL(*this, Log).Times(::testing::AnyNumber()); + } else { + // Disallow all calls to Log we did not set expectations for. + EXPECT_CALL(*this, Log).Times(0); + } + // By default Send mock forwards to Log mock. + EXPECT_CALL(*this, Send) + .Times(::testing::AnyNumber()) + .WillRepeatedly([this](const absl::LogEntry& entry) { + is_triggered_.store(true, std::memory_order_relaxed); + Log(entry.log_severity(), std::string(entry.source_filename()), + std::string(entry.text_message())); + }); + + // By default We ignore all Flush calls. 
+ EXPECT_CALL(*this, Flush).Times(::testing::AnyNumber()); +} + +ScopedMockLog::~ScopedMockLog() { + ABSL_RAW_CHECK(is_triggered_.load(std::memory_order_relaxed), + "Did you forget to call StartCapturingLogs()?"); + + if (is_capturing_logs_) StopCapturingLogs(); +} + +void ScopedMockLog::StartCapturingLogs() { + ABSL_RAW_CHECK(!is_capturing_logs_, + "StartCapturingLogs() can be called only when the " + "absl::ScopedMockLog object is not capturing logs."); + + is_capturing_logs_ = true; + is_triggered_.store(true, std::memory_order_relaxed); + absl::AddLogSink(&sink_); +} + +void ScopedMockLog::StopCapturingLogs() { + ABSL_RAW_CHECK(is_capturing_logs_, + "StopCapturingLogs() can be called only when the " + "absl::ScopedMockLog object is capturing logs."); + + is_capturing_logs_ = false; + absl::RemoveLogSink(&sink_); +} + +absl::LogSink& ScopedMockLog::UseAsLocalSink() { + is_triggered_.store(true, std::memory_order_relaxed); + return sink_; +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log.h new file mode 100644 index 0000000000..44470c1677 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log.h @@ -0,0 +1,194 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/scoped_mock_log.h +// ----------------------------------------------------------------------------- +// +// This header declares `class absl::ScopedMockLog`, for use in testing. + +#ifndef ABSL_LOG_SCOPED_MOCK_LOG_H_ +#define ABSL_LOG_SCOPED_MOCK_LOG_H_ + +#include +#include + +#include "gmock/gmock.h" +#include "absl/base/config.h" +#include "absl/base/log_severity.h" +#include "absl/log/log_entry.h" +#include "absl/log/log_sink.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// MockLogDefault +// +// Controls how ScopedMockLog responds to unexpected calls by default. +enum class MockLogDefault { kIgnoreUnexpected, kDisallowUnexpected }; + +// ScopedMockLog +// +// ScopedMockLog is a LogSink that intercepts LOG() messages issued during its +// lifespan. +// +// Using this together with GoogleTest, it's easy to test how a piece of code +// calls LOG(). The typical usage, noting the distinction between +// "uninteresting" and "unexpected", looks like this: +// +// using ::testing::_; +// using ::testing::AnyNumber; +// using ::testing::EndsWith; +// using ::testing::kDoNotCaptureLogsYet; +// using ::testing::Lt; +// +// TEST(FooTest, LogsCorrectly) { +// // Simple robust setup, ignores unexpected logs. +// absl::ScopedMockLog log; +// +// // We expect the WARNING "Something bad!" exactly twice. +// EXPECT_CALL(log, Log(absl::LogSeverity::kWarning, _, "Something bad!")) +// .Times(2); +// +// // But we want no messages from foo.cc. 
+// EXPECT_CALL(log, Log(_, EndsWith("/foo.cc"), _)).Times(0); +// +// log.StartCapturingLogs(); // Call this after done setting expectations. +// Foo(); // Exercises the code under test. +// } +// +// TEST(BarTest, LogsExactlyCorrectly) { +// // Strict checking, fails for unexpected logs. +// absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected); +// +// // ... but ignore low severity messages +// EXPECT_CALL(log, Log(Lt(absl::LogSeverity::kWarning), _, _)) +// .Times(AnyNumber()); +// +// // We expect the ERROR "Something bad!" exactly once. +// EXPECT_CALL(log, Log(absl::LogSeverity::kError, EndsWith("/foo.cc"), +// "Something bad!")) +// .Times(1); +// +// log.StartCapturingLogs(); // Call this after done setting expectations. +// Bar(); // Exercises the code under test. +// } +// +// Note that in a multi-threaded environment, all LOG() messages from a single +// thread will be handled in sequence, but that cannot be guaranteed for +// messages from different threads. In fact, if the same or multiple +// expectations are matched on two threads concurrently, their actions will be +// executed concurrently as well and may interleave. +class ScopedMockLog final { + public: + // ScopedMockLog::ScopedMockLog() + // + // Sets up the log and adds default expectations. + explicit ScopedMockLog( + MockLogDefault default_exp = MockLogDefault::kIgnoreUnexpected); + ScopedMockLog(const ScopedMockLog&) = delete; + ScopedMockLog& operator=(const ScopedMockLog&) = delete; + + // ScopedMockLog::~ScopedMockLog() + // + // Stops intercepting logs and destroys this ScopedMockLog. + ~ScopedMockLog(); + + // ScopedMockLog::StartCapturingLogs() + // + // Starts log capturing if the object isn't already doing so. Otherwise + // crashes. + // + // Usually this method is called in the same thread that created this + // ScopedMockLog. It is the user's responsibility to not call this method if + // another thread may be calling it or StopCapturingLogs() at the same time. + // It is undefined behavior to add expectations while capturing logs is + // enabled. + void StartCapturingLogs(); + + // ScopedMockLog::StopCapturingLogs() + // + // Stops log capturing if the object is capturing logs. Otherwise crashes. + // + // Usually this method is called in the same thread that created this object. + // It is the user's responsibility to not call this method if another thread + // may be calling it or StartCapturingLogs() at the same time. + // + // It is UB to add expectations, while capturing logs is enabled. + void StopCapturingLogs(); + + // ScopedMockLog::UseAsLocalSink() + // + // Each `ScopedMockLog` is implemented with an `absl::LogSink`; this method + // returns a reference to that sink (e.g. for use with + // `LOG(...).ToSinkOnly()`) and marks the `ScopedMockLog` as having been used + // even if `StartCapturingLogs` is never called. + absl::LogSink& UseAsLocalSink(); + + // Implements the mock method: + // + // void Log(LogSeverity severity, absl::string_view file_path, + // absl::string_view message); + // + // The second argument to Log() is the full path of the source file in + // which the LOG() was issued. + // + // This is a shorthand form, which should be used by most users. Use the + // `Send` mock only if you want to add expectations for other log message + // attributes. 
+ MOCK_METHOD(void, Log, + (absl::LogSeverity severity, const std::string& file_path, + const std::string& message)); + + // Implements the mock method: + // + // void Send(const absl::LogEntry& entry); + // + // This is the most generic form of mock that can be specified. Use this mock + // only if you want to add expectations for log message attributes different + // from the log message text, log message path and log message severity. + // + // If no expectations are specified for this mock, the default action is to + // forward the call to the `Log` mock. + MOCK_METHOD(void, Send, (const absl::LogEntry&)); + + // Implements the mock method: + // + // void Flush(); + // + // Use this mock only if you want to add expectations for log flush calls. + MOCK_METHOD(void, Flush, ()); + + private: + class ForwardingSink final : public absl::LogSink { + public: + explicit ForwardingSink(ScopedMockLog* sml) : sml_(sml) {} + ForwardingSink(const ForwardingSink&) = delete; + ForwardingSink& operator=(const ForwardingSink&) = delete; + void Send(const absl::LogEntry& entry) override { sml_->Send(entry); } + void Flush() override { sml_->Flush(); } + + private: + ScopedMockLog* sml_; + }; + + ForwardingSink sink_; + bool is_capturing_logs_; + std::atomic is_triggered_; +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_SCOPED_MOCK_LOG_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log_test.cc new file mode 100644 index 0000000000..44b8d7379e --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/scoped_mock_log_test.cc @@ -0,0 +1,290 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
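Before the tests, a sketch of the `absl::LogSink` surface that `ScopedMockLog`'s `ForwardingSink` overrides above; `StderrEchoSink` is a hypothetical sink, and `AddLogSink`/`RemoveLogSink` are the same registry calls `scoped_mock_log.cc` makes in `StartCapturingLogs()`/`StopCapturingLogs()`:

#include <iostream>
#include <string>

#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"

// Hypothetical sink: echoes each record's text to stderr via the same
// Send/Flush virtual interface that ForwardingSink implements.
class StderrEchoSink final : public absl::LogSink {
 public:
  void Send(const absl::LogEntry& entry) override {
    std::cerr << std::string(entry.text_message()) << "\n";
  }
  void Flush() override { std::cerr.flush(); }
};

void CaptureWhileInScope() {
  StderrEchoSink sink;
  absl::AddLogSink(&sink);     // begin receiving records, like StartCapturingLogs()
  // ... code under observation logs here ...
  absl::RemoveLogSink(&sink);  // stop receiving, like StopCapturingLogs()
}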
+ +#include "absl/log/scoped_mock_log.h" + +#include +#include // NOLINT(build/c++11) + +#include "gmock/gmock.h" +#include "gtest/gtest-spi.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/internal/test_matchers.h" +#include "absl/log/log.h" +#include "absl/memory/memory.h" +#include "absl/strings/match.h" +#include "absl/strings/string_view.h" +#include "absl/synchronization/barrier.h" +#include "absl/synchronization/notification.h" + +namespace { + +using ::testing::_; +using ::testing::AnyNumber; +using ::testing::Eq; +using ::testing::HasSubstr; +using ::testing::InSequence; +using ::testing::Lt; +using ::testing::Truly; +using absl::log_internal::SourceBasename; +using absl::log_internal::SourceFilename; +using absl::log_internal::SourceLine; +using absl::log_internal::TextMessageWithPrefix; +using absl::log_internal::ThreadID; + +auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +#if GTEST_HAS_DEATH_TEST +TEST(ScopedMockLogDeathTest, + StartCapturingLogsCannotBeCalledWhenAlreadyCapturing) { + EXPECT_DEATH( + { + absl::ScopedMockLog log; + log.StartCapturingLogs(); + log.StartCapturingLogs(); + }, + "StartCapturingLogs"); +} + +TEST(ScopedMockLogDeathTest, StopCapturingLogsCannotBeCalledWhenNotCapturing) { + EXPECT_DEATH( + { + absl::ScopedMockLog log; + log.StopCapturingLogs(); + }, + "StopCapturingLogs"); +} +#endif + +// Tests that ScopedMockLog intercepts LOG()s when it's alive. +TEST(ScopedMockLogTest, LogMockCatchAndMatchStrictExpectations) { + absl::ScopedMockLog log; + + // The following expectations must match in the order they appear. + InSequence s; + EXPECT_CALL(log, + Log(absl::LogSeverity::kWarning, HasSubstr(__FILE__), "Danger.")); + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Working...")).Times(2); + EXPECT_CALL(log, Log(absl::LogSeverity::kError, _, "Bad!!")); + + log.StartCapturingLogs(); + LOG(WARNING) << "Danger."; + LOG(INFO) << "Working..."; + LOG(INFO) << "Working..."; + LOG(ERROR) << "Bad!!"; +} + +TEST(ScopedMockLogTest, LogMockCatchAndMatchSendExpectations) { + absl::ScopedMockLog log; + + EXPECT_CALL( + log, + Send(AllOf(SourceFilename(Eq("/my/very/very/very_long_source_file.cc")), + SourceBasename(Eq("very_long_source_file.cc")), + SourceLine(Eq(777)), ThreadID(Eq(absl::LogEntry::tid_t{1234})), + TextMessageWithPrefix(Truly([](absl::string_view msg) { + return absl::EndsWith( + msg, " very_long_source_file.cc:777] Info message"); + }))))); + + log.StartCapturingLogs(); + LOG(INFO) + .AtLocation("/my/very/very/very_long_source_file.cc", 777) + .WithThreadID(1234) + << "Info message"; +} + +TEST(ScopedMockLogTest, ScopedMockLogCanBeNice) { + absl::ScopedMockLog log; + + InSequence s; + EXPECT_CALL(log, + Log(absl::LogSeverity::kWarning, HasSubstr(__FILE__), "Danger.")); + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Working...")).Times(2); + EXPECT_CALL(log, Log(absl::LogSeverity::kError, _, "Bad!!")); + + log.StartCapturingLogs(); + + // Any number of these are OK. + LOG(INFO) << "Info message."; + // Any number of these are OK. + LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger "; + + LOG(WARNING) << "Danger."; + + // Any number of these are OK. + LOG(INFO) << "Info message."; + // Any number of these are OK. 
+ LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger "; + + LOG(INFO) << "Working..."; + + // Any number of these are OK. + LOG(INFO) << "Info message."; + // Any number of these are OK. + LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger "; + + LOG(INFO) << "Working..."; + + // Any number of these are OK. + LOG(INFO) << "Info message."; + // Any number of these are OK. + LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger "; + + LOG(ERROR) << "Bad!!"; + + // Any number of these are OK. + LOG(INFO) << "Info message."; + // Any number of these are OK. + LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger "; +} + +// Tests that ScopedMockLog generates a test failure if a message is logged +// that is not expected (here, that means ERROR or FATAL). +TEST(ScopedMockLogTest, RejectsUnexpectedLogs) { + EXPECT_NONFATAL_FAILURE( + { + absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected); + // Any INFO and WARNING messages are permitted. + EXPECT_CALL(log, Log(Lt(absl::LogSeverity::kError), _, _)) + .Times(AnyNumber()); + log.StartCapturingLogs(); + LOG(INFO) << "Ignored"; + LOG(WARNING) << "Ignored"; + LOG(ERROR) << "Should not be ignored"; + }, + "Should not be ignored"); +} + +TEST(ScopedMockLogTest, CapturesLogsAfterStartCapturingLogs) { + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfinity); + absl::ScopedMockLog log; + + // The ScopedMockLog object shouldn't see these LOGs, as it hasn't + // started capturing LOGs yet. + LOG(INFO) << "Ignored info"; + LOG(WARNING) << "Ignored warning"; + LOG(ERROR) << "Ignored error"; + + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Expected info")); + log.StartCapturingLogs(); + + // Only this LOG will be seen by the ScopedMockLog. + LOG(INFO) << "Expected info"; +} + +TEST(ScopedMockLogTest, DoesNotCaptureLogsAfterStopCapturingLogs) { + absl::ScopedMockLog log; + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Expected info")); + + log.StartCapturingLogs(); + + // This LOG should be seen by the ScopedMockLog. + LOG(INFO) << "Expected info"; + + log.StopCapturingLogs(); + + // The ScopedMockLog object shouldn't see these LOGs, as it has + // stopped capturing LOGs. + LOG(INFO) << "Ignored info"; + LOG(WARNING) << "Ignored warning"; + LOG(ERROR) << "Ignored error"; +} + +// Tests that all messages are intercepted regardless of issuing thread. The +// purpose of this test is NOT to exercise thread-safety. +TEST(ScopedMockLogTest, LogFromMultipleThreads) { + absl::ScopedMockLog log; + + // We don't establish an order to expectations here, since the threads may + // execute their log statements in different order. + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread 1")); + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread 2")); + + log.StartCapturingLogs(); + + absl::Barrier barrier(2); + std::thread thread1([&barrier]() { + barrier.Block(); + LOG(INFO) << "Thread 1"; + }); + std::thread thread2([&barrier]() { + barrier.Block(); + LOG(INFO) << "Thread 2"; + }); + + thread1.join(); + thread2.join(); +} + +// Tests that no sequence will be imposed on two LOG message expectations from +// different threads. This test would actually deadlock if replaced to two LOG +// statements from the same thread. 
+TEST(ScopedMockLogTest, NoSequenceWithMultipleThreads) { + absl::ScopedMockLog log; + + absl::Barrier barrier(2); + EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, _)) + .Times(2) + .WillRepeatedly([&barrier]() { barrier.Block(); }); + + log.StartCapturingLogs(); + + std::thread thread1([]() { LOG(INFO) << "Thread 1"; }); + std::thread thread2([]() { LOG(INFO) << "Thread 2"; }); + + thread1.join(); + thread2.join(); +} + +TEST(ScopedMockLogTsanTest, + ScopedMockLogCanBeDeletedWhenAnotherThreadIsLogging) { + auto log = absl::make_unique(); + EXPECT_CALL(*log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread log")) + .Times(AnyNumber()); + + log->StartCapturingLogs(); + + absl::Notification logging_started; + + std::thread thread([&logging_started]() { + for (int i = 0; i < 100; ++i) { + if (i == 50) logging_started.Notify(); + LOG(INFO) << "Thread log"; + } + }); + + logging_started.WaitForNotification(); + log.reset(); + thread.join(); +} + +TEST(ScopedMockLogTest, AsLocalSink) { + absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(log, Log(_, _, "two")); + EXPECT_CALL(log, Log(_, _, "three")); + + LOG(INFO) << "one"; + LOG(INFO).ToSinkOnly(&log.UseAsLocalSink()) << "two"; + LOG(INFO).ToSinkAlso(&log.UseAsLocalSink()) << "three"; +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/stripping_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/stripping_test.cc new file mode 100644 index 0000000000..d6a6606efe --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/stripping_test.cc @@ -0,0 +1,340 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Tests for stripping of literal strings. +// --------------------------------------- +// +// When a `LOG` statement can be trivially proved at compile time to never fire, +// e.g. due to `ABSL_MIN_LOG_LEVEL`, `NDEBUG`, or some explicit condition, data +// streamed in can be dropped from the compiled program completely if they are +// not used elsewhere. This most commonly affects string literals, which users +// often want to strip to reduce binary size and/or redact information about +// their program's internals (e.g. in a release build). +// +// These tests log strings and then validate whether they appear in the compiled +// binary. This is done by opening the file corresponding to the running test +// and running a simple string search on its contents. The strings to be logged +// and searched for must be unique, and we must take care not to emit them into +// the binary in any other place, e.g. when searching for them. The latter is +// accomplished by computing them using base64; the source string appears in the +// binary but the target string is computed at runtime. 
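To make the methodology described in the stripping_test.cc header comment above concrete, a small sketch of the encode-at-build-time / decode-at-run-time trick; the name `StrippingSketch.Example`, its base64 form, and the `binary_contents` parameter are illustrative only, while `absl::Base64Escape` is the same helper the tests below use to build their needles:

#include <string>

#include "absl/log/log.h"
#include "absl/strings/escaping.h"

// The literal streamed into LOG is the base64 form of the name.  The needle we
// look for in the binary is the same base64 text, but computed at run time by
// encoding the plain name, so the search itself never plants a second copy of
// the needle as a literal.
bool ExampleNeedleIsInBinary(const std::string& binary_contents) {
  LOG(INFO) << "U3RyaXBwaW5nU2tldGNoLkV4YW1wbGU=";  // base64("StrippingSketch.Example")
  const std::string needle = absl::Base64Escape("StrippingSketch.Example");
  return binary_contents.find(needle) != std::string::npos;
}

If the `LOG` statement is compiled out, the base64 literal should be absent from the executable and the search fails; that is precisely what the tests below assert for each stripping scenario.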
+ +#include + +#if defined(__MACH__) +#include +#elif defined(_WIN32) +#include +#include +#endif + +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/strerror.h" +#include "absl/flags/internal/program_name.h" +#include "absl/log/check.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/log.h" +#include "absl/strings/escaping.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" + +namespace { +using ::testing::_; +using ::testing::Eq; +using ::testing::NotNull; + +using absl::log_internal::kAbslMinLogLevel; + +std::string Base64UnescapeOrDie(absl::string_view data) { + std::string decoded; + CHECK(absl::Base64Unescape(data, &decoded)); + return decoded; +} + +// ----------------------------------------------------------------------------- +// A Googletest matcher which searches the running binary for a given string +// ----------------------------------------------------------------------------- + +// This matcher is used to validate that literal strings streamed into +// `LOG` statements that ought to be compiled out (e.g. `LOG_IF(INFO, false)`) +// do not appear in the binary. +// +// Note that passing the string to be sought directly to `FileHasSubstr()` all +// but forces its inclusion in the binary regardless of the logging library's +// behavior. For example: +// +// LOG_IF(INFO, false) << "you're the man now dog"; +// // This will always pass: +// // EXPECT_THAT(fp, FileHasSubstr("you're the man now dog")); +// // So use this instead: +// EXPECT_THAT(fp, FileHasSubstr( +// Base64UnescapeOrDie("eW91J3JlIHRoZSBtYW4gbm93IGRvZw=="))); + +class FileHasSubstrMatcher final : public ::testing::MatcherInterface { + public: + explicit FileHasSubstrMatcher(absl::string_view needle) : needle_(needle) {} + + bool MatchAndExplain( + FILE* fp, ::testing::MatchResultListener* listener) const override { + std::string buf( + std::max(needle_.size() * 2, 163840000), '\0'); + size_t buf_start_offset = 0; // The file offset of the byte at `buf[0]`. + size_t buf_data_size = 0; // The number of bytes of `buf` which contain + // data. + + ::fseek(fp, 0, SEEK_SET); + while (true) { + // Fill the buffer to capacity or EOF: + while (buf_data_size < buf.size()) { + const size_t ret = fread(&buf[buf_data_size], sizeof(char), + buf.size() - buf_data_size, fp); + if (ret == 0) break; + buf_data_size += ret; + } + if (ferror(fp)) { + *listener << "error reading file"; + return false; + } + const absl::string_view haystack(&buf[0], buf_data_size); + const auto off = haystack.find(needle_); + if (off != haystack.npos) { + *listener << "string found at offset " << buf_start_offset + off; + return true; + } + if (feof(fp)) { + *listener << "string not found"; + return false; + } + // Copy the end of `buf` to the beginning so we catch matches that span + // buffer boundaries. `buf` and `buf_data_size` are always large enough + // that these ranges don't overlap. 
+      memcpy(&buf[0], &buf[buf_data_size - needle_.size()], needle_.size());
+      buf_start_offset += buf_data_size - needle_.size();
+      buf_data_size = needle_.size();
+    }
+  }
+  void DescribeTo(std::ostream* os) const override {
+    *os << "contains the string \"" << needle_ << "\" (base64(\""
+        << Base64UnescapeOrDie(needle_) << "\"))";
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "does not ";
+    DescribeTo(os);
+  }
+
+ private:
+  std::string needle_;
+};
+
+class StrippingTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+#ifndef NDEBUG
+    // Non-optimized builds don't necessarily eliminate dead code at all, so we
+    // don't attempt to validate stripping against such builds.
+    GTEST_SKIP() << "StrippingTests skipped since this build is not optimized";
+#elif defined(__EMSCRIPTEN__)
+    // These tests require a way to examine the running binary and look for
+    // strings; there's no portable way to do that.
+    GTEST_SKIP()
+        << "StrippingTests skipped since this platform is not optimized";
+#endif
+  }
+
+  // Opens this program's executable file. Returns `nullptr` and writes to
+  // `stderr` on failure.
+  std::unique_ptr<FILE, std::function<void(FILE*)>> OpenTestExecutable() {
+#if defined(__linux__)
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        fopen("/proc/self/exe", "rb"), [](FILE* fp) { fclose(fp); });
+    if (!fp) {
+      const std::string err = absl::base_internal::StrError(errno);
+      absl::FPrintF(stderr, "Failed to open /proc/self/exe: %s\n", err);
+    }
+    return fp;
+#elif defined(__Fuchsia__)
+    // TODO(b/242579714): We need to restore the test coverage on this platform.
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        fopen(absl::StrCat("/pkg/bin/",
+                           absl::flags_internal::ShortProgramInvocationName())
+                  .c_str(),
+              "rb"),
+        [](FILE* fp) { fclose(fp); });
+    if (!fp) {
+      const std::string err = absl::base_internal::StrError(errno);
+      absl::FPrintF(stderr, "Failed to open /pkg/bin/: %s\n", err);
+    }
+    return fp;
+#elif defined(__MACH__)
+    uint32_t size = 0;
+    int ret = _NSGetExecutablePath(nullptr, &size);
+    if (ret != -1) {
+      absl::FPrintF(stderr,
+                    "Failed to get executable path: "
+                    "_NSGetExecutablePath(nullptr) returned %d\n",
+                    ret);
+      return nullptr;
+    }
+    std::string path(size, '\0');
+    ret = _NSGetExecutablePath(&path[0], &size);
+    if (ret != 0) {
+      absl::FPrintF(
+          stderr,
+          "Failed to get executable path: _NSGetExecutablePath(buffer) "
+          "returned %d\n",
+          ret);
+      return nullptr;
+    }
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        fopen(path.c_str(), "rb"), [](FILE* fp) { fclose(fp); });
+    if (!fp) {
+      const std::string err = absl::base_internal::StrError(errno);
+      absl::FPrintF(stderr, "Failed to open executable at %s: %s\n", path, err);
+    }
+    return fp;
+#elif defined(_WIN32)
+    std::basic_string<TCHAR> path(4096, _T('\0'));
+    while (true) {
+      const uint32_t ret = ::GetModuleFileName(nullptr, &path[0],
+                                               static_cast<DWORD>(path.size()));
+      if (ret == 0) {
+        absl::FPrintF(
+            stderr,
+            "Failed to get executable path: GetModuleFileName(buffer) "
+            "returned 0\n");
+        return nullptr;
+      }
+      if (ret < path.size()) break;
+      path.resize(path.size() * 2, _T('\0'));
+    }
+    std::unique_ptr<FILE, std::function<void(FILE*)>> fp(
+        _tfopen(path.c_str(), _T("rb")), [](FILE* fp) { fclose(fp); });
+    if (!fp) absl::FPrintF(stderr, "Failed to open executable\n");
+    return fp;
+#else
+    absl::FPrintF(stderr,
+                  "OpenTestExecutable() unimplemented on this platform\n");
+    return nullptr;
+#endif
+  }
+
+  ::testing::Matcher<FILE*> FileHasSubstr(absl::string_view needle) {
+    return MakeMatcher(new FileHasSubstrMatcher(needle));
+  }
+};
+
+// This tests whether our methodology for testing stripping works on this
+//
platform by looking for one string that definitely ought to be there and one +// that definitely ought not to. If this fails, none of the `StrippingTest`s +// are going to produce meaningful results. +TEST_F(StrippingTest, Control) { + constexpr char kEncodedPositiveControl[] = + "U3RyaXBwaW5nVGVzdC5Qb3NpdGl2ZUNvbnRyb2w="; + const std::string encoded_negative_control = + absl::Base64Escape("StrippingTest.NegativeControl"); + + // Verify this mainly so we can encode other strings and know definitely they + // won't encode to `kEncodedPositiveControl`. + EXPECT_THAT(Base64UnescapeOrDie("U3RyaXBwaW5nVGVzdC5Qb3NpdGl2ZUNvbnRyb2w="), + Eq("StrippingTest.PositiveControl")); + + auto exe = OpenTestExecutable(); + ASSERT_THAT(exe, NotNull()); + EXPECT_THAT(exe.get(), FileHasSubstr(kEncodedPositiveControl)); + EXPECT_THAT(exe.get(), Not(FileHasSubstr(encoded_negative_control))); +} + +TEST_F(StrippingTest, Literal) { + // We need to load a copy of the needle string into memory (so we can search + // for it) without leaving it lying around in plaintext in the executable file + // as would happen if we used a literal. We might (or might not) leave it + // lying around later; that's what the tests are for! + const std::string needle = absl::Base64Escape("StrippingTest.Literal"); + LOG(INFO) << "U3RyaXBwaW5nVGVzdC5MaXRlcmFs"; + auto exe = OpenTestExecutable(); + ASSERT_THAT(exe, NotNull()); + if (absl::LogSeverity::kInfo >= kAbslMinLogLevel) { + EXPECT_THAT(exe.get(), FileHasSubstr(needle)); + } else { + EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle))); + } +} + +TEST_F(StrippingTest, LiteralInExpression) { + // We need to load a copy of the needle string into memory (so we can search + // for it) without leaving it lying around in plaintext in the executable file + // as would happen if we used a literal. We might (or might not) leave it + // lying around later; that's what the tests are for! + const std::string needle = + absl::Base64Escape("StrippingTest.LiteralInExpression"); + LOG(INFO) << absl::StrCat("secret: ", + "U3RyaXBwaW5nVGVzdC5MaXRlcmFsSW5FeHByZXNzaW9u"); + std::unique_ptr> exe = OpenTestExecutable(); + ASSERT_THAT(exe, NotNull()); + if (absl::LogSeverity::kInfo >= kAbslMinLogLevel) { + EXPECT_THAT(exe.get(), FileHasSubstr(needle)); + } else { + EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle))); + } +} + +TEST_F(StrippingTest, Fatal) { + // We need to load a copy of the needle string into memory (so we can search + // for it) without leaving it lying around in plaintext in the executable file + // as would happen if we used a literal. We might (or might not) leave it + // lying around later; that's what the tests are for! 
+ const std::string needle = absl::Base64Escape("StrippingTest.Fatal"); + EXPECT_DEATH_IF_SUPPORTED(LOG(FATAL) << "U3RyaXBwaW5nVGVzdC5GYXRhbA==", ""); + std::unique_ptr> exe = OpenTestExecutable(); + ASSERT_THAT(exe, NotNull()); + if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) { + EXPECT_THAT(exe.get(), FileHasSubstr(needle)); + } else { + EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle))); + } +} + +TEST_F(StrippingTest, Level) { + const std::string needle = absl::Base64Escape("StrippingTest.Level"); + volatile auto severity = absl::LogSeverity::kWarning; + // Ensure that `severity` is not a compile-time constant to prove that + // stripping works regardless: + LOG(LEVEL(severity)) << "U3RyaXBwaW5nVGVzdC5MZXZlbA=="; + std::unique_ptr> exe = OpenTestExecutable(); + ASSERT_THAT(exe, NotNull()); + if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) { + // This can't be stripped at compile-time because it might evaluate to a + // level that shouldn't be stripped. + EXPECT_THAT(exe.get(), FileHasSubstr(needle)); + } else { +#if (defined(_MSC_VER) && !defined(__clang__)) || defined(__APPLE__) + // Dead code elimination misses this case. +#else + // All levels should be stripped, so it doesn't matter what the severity + // winds up being. + EXPECT_THAT(exe.get(), Not(FileHasSubstr(needle))); +#endif + } +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/structured.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/structured.h new file mode 100644 index 0000000000..9ad69fbdcd --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/structured.h @@ -0,0 +1,70 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: log/structured.h +// ----------------------------------------------------------------------------- +// +// This header declares APIs supporting structured logging, allowing log +// statements to be more easily parsed, especially by automated processes. +// +// When structured logging is in use, data streamed into a `LOG` statement are +// encoded as `Value` fields in a `logging.proto.Event` protocol buffer message. +// The individual data are exposed programmatically to `LogSink`s and to the +// user via some log reading tools which are able to query the structured data +// more usefully than would be possible if each message was a single opaque +// string. These helpers allow user code to add additional structure to the +// data they stream. + +#ifndef ABSL_LOG_STRUCTURED_H_ +#define ABSL_LOG_STRUCTURED_H_ + +#include + +#include "absl/base/config.h" +#include "absl/log/internal/structured.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// LogAsLiteral() +// +// Annotates its argument as a string literal so that structured logging +// captures it as a `literal` field instead of a `str` field (the default). 
+// This does not affect the text representation, only the structure. +// +// Streaming `LogAsLiteral(s)` into a `std::ostream` behaves just like streaming +// `s` directly. +// +// Using `LogAsLiteral()` is occasionally appropriate and useful when proxying +// data logged from another system or another language. For example: +// +// void Logger::LogString(absl::string_view str, absl::LogSeverity severity, +// const char *file, int line) { +// LOG(LEVEL(severity)).AtLocation(file, line) << str; +// } +// void Logger::LogStringLiteral(absl::string_view str, +// absl::LogSeverity severity, const char *file, +// int line) { +// LOG(LEVEL(severity)).AtLocation(file, line) << absl::LogAsLiteral(str); +// } +inline log_internal::AsLiteralImpl LogAsLiteral(absl::string_view s) { + return log_internal::AsLiteralImpl(s); +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOG_STRUCTURED_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/structured_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/structured_test.cc new file mode 100644 index 0000000000..490a35d8ad --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/log/structured_test.cc @@ -0,0 +1,63 @@ +// +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/structured.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/log/internal/test_helpers.h" +#include "absl/log/internal/test_matchers.h" +#include "absl/log/log.h" +#include "absl/log/scoped_mock_log.h" + +namespace { +using ::absl::log_internal::MatchesOstream; +using ::absl::log_internal::TextMessage; +using ::testing::Eq; + +auto *test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( + new absl::log_internal::LogTestEnvironment); + +// Abseil Logging library uses these by default, so we set them on the +// `std::ostream` we compare against too. 
+std::ios &LoggingDefaults(std::ios &str) { + str.setf(std::ios_base::showbase | std::ios_base::boolalpha | + std::ios_base::internal); + return str; +} + +TEST(StreamingFormatTest, LogAsLiteral) { + std::ostringstream stream; + const std::string not_a_literal("hello world"); + stream << LoggingDefaults << absl::LogAsLiteral(not_a_literal); + + absl::ScopedMockLog sink; + + EXPECT_CALL(sink, + Send(AllOf(TextMessage(MatchesOstream(stream)), + TextMessage(Eq("hello world")), + ENCODED_MESSAGE(EqualsProto( + R"pb(value { literal: "hello world" })pb"))))); + + sink.StartCapturingLogs(); + LOG(INFO) << absl::LogAsLiteral(not_a_literal); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/CMakeLists.txt index 9d50e1dcd4..c5ed4b4255 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/CMakeLists.txt @@ -39,17 +39,3 @@ absl_cc_test( absl::core_headers GTest::gmock_main ) - -absl_cc_test( - NAME - memory_exception_safety_test - SRCS - "memory_exception_safety_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::memory - absl::config - absl::exception_safety_testing - GTest::gmock_main -) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory.h index d63326068f..e5ff0e6563 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory.h @@ -75,32 +75,6 @@ std::unique_ptr WrapUnique(T* ptr) { return std::unique_ptr(ptr); } -namespace memory_internal { - -// Traits to select proper overload and return type for `absl::make_unique<>`. -template -struct MakeUniqueResult { - using scalar = std::unique_ptr; -}; -template -struct MakeUniqueResult { - using array = std::unique_ptr; -}; -template -struct MakeUniqueResult { - using invalid = void; -}; - -} // namespace memory_internal - -// gcc 4.8 has __cplusplus at 201301 but the libstdc++ shipped with it doesn't -// define make_unique. Other supported compilers either just define __cplusplus -// as 201103 but have make_unique (msvc), or have make_unique whenever -// __cplusplus > 201103 (clang). -#if (__cplusplus > 201103L || defined(_MSC_VER)) && \ - !(defined(__GLIBCXX__) && !defined(__cpp_lib_make_unique)) -using std::make_unique; -#else // ----------------------------------------------------------------------------- // Function Template: make_unique() // ----------------------------------------------------------------------------- @@ -109,82 +83,18 @@ using std::make_unique; // during the construction process. `absl::make_unique<>` also avoids redundant // type declarations, by avoiding the need to explicitly use the `new` operator. // -// This implementation of `absl::make_unique<>` is designed for C++11 code and -// will be replaced in C++14 by the equivalent `std::make_unique<>` abstraction. -// `absl::make_unique<>` is designed to be 100% compatible with -// `std::make_unique<>` so that the eventual migration will involve a simple -// rename operation. 
+// https://en.cppreference.com/w/cpp/memory/unique_ptr/make_unique // // For more background on why `std::unique_ptr(new T(a,b))` is problematic, // see Herb Sutter's explanation on // (Exception-Safe Function Calls)[https://herbsutter.com/gotw/_102/]. // (In general, reviewers should treat `new T(a,b)` with scrutiny.) // -// Example usage: -// -// auto p = make_unique(args...); // 'p' is a std::unique_ptr -// auto pa = make_unique(5); // 'pa' is a std::unique_ptr -// -// Three overloads of `absl::make_unique` are required: -// -// - For non-array T: -// -// Allocates a T with `new T(std::forward args...)`, -// forwarding all `args` to T's constructor. -// Returns a `std::unique_ptr` owning that object. -// -// - For an array of unknown bounds T[]: -// -// `absl::make_unique<>` will allocate an array T of type U[] with -// `new U[n]()` and return a `std::unique_ptr` owning that array. -// -// Note that 'U[n]()' is different from 'U[n]', and elements will be -// value-initialized. Note as well that `std::unique_ptr` will perform its -// own destruction of the array elements upon leaving scope, even though -// the array [] does not have a default destructor. -// -// NOTE: an array of unknown bounds T[] may still be (and often will be) -// initialized to have a size, and will still use this overload. E.g: -// -// auto my_array = absl::make_unique(10); -// -// - For an array of known bounds T[N]: -// -// `absl::make_unique<>` is deleted (like with `std::make_unique<>`) as -// this overload is not useful. -// -// NOTE: an array of known bounds T[N] is not considered a useful -// construction, and may cause undefined behavior in templates. E.g: -// -// auto my_array = absl::make_unique(); -// -// In those cases, of course, you can still use the overload above and -// simply initialize it to its desired size: -// -// auto my_array = absl::make_unique(10); - -// `absl::make_unique` overload for non-array types. -template -typename memory_internal::MakeUniqueResult::scalar make_unique( - Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); -} - -// `absl::make_unique` overload for an array T[] of unknown bounds. -// The array allocation needs to use the `new T[size]` form and cannot take -// element constructor arguments. The `std::unique_ptr` will manage destructing -// these array elements. -template -typename memory_internal::MakeUniqueResult::array make_unique(size_t n) { - return std::unique_ptr(new typename absl::remove_extent_t[n]()); -} - -// `absl::make_unique` overload for an array T[N] of known bounds. -// This construction will be rejected. -template -typename memory_internal::MakeUniqueResult::invalid make_unique( - Args&&... /* args */) = delete; -#endif +// Historical note: Abseil once provided a C++11 compatible implementation of +// the C++14's `std::make_unique`. Now that C++11 support has been sunsetted, +// `absl::make_unique` simply uses the STL-provided implementation. New code +// should use `std::make_unique`. 
+using std::make_unique; // ----------------------------------------------------------------------------- // Function Template: RawPtr() diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_exception_safety_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_exception_safety_test.cc deleted file mode 100644 index 1df72614c0..0000000000 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_exception_safety_test.cc +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "absl/memory/memory.h" - -#include "absl/base/config.h" - -#ifdef ABSL_HAVE_EXCEPTIONS - -#include "gtest/gtest.h" -#include "absl/base/internal/exception_safety_testing.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace { - -constexpr int kLength = 50; -using Thrower = testing::ThrowingValue; - -TEST(MakeUnique, CheckForLeaks) { - constexpr int kValue = 321; - auto tester = testing::MakeExceptionSafetyTester() - .WithInitialValue(Thrower(kValue)) - // Ensures make_unique does not modify the input. The real - // test, though, is ConstructorTracker checking for leaks. - .WithContracts(testing::strong_guarantee); - - EXPECT_TRUE(tester.Test([](Thrower* thrower) { - static_cast(absl::make_unique(*thrower)); - })); - - EXPECT_TRUE(tester.Test([](Thrower* thrower) { - static_cast(absl::make_unique(std::move(*thrower))); - })); - - // Test T[n] overload - EXPECT_TRUE(tester.Test([&](Thrower*) { - static_cast(absl::make_unique(kLength)); - })); -} - -} // namespace -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_HAVE_EXCEPTIONS diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_test.cc index 1990c7ba47..6f01cdff9a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/memory/memory_test.cc @@ -63,12 +63,6 @@ TEST(WrapUniqueTest, WrapUnique) { } EXPECT_EQ(0, DestructorVerifier::instance_count()); } -TEST(MakeUniqueTest, Basic) { - std::unique_ptr p = absl::make_unique(); - EXPECT_EQ("", *p); - p = absl::make_unique("hi"); - EXPECT_EQ("hi", *p); -} // InitializationVerifier fills in a pattern when allocated so we can // distinguish between its default and value initialized states (without @@ -93,65 +87,6 @@ struct InitializationVerifier { int b; }; -TEST(Initialization, MakeUnique) { - auto p = absl::make_unique(); - - EXPECT_EQ(0, p->a); - EXPECT_EQ(0, p->b); -} - -TEST(Initialization, MakeUniqueArray) { - auto p = absl::make_unique(2); - - EXPECT_EQ(0, p[0].a); - EXPECT_EQ(0, p[0].b); - EXPECT_EQ(0, p[1].a); - EXPECT_EQ(0, p[1].b); -} - -struct MoveOnly { - MoveOnly() = default; - explicit MoveOnly(int i1) : ip1{new int{i1}} {} - MoveOnly(int i1, int i2) : ip1{new int{i1}}, ip2{new int{i2}} {} - std::unique_ptr ip1; - 
std::unique_ptr ip2; -}; - -struct AcceptMoveOnly { - explicit AcceptMoveOnly(MoveOnly m) : m_(std::move(m)) {} - MoveOnly m_; -}; - -TEST(MakeUniqueTest, MoveOnlyTypeAndValue) { - using ExpectedType = std::unique_ptr; - { - auto p = absl::make_unique(); - static_assert(std::is_same::value, - "unexpected return type"); - EXPECT_TRUE(!p->ip1); - EXPECT_TRUE(!p->ip2); - } - { - auto p = absl::make_unique(1); - static_assert(std::is_same::value, - "unexpected return type"); - EXPECT_TRUE(p->ip1 && *p->ip1 == 1); - EXPECT_TRUE(!p->ip2); - } - { - auto p = absl::make_unique(1, 2); - static_assert(std::is_same::value, - "unexpected return type"); - EXPECT_TRUE(p->ip1 && *p->ip1 == 1); - EXPECT_TRUE(p->ip2 && *p->ip2 == 2); - } -} - -TEST(MakeUniqueTest, AcceptMoveOnly) { - auto p = absl::make_unique(MoveOnly()); - p = std::unique_ptr(new AcceptMoveOnly(MoveOnly())); -} - struct ArrayWatch { void* operator new[](size_t n) { allocs().push_back(n); @@ -164,38 +99,6 @@ struct ArrayWatch { } }; -TEST(Make_UniqueTest, Array) { - // Ensure state is clean before we start so that these tests - // are order-agnostic. - ArrayWatch::allocs().clear(); - - auto p = absl::make_unique(5); - static_assert(std::is_same>::value, - "unexpected return type"); - EXPECT_THAT(ArrayWatch::allocs(), ElementsAre(5 * sizeof(ArrayWatch))); -} - -TEST(Make_UniqueTest, NotAmbiguousWithStdMakeUnique) { - // Ensure that absl::make_unique is not ambiguous with std::make_unique. - // In C++14 mode, the below call to make_unique has both types as candidates. - struct TakesStdType { - explicit TakesStdType(const std::vector& vec) {} - }; - using absl::make_unique; - (void)make_unique(std::vector()); -} - -#if 0 -// These tests shouldn't compile. -TEST(MakeUniqueTestNC, AcceptMoveOnlyLvalue) { - auto m = MoveOnly(); - auto p = absl::make_unique(m); -} -TEST(MakeUniqueTestNC, KnownBoundArray) { - auto p = absl::make_unique(); -} -#endif - TEST(RawPtrTest, RawPointer) { int i = 5; EXPECT_EQ(&i, absl::RawPtr(&i)); @@ -548,22 +451,23 @@ struct MinimalMockAllocator { TEST(AllocatorTraits, FunctionsMinimal) { int trace = 0; int hint; - TestValue x(&trace); + alignas(TestValue) char buffer[sizeof(TestValue)]; + auto* x = reinterpret_cast(buffer); MinimalMockAllocator mock; using Traits = absl::allocator_traits; - EXPECT_CALL(mock, allocate(7)).WillRepeatedly(Return(&x)); - EXPECT_CALL(mock, deallocate(&x, 7)); + EXPECT_CALL(mock, allocate(7)).WillRepeatedly(Return(x)); + EXPECT_CALL(mock, deallocate(x, 7)); - EXPECT_EQ(&x, Traits::allocate(mock, 7)); + EXPECT_EQ(x, Traits::allocate(mock, 7)); static_cast(Traits::allocate(mock, 7, static_cast(&hint))); - EXPECT_EQ(&x, Traits::allocate(mock, 7, static_cast(&hint))); - Traits::deallocate(mock, &x, 7); + EXPECT_EQ(x, Traits::allocate(mock, 7, static_cast(&hint))); + Traits::deallocate(mock, x, 7); + EXPECT_EQ(0, trace); + Traits::construct(mock, x, &trace); EXPECT_EQ(1, trace); - Traits::construct(mock, &x, &trace); - EXPECT_EQ(2, trace); - Traits::destroy(mock, &x); - EXPECT_EQ(1, trace); + Traits::destroy(mock, x); + EXPECT_EQ(0, trace); EXPECT_EQ(std::numeric_limits::max() / sizeof(TestValue), Traits::max_size(mock)); @@ -599,7 +503,7 @@ TEST(AllocatorTraits, FunctionsFull) { EXPECT_CALL(mock, allocate(13, &hint)).WillRepeatedly(Return(&y)); EXPECT_CALL(mock, construct(&x, &trace)); EXPECT_CALL(mock, destroy(&x)); - EXPECT_CALL(mock, max_size()).WillRepeatedly(Return(17)); + EXPECT_CALL(mock, max_size()).WillRepeatedly(Return(17u)); EXPECT_CALL(mock, select_on_container_copy_construction()) 
.WillRepeatedly(Return(FullMockAllocator(23))); @@ -612,7 +516,7 @@ TEST(AllocatorTraits, FunctionsFull) { Traits::destroy(mock, &x); EXPECT_EQ(1, trace); - EXPECT_EQ(17, Traits::max_size(mock)); + EXPECT_EQ(17u, Traits::max_size(mock)); EXPECT_EQ(0, mock.value); EXPECT_EQ(23, Traits::select_on_container_copy_construction(mock).value); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/CMakeLists.txt index 9de4bd3751..f16f17bd64 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/CMakeLists.txt @@ -34,6 +34,7 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS + absl::core_headers absl::type_traits GTest::gmock_main ) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits.h index e7c123936d..6e6001fe8e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits.h @@ -35,7 +35,7 @@ #ifndef ABSL_META_TYPE_TRAITS_H_ #define ABSL_META_TYPE_TRAITS_H_ -#include +#include #include #include @@ -47,6 +47,14 @@ #define ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1 #endif +// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17 +// feature. +#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) +#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__ +#else // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) +#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t) +#endif // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) + namespace absl { ABSL_NAMESPACE_BEGIN @@ -290,8 +298,12 @@ struct is_function // https://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html#Type-Traits. template struct is_trivially_destructible +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE + : std::is_trivially_destructible { +#else : std::integral_constant::value> { +#endif #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE private: static constexpr bool compliant = std::is_trivially_destructible::value == @@ -339,9 +351,13 @@ struct is_trivially_destructible // Nontrivially destructible types will cause the expression to be nontrivial. template struct is_trivially_default_constructible +#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) + : std::is_trivially_default_constructible { +#else : std::integral_constant::value && is_trivially_destructible::value> { +#endif #if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \ !defined( \ ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION) @@ -373,10 +389,14 @@ struct is_trivially_default_constructible // expression to be nontrivial. template struct is_trivially_move_constructible +#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) + : std::is_trivially_move_constructible { +#else : std::conditional< std::is_object::value && !std::is_array::value, type_traits_internal::IsTriviallyMoveConstructibleObject, std::is_reference>::type::type { +#endif #if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \ !defined( \ ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION) @@ -482,9 +502,13 @@ struct is_trivially_move_assignable // `is_trivially_assignable`. 
template struct is_trivially_copy_assignable +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE + : std::is_trivially_copy_assignable { +#else : std::integral_constant< bool, __has_trivial_assign(typename std::remove_reference::type) && absl::is_copy_assignable::value> { +#endif #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE private: static constexpr bool compliant = @@ -536,6 +560,11 @@ namespace type_traits_internal { // destructible. Arrays of trivially copyable types are trivially copyable. // // We expose this metafunction only for internal use within absl. + +#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE) +template +struct is_trivially_copyable : std::is_trivially_copyable {}; +#else template class is_trivially_copyable_impl { using ExtentsRemoved = typename std::remove_all_extents::type; @@ -561,6 +590,7 @@ template struct is_trivially_copyable : std::integral_constant< bool, type_traits_internal::is_trivially_copyable_impl::kValue> {}; +#endif } // namespace type_traits_internal // ----------------------------------------------------------------------------- @@ -634,7 +664,8 @@ using underlying_type_t = typename std::underlying_type::type; namespace type_traits_internal { -#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \ + (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) // std::result_of is deprecated (C++17) or removed (C++20) template struct result_of; template @@ -782,6 +813,34 @@ using swap_internal::Swap; using swap_internal::StdSwapIsUnconstrained; } // namespace type_traits_internal + +// absl::is_trivially_relocatable +// Detects whether a type is "trivially relocatable" -- meaning it can be +// relocated without invoking the constructor/destructor, using a form of move +// elision. 
+// +// Example: +// +// if constexpr (absl::is_trivially_relocatable::value) { +// memcpy(new_location, old_location, sizeof(T)); +// } else { +// new(new_location) T(std::move(*old_location)); +// old_location->~T(); +// } +// +// Upstream documentation: +// +// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable +// +#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) +template +struct is_trivially_relocatable + : std::integral_constant {}; +#else +template +struct is_trivially_relocatable : std::integral_constant {}; +#endif + ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits_test.cc index 0ef5b66558..d08d9ad97c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/meta/type_traits_test.cc @@ -21,6 +21,7 @@ #include #include "gtest/gtest.h" +#include "absl/base/attributes.h" namespace { @@ -336,6 +337,7 @@ struct MovableNonCopyable { struct NonCopyableOrMovable { NonCopyableOrMovable() = default; + virtual ~NonCopyableOrMovable() = default; NonCopyableOrMovable(const NonCopyableOrMovable&) = delete; NonCopyableOrMovable(NonCopyableOrMovable&&) = delete; NonCopyableOrMovable& operator=(const NonCopyableOrMovable&) = delete; @@ -1393,4 +1395,22 @@ TEST(TypeTraitsTest, IsNothrowSwappable) { EXPECT_TRUE(IsNothrowSwappable::value); } +TEST(TrivallyRelocatable, Sanity) { +#if !defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) || \ + !ABSL_HAVE_BUILTIN(__is_trivially_relocatable) + GTEST_SKIP() << "No trivial ABI support."; +#endif + + struct Trivial {}; + struct NonTrivial { + NonTrivial(const NonTrivial&) {} // NOLINT + }; + struct ABSL_ATTRIBUTE_TRIVIAL_ABI TrivialAbi { + TrivialAbi(const TrivialAbi&) {} // NOLINT + }; + EXPECT_TRUE(absl::is_trivially_relocatable::value); + EXPECT_FALSE(absl::is_trivially_relocatable::value); + EXPECT_TRUE(absl::is_trivially_relocatable::value); +} + } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits.h index 52013ad49b..df81b9a929 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits.h @@ -131,7 +131,7 @@ has_single_bit(T x) noexcept { // fractional part discarded. template ABSL_INTERNAL_CONSTEXPR_CLZ inline - typename std::enable_if::value, T>::type + typename std::enable_if::value, int>::type bit_width(T x) noexcept { return std::numeric_limits::digits - countl_zero(x); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits_benchmark.cc new file mode 100644 index 0000000000..719bfa8135 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/bits_benchmark.cc @@ -0,0 +1,73 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "benchmark/benchmark.h" +#include "absl/base/optimization.h" +#include "absl/numeric/bits.h" +#include "absl/random/random.h" + +namespace absl { +namespace { + +template +static void BM_bitwidth(benchmark::State& state) { + const auto count = static_cast(state.range(0)); + + absl::BitGen rng; + std::vector values; + values.reserve(count); + for (size_t i = 0; i < count; ++i) { + values.push_back(absl::Uniform(rng, 0, std::numeric_limits::max())); + } + + while (state.KeepRunningBatch(count)) { + for (size_t i = 0; i < count; ++i) { + benchmark::DoNotOptimize(values[i]); + } + } +} +BENCHMARK_TEMPLATE(BM_bitwidth, uint8_t)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_bitwidth, uint16_t)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_bitwidth, uint32_t)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_bitwidth, uint64_t)->Range(1, 1 << 20); + +template +static void BM_bitwidth_nonzero(benchmark::State& state) { + const auto count = static_cast(state.range(0)); + + absl::BitGen rng; + std::vector values; + values.reserve(count); + for (size_t i = 0; i < count; ++i) { + values.push_back(absl::Uniform(rng, 1, std::numeric_limits::max())); + } + + while (state.KeepRunningBatch(count)) { + for (size_t i = 0; i < count; ++i) { + const T value = values[i]; + ABSL_ASSUME(value > 0); + benchmark::DoNotOptimize(value); + } + } +} +BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint8_t)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint16_t)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint32_t)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint64_t)->Range(1, 1 << 20); + +} // namespace +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.cc index 17d88744ae..e5526c6f59 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.cc @@ -42,11 +42,11 @@ namespace { // Returns: 2 inline ABSL_ATTRIBUTE_ALWAYS_INLINE int Fls128(uint128 n) { if (uint64_t hi = Uint128High64(n)) { - ABSL_INTERNAL_ASSUME(hi != 0); + ABSL_ASSUME(hi != 0); return 127 - countl_zero(hi); } const uint64_t low = Uint128Low64(n); - ABSL_INTERNAL_ASSUME(low != 0); + ABSL_ASSUME(low != 0); return 63 - countl_zero(low); } @@ -209,15 +209,16 @@ std::ostream& operator<<(std::ostream& os, uint128 v) { // Add the requisite padding. 
std::streamsize width = os.width(0); if (static_cast(width) > rep.size()) { + const size_t count = static_cast(width) - rep.size(); std::ios::fmtflags adjustfield = flags & std::ios::adjustfield; if (adjustfield == std::ios::left) { - rep.append(width - rep.size(), os.fill()); + rep.append(count, os.fill()); } else if (adjustfield == std::ios::internal && (flags & std::ios::showbase) && (flags & std::ios::basefield) == std::ios::hex && v != 0) { - rep.insert(2, width - rep.size(), os.fill()); + rep.insert(2, count, os.fill()); } else { - rep.insert(0, width - rep.size(), os.fill()); + rep.insert(0, count, os.fill()); } } @@ -306,22 +307,23 @@ std::ostream& operator<<(std::ostream& os, int128 v) { // Add the requisite padding. std::streamsize width = os.width(0); if (static_cast(width) > rep.size()) { + const size_t count = static_cast(width) - rep.size(); switch (flags & std::ios::adjustfield) { case std::ios::left: - rep.append(width - rep.size(), os.fill()); + rep.append(count, os.fill()); break; case std::ios::internal: if (print_as_decimal && (rep[0] == '+' || rep[0] == '-')) { - rep.insert(1, width - rep.size(), os.fill()); + rep.insert(1, count, os.fill()); } else if ((flags & std::ios::basefield) == std::ios::hex && (flags & std::ios::showbase) && v != 0) { - rep.insert(2, width - rep.size(), os.fill()); + rep.insert(2, count, os.fill()); } else { - rep.insert(0, width - rep.size(), os.fill()); + rep.insert(0, count, os.fill()); } break; default: // std::ios::right - rep.insert(0, width - rep.size(), os.fill()); + rep.insert(0, count, os.fill()); break; } } @@ -332,6 +334,7 @@ std::ostream& operator<<(std::ostream& os, int128 v) { ABSL_NAMESPACE_END } // namespace absl +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL namespace std { constexpr bool numeric_limits::is_specialized; constexpr bool numeric_limits::is_signed; @@ -381,3 +384,4 @@ constexpr int numeric_limits::max_exponent10; constexpr bool numeric_limits::traps; constexpr bool numeric_limits::tinyness_before; } // namespace std +#endif diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.h index c7ad96befd..7a899eec84 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128.h @@ -44,7 +44,7 @@ // builtin type. We need to make sure not to define operator wchar_t() // alongside operator unsigned short() in these instances. #define ABSL_INTERNAL_WCHAR_T __wchar_t -#if defined(_M_X64) +#if defined(_M_X64) && !defined(_M_ARM64EC) #include #pragma intrinsic(_umul128) #endif // defined(_M_X64) @@ -980,7 +980,7 @@ inline uint128 operator*(uint128 lhs, uint128 rhs) { // can be used for uint128 storage. 
return static_cast(lhs) * static_cast(rhs); -#elif defined(_MSC_VER) && defined(_M_X64) +#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC) uint64_t carry; uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry); return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) + diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_no_intrinsic.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_no_intrinsic.inc index 66f6809ffd..8834804cec 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_no_intrinsic.inc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_no_intrinsic.inc @@ -279,7 +279,7 @@ constexpr int128 operator^(int128 lhs, int128 rhs) { } constexpr int128 operator<<(int128 lhs, int amount) { - // uint64_t shifts of >= 64 are undefined, so we need some special-casing. + // int64_t shifts of >= 64 are undefined, so we need some special-casing. return amount >= 64 ? MakeInt128( static_cast(Int128Low64(lhs) << (amount - 64)), 0) @@ -292,10 +292,16 @@ constexpr int128 operator<<(int128 lhs, int amount) { } constexpr int128 operator>>(int128 lhs, int amount) { - // uint64_t shifts of >= 64 are undefined, so we need some special-casing. + // int64_t shifts of >= 64 are undefined, so we need some special-casing. + // The (Int128High64(lhs) >> 32) >> 32 "trick" causes the most significant + // int64 to be initialized with all zeros or all ones correctly. It takes + // into account whether the number is negative or positive, and whether the + // current architecture does arithmetic or logical right shifts for negative + // numbers. return amount >= 64 ? MakeInt128( - 0, static_cast(Int128High64(lhs) >> (amount - 64))) + (Int128High64(lhs) >> 32) >> 32, + static_cast(Int128High64(lhs) >> (amount - 64))) : amount == 0 ? 
lhs : MakeInt128(Int128High64(lhs) >> amount, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_test.cc index c445d89a99..dd9425d77a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/numeric/int128_test.cc @@ -239,6 +239,24 @@ TEST(Uint128, AllTests) { EXPECT_EQ(absl::Uint128Max(), absl::kuint128max); } +TEST(Int128, RightShiftOfNegativeNumbers) { + absl::int128 minus_six = -6; + absl::int128 minus_three = -3; + absl::int128 minus_two = -2; + absl::int128 minus_one = -1; + if ((-6 >> 1) == -3) { + // Right shift is arithmetic (sign propagates) + EXPECT_EQ(minus_six >> 1, minus_three); + EXPECT_EQ(minus_six >> 2, minus_two); + EXPECT_EQ(minus_six >> 65, minus_one); + } else { + // Right shift is logical (zeros shifted in at MSB) + EXPECT_EQ(minus_six >> 1, absl::int128(absl::uint128(minus_six) >> 1)); + EXPECT_EQ(minus_six >> 2, absl::int128(absl::uint128(minus_six) >> 2)); + EXPECT_EQ(minus_six >> 65, absl::int128(absl::uint128(minus_six) >> 65)); + } +} + TEST(Uint128, ConversionTests) { EXPECT_TRUE(absl::MakeUint128(1, 0)); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/CMakeLists.txt index 7b6a778032..9b3a71021c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/CMakeLists.txt @@ -37,3 +37,57 @@ absl_cc_test( GTest::gmock_main ) +absl_cc_library( + NAME + exponential_biased + SRCS + "internal/exponential_biased.cc" + HDRS + "internal/exponential_biased.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + exponential_biased_test + SRCS + "internal/exponential_biased_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exponential_biased + absl::strings + GTest::gmock_main +) + +absl_cc_library( + NAME + periodic_sampler + SRCS + "internal/periodic_sampler.cc" + HDRS + "internal/periodic_sampler.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::exponential_biased +) + +absl_cc_test( + NAME + periodic_sampler_test + SRCS + "internal/periodic_sampler_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::core_headers + absl::periodic_sampler + GTest::gmock_main +) + diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc similarity index 96% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc index 05aeea566c..81d9a75765 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/base/internal/exponential_biased.h" +#include "absl/profiling/internal/exponential_biased.h" #include @@ -26,7 +26,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { // The algorithm generates a random number between 0 and 1 and applies the // inverse cumulative distribution function for an exponential. Specifically: @@ -88,6 +88,6 @@ void ExponentialBiased::Initialize() { initialized_ = true; } -} // namespace base_internal +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.h similarity index 95% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased.h rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.h index a81f10e230..d31f7782e8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ -#define ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ +#ifndef ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_ +#define ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_ #include @@ -22,7 +22,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { // ExponentialBiased provides a small and fast random number generator for a // rounded exponential distribution. This generator manages very little state, @@ -123,8 +123,8 @@ inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd) { return (prng_mult * rnd + prng_add) & prng_mod_mask; } -} // namespace base_internal +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ +#endif // ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc similarity index 89% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased_test.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc index 075583ca6f..ebfbcad4f0 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/exponential_biased_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/base/internal/exponential_biased.h" +#include "absl/profiling/internal/exponential_biased.h" #include @@ -28,7 +28,8 @@ using ::testing::Ge; namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { +namespace { MATCHER_P2(IsBetween, a, b, absl::StrCat(std::string(negation ? 
"isn't" : "is"), " between ", a, @@ -93,13 +94,14 @@ double AndersonDarlingPValue(int n, double z) { } double AndersonDarlingStatistic(const std::vector& random_sample) { - int n = random_sample.size(); + size_t n = random_sample.size(); double ad_sum = 0; - for (int i = 0; i < n; i++) { + for (size_t i = 0; i < n; i++) { ad_sum += (2 * i + 1) * std::log(random_sample[i] * (1 - random_sample[n - 1 - i])); } - double ad_statistic = -n - 1 / static_cast(n) * ad_sum; + const auto n_as_double = static_cast(n); + double ad_statistic = -n_as_double - 1 / n_as_double * ad_sum; return ad_statistic; } @@ -110,14 +112,15 @@ double AndersonDarlingStatistic(const std::vector& random_sample) { // Marsaglia and Marsaglia for details. double AndersonDarlingTest(const std::vector& random_sample) { double ad_statistic = AndersonDarlingStatistic(random_sample); - double p = AndersonDarlingPValue(random_sample.size(), ad_statistic); + double p = AndersonDarlingPValue(static_cast(random_sample.size()), + ad_statistic); return p; } TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) { ExponentialBiased eb; for (int runs = 0; runs < 10; ++runs) { - for (int flips = eb.GetSkipCount(1); flips > 0; --flips) { + for (int64_t flips = eb.GetSkipCount(1); flips > 0; --flips) { printf("head..."); } printf("tail\n"); @@ -131,7 +134,7 @@ TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) { TEST(ExponentialBiasedTest, SampleDemoWithStride) { ExponentialBiased eb; - int stride = eb.GetStride(10); + int64_t stride = eb.GetStride(10); int samples = 0; for (int i = 0; i < 10000000; ++i) { if (--stride == 0) { @@ -146,7 +149,7 @@ TEST(ExponentialBiasedTest, SampleDemoWithStride) { // Testing that NextRandom generates uniform random numbers. Applies the // Anderson-Darling test for uniformity TEST(ExponentialBiasedTest, TestNextRandom) { - for (auto n : std::vector({ + for (auto n : std::vector({ 10, // Check short-range correlation 100, 1000, 10000 // Make sure there's no systemic error @@ -160,7 +163,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) { } std::vector int_random_sample(n); // Collect samples - for (int i = 0; i < n; i++) { + for (size_t i = 0; i < n; i++) { int_random_sample[i] = x; x = ExponentialBiased::NextRandom(x); } @@ -168,7 +171,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) { std::sort(int_random_sample.begin(), int_random_sample.end()); std::vector random_sample(n); // Convert them to uniform randoms (in the range [0,1]) - for (int i = 0; i < n; i++) { + for (size_t i = 0; i < n; i++) { random_sample[i] = static_cast(int_random_sample[i]) / max_prng_value; } @@ -194,6 +197,7 @@ TEST(ExponentialBiasedTest, InitializationModes) { EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0)); } -} // namespace base_internal +} // namespace +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler.cc similarity index 88% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler.cc index 520dabbaa0..a738a82c86 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler.cc @@ -12,15 +12,15 @@ // 
See the License for the specific language governing permissions and // limitations under the License. -#include "absl/base/internal/periodic_sampler.h" +#include "absl/profiling/internal/periodic_sampler.h" #include -#include "absl/base/internal/exponential_biased.h" +#include "absl/profiling/internal/exponential_biased.h" namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept { return rng_.GetStride(period); @@ -48,6 +48,6 @@ bool PeriodicSamplerBase::SubtleConfirmSample() noexcept { return true; } -} // namespace base_internal +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler.h similarity index 95% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler.h rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler.h index f8a86796b1..54f0af452b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler.h @@ -12,19 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ -#define ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ +#ifndef ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_ +#define ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_ #include #include -#include "absl/base/internal/exponential_biased.h" #include "absl/base/optimization.h" +#include "absl/profiling/internal/exponential_biased.h" namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { // PeriodicSamplerBase provides the basic period sampler implementation. 
// @@ -149,7 +149,7 @@ class PeriodicSamplerBase { // ICC x64 (OK) : https://gcc.godbolt.org/z/ptTNfD // MSVC x64 (OK) : https://gcc.godbolt.org/z/76j4-5 uint64_t stride_ = 0; - ExponentialBiased rng_; + absl::profiling_internal::ExponentialBiased rng_; }; inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept { @@ -204,8 +204,8 @@ class PeriodicSampler final : public PeriodicSamplerBase { template std::atomic PeriodicSampler::period_(default_period); -} // namespace base_internal +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ +#endif // ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc similarity index 94% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc index 5ad469ce79..8f0e5574c3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "absl/profiling/internal/periodic_sampler.h" #include "benchmark/benchmark.h" -#include "absl/base/internal/periodic_sampler.h" namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { namespace { template @@ -74,6 +74,6 @@ void BM_PeriodicSampler_Disabled(benchmark::State& state) { BENCHMARK(BM_PeriodicSampler_Disabled); } // namespace -} // namespace base_internal +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc similarity index 97% rename from third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler_test.cc rename to third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc index 3b301e37ab..ef986f3878 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/base/internal/periodic_sampler_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "absl/base/internal/periodic_sampler.h" +#include "absl/profiling/internal/periodic_sampler.h" #include // NOLINT(build/c++11) @@ -23,7 +23,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace profiling_internal { namespace { using testing::Eq; @@ -172,6 +172,6 @@ TEST(PeriodicSamplerTest, SetGlobalPeriod) { } } // namespace -} // namespace base_internal +} // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder.h index 5e04a9cd19..ef1489b1f6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder.h @@ -46,6 +46,7 @@ struct Sample { absl::Mutex init_mu; T* next = nullptr; T* dead ABSL_GUARDED_BY(init_mu) = nullptr; + int64_t weight; // How many sampling events were required to sample this one. }; // Holds samples and their associated stack traces with a soft limit of @@ -59,7 +60,8 @@ class SampleRecorder { ~SampleRecorder(); // Registers for sampling. Returns an opaque registration info. - T* Register(); + template + T* Register(Targs&&... args); // Unregisters the sample. void Unregister(T* sample); @@ -75,16 +77,18 @@ class SampleRecorder { // samples that have been dropped. int64_t Iterate(const std::function& f); - void SetMaxSamples(int32_t max); + size_t GetMaxSamples() const; + void SetMaxSamples(size_t max); private: void PushNew(T* sample); void PushDead(T* sample); - T* PopDead(); + template + T* PopDead(Targs... args); std::atomic dropped_samples_; std::atomic size_estimate_; - std::atomic max_samples_{1 << 20}; + std::atomic max_samples_{1 << 20}; // Intrusive lock free linked lists for tracking samples. // @@ -162,7 +166,8 @@ void SampleRecorder::PushDead(T* sample) { } template -T* SampleRecorder::PopDead() { +template +T* SampleRecorder::PopDead(Targs... args) { absl::MutexLock graveyard_lock(&graveyard_.init_mu); // The list is circular, so eventually it collapses down to @@ -174,23 +179,28 @@ T* SampleRecorder::PopDead() { absl::MutexLock sample_lock(&sample->init_mu); graveyard_.dead = sample->dead; sample->dead = nullptr; - sample->PrepareForSampling(); + sample->PrepareForSampling(std::forward(args)...); return sample; } template -T* SampleRecorder::Register() { - int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed); +template +T* SampleRecorder::Register(Targs&&... args) { + size_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed); if (size > max_samples_.load(std::memory_order_relaxed)) { size_estimate_.fetch_sub(1, std::memory_order_relaxed); dropped_samples_.fetch_add(1, std::memory_order_relaxed); return nullptr; } - T* sample = PopDead(); + T* sample = PopDead(args...); if (sample == nullptr) { // Resurrection failed. Hire a new warlock. 
sample = new T(); + { + absl::MutexLock sample_lock(&sample->init_mu); + sample->PrepareForSampling(std::forward(args)...); + } PushNew(sample); } @@ -219,10 +229,15 @@ int64_t SampleRecorder::Iterate( } template -void SampleRecorder::SetMaxSamples(int32_t max) { +void SampleRecorder::SetMaxSamples(size_t max) { max_samples_.store(max, std::memory_order_release); } +template +size_t SampleRecorder::GetMaxSamples() const { + return max_samples_.load(std::memory_order_acquire); +} + } // namespace profiling_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc index ec6e0fa22d..3373329a8a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc @@ -36,7 +36,7 @@ using ::testing::UnorderedElementsAre; struct Info : public Sample { public: - void PrepareForSampling() {} + void PrepareForSampling(int64_t w) { weight = w; } std::atomic size; absl::Time create_time; }; @@ -49,8 +49,14 @@ std::vector GetSizes(SampleRecorder* s) { return res; } -Info* Register(SampleRecorder* s, size_t size) { - auto* info = s->Register(); +std::vector GetWeights(SampleRecorder* s) { + std::vector res; + s->Iterate([&](const Info& info) { res.push_back(info.weight); }); + return res; +} + +Info* Register(SampleRecorder* s, int64_t weight, size_t size) { + auto* info = s->Register(weight); assert(info != nullptr); info->size.store(size); return info; @@ -58,13 +64,15 @@ Info* Register(SampleRecorder* s, size_t size) { TEST(SampleRecorderTest, Registration) { SampleRecorder sampler; - auto* info1 = Register(&sampler, 1); + auto* info1 = Register(&sampler, 31, 1); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1)); + EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31)); - auto* info2 = Register(&sampler, 2); + auto* info2 = Register(&sampler, 32, 2); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2)); info1->size.store(3); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2)); + EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31, 32)); sampler.Unregister(info1); sampler.Unregister(info2); @@ -74,18 +82,22 @@ TEST(SampleRecorderTest, Unregistration) { SampleRecorder sampler; std::vector infos; for (size_t i = 0; i < 3; ++i) { - infos.push_back(Register(&sampler, i)); + infos.push_back(Register(&sampler, 33 + i, i)); } EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2)); + EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 34, 35)); sampler.Unregister(infos[1]); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2)); + EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35)); - infos.push_back(Register(&sampler, 3)); - infos.push_back(Register(&sampler, 4)); + infos.push_back(Register(&sampler, 36, 3)); + infos.push_back(Register(&sampler, 37, 4)); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4)); + EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 36, 37)); sampler.Unregister(infos[3]); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4)); + EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 37)); sampler.Unregister(infos[0]); sampler.Unregister(infos[2]); @@ -99,18 +111,18 @@ TEST(SampleRecorderTest, MultiThreaded) { 
ThreadPool pool(10); for (int i = 0; i < 10; ++i) { - pool.Schedule([&sampler, &stop]() { + pool.Schedule([&sampler, &stop, i]() { std::random_device rd; std::mt19937 gen(rd()); std::vector infoz; while (!stop.HasBeenNotified()) { if (infoz.empty()) { - infoz.push_back(sampler.Register()); + infoz.push_back(sampler.Register(i)); } switch (std::uniform_int_distribution<>(0, 2)(gen)) { case 0: { - infoz.push_back(sampler.Register()); + infoz.push_back(sampler.Register(i)); break; } case 1: { @@ -119,6 +131,7 @@ TEST(SampleRecorderTest, MultiThreaded) { Info* info = infoz[p]; infoz[p] = infoz.back(); infoz.pop_back(); + EXPECT_EQ(info->weight, i); sampler.Unregister(info); break; } @@ -143,8 +156,8 @@ TEST(SampleRecorderTest, MultiThreaded) { TEST(SampleRecorderTest, Callback) { SampleRecorder sampler; - auto* info1 = Register(&sampler, 1); - auto* info2 = Register(&sampler, 2); + auto* info1 = Register(&sampler, 39, 1); + auto* info2 = Register(&sampler, 40, 2); static const Info* expected; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/CMakeLists.txt index 9d1c67fb33..c74fd30064 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/CMakeLists.txt @@ -121,6 +121,7 @@ absl_cc_library( absl::variant GTest::gmock GTest::gtest + PUBLIC TESTONLY ) @@ -222,8 +223,8 @@ absl_cc_library( LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS + absl::config absl::inlined_vector - absl::random_internal_nonsecure_base absl::random_internal_pool_urbg absl::random_internal_salted_seed_seq absl::random_internal_seed_material @@ -568,7 +569,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} - $<$:"bcrypt"> + $<$:-lbcrypt> DEPS absl::core_headers absl::optional @@ -726,7 +727,7 @@ absl_cc_library( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::core_headers - absl::optional + absl::inlined_vector absl::random_internal_pool_urbg absl::random_internal_salted_seed_seq absl::random_internal_seed_material @@ -1210,5 +1211,6 @@ absl_cc_test( absl::random_internal_wide_multiply absl::bits absl::int128 + GTest::gmock GTest::gtest_main ) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bernoulli_distribution.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bernoulli_distribution.h index 25bd0d5ca4..d81b6ae6b1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bernoulli_distribution.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bernoulli_distribution.h @@ -138,16 +138,16 @@ bool bernoulli_distribution::Generate(double p, // 64 bits. // // Second, `c` is constructed by first casting explicitly to a signed - // integer and then converting implicitly to an unsigned integer of the same + // integer and then casting explicitly to an unsigned integer of the same // size. This is done because the hardware conversion instructions produce // signed integers from double; if taken as a uint64_t the conversion would // be wrong for doubles greater than 2^63 (not relevant in this use-case). // If converted directly to an unsigned integer, the compiler would end up // emitting code to handle such large values that are not relevant due to // the known bounds on `c`. 
To avoid these extra instructions this - implementation converts first to the signed type and then use the - implicit conversion to unsigned (which is a no-op). - const uint64_t c = static_cast(p * kP32); + // implementation converts first to the signed type and then converts to + // unsigned (which is a no-op). + const uint64_t c = static_cast(static_cast(p * kP32)); const uint32_t v = fast_u32(g); // FAST PATH: this path fails with probability 1/2^32. Note that simply // returning v <= c would approximate P very well (up to an absolute error diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/beta_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/beta_distribution_test.cc index d980c969f7..c16fbb4f0e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/beta_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/beta_distribution_test.cc @@ -45,16 +45,26 @@ namespace { template class BetaDistributionInterfaceTest : public ::testing::Test {}; -// double-double arithmetic is not supported well by either GCC or Clang; see -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048, -// https://bugs.llvm.org/show_bug.cgi?id=49131, and -// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests -// with double doubles until compiler support is better. -using RealTypes = - std::conditional, - ::testing::Types>::type; -TYPED_TEST_CASE(BetaDistributionInterfaceTest, RealTypes); +constexpr bool ShouldExerciseLongDoubleTests() { + // long double arithmetic is not supported well by either GCC or Clang on + // most platforms, specifically not when implemented in terms of double-double; + // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048, + // https://bugs.llvm.org/show_bug.cgi?id=49131, and + // https://bugs.llvm.org/show_bug.cgi?id=49132. + // So a conservative choice here is to disable long-double tests pretty much + // everywhere except on x64 but only if long double is not implemented as + // double-double. +#if defined(__i686__) && defined(__x86_64__) + return !absl::numeric_internal::IsDoubleDouble(); +#else + return false; +#endif +} + +using RealTypes = std::conditional, + ::testing::Types>::type; +TYPED_TEST_SUITE(BetaDistributionInterfaceTest, RealTypes); TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) { // The threshold for whether std::exp(1/a) is finite. 
@@ -431,13 +441,13 @@ std::string ParamName( return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}}); } -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( TestSampleStatisticsCombinations, BetaDistributionTest, ::testing::Combine(::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4), ::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4)), ParamName); -INSTANTIATE_TEST_CASE_P( +INSTANTIATE_TEST_SUITE_P( TestSampleStatistics_SelectedPairs, BetaDistributionTest, ::testing::Values(std::make_pair(0.5, 1000), std::make_pair(1000, 0.5), std::make_pair(900, 1000), std::make_pair(10000, 20000), diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bit_gen_ref.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bit_gen_ref.h index 9555460fd4..e475221a15 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bit_gen_ref.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/bit_gen_ref.h @@ -24,6 +24,10 @@ #ifndef ABSL_RANDOM_BIT_GEN_REF_H_ #define ABSL_RANDOM_BIT_GEN_REF_H_ +#include +#include +#include + #include "absl/base/internal/fast_type_id.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions.h index 31c79694e5..37fc3aa7fd 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions.h @@ -373,7 +373,7 @@ RealType Gaussian(URBG&& urbg, // NOLINT(runtime/references) template IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references) IntType lo, IntType hi, IntType base = 2) { - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Template-argument 'IntType' must be an integral type, in " "absl::LogUniform(...)"); @@ -403,7 +403,7 @@ IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references) template IntType Poisson(URBG&& urbg, // NOLINT(runtime/references) double mean = 1.0) { - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Template-argument 'IntType' must be an integral type, in " "absl::Poisson(...)"); @@ -435,7 +435,7 @@ template IntType Zipf(URBG&& urbg, // NOLINT(runtime/references) IntType hi = (std::numeric_limits::max)(), double q = 2.0, double v = 1.0) { - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Template-argument 'IntType' must be an integral type, in " "absl::Zipf(...)"); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions_test.cc index d3a5dd75e5..5321a11c85 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/distributions_test.cc @@ -220,6 +220,7 @@ TEST_F(RandomDistributionsTest, UniformNoBounds) { absl::Uniform(gen); absl::Uniform(gen); absl::Uniform(gen); + absl::Uniform(gen); } TEST_F(RandomDistributionsTest, UniformNonsenseRanges) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/exponential_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/exponential_distribution_test.cc index 
81a5d17bac..3c44d9ec5b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/exponential_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/exponential_distribution_test.cc @@ -58,7 +58,7 @@ using RealTypes = std::conditional, ::testing::Types>::type; -TYPED_TEST_CASE(ExponentialDistributionTypedTest, RealTypes); +TYPED_TEST_SUITE(ExponentialDistributionTypedTest, RealTypes); TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) { using param_type = @@ -343,8 +343,8 @@ std::string ParamName(const ::testing::TestParamInfo& info) { return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}}); } -INSTANTIATE_TEST_CASE_P(All, ExponentialDistributionTests, - ::testing::ValuesIn(GenParams()), ParamName); +INSTANTIATE_TEST_SUITE_P(All, ExponentialDistributionTests, + ::testing::ValuesIn(GenParams()), ParamName); // NOTE: absl::exponential_distribution is not guaranteed to be stable. TEST(ExponentialDistributionTest, StabilityTest) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/gaussian_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/gaussian_distribution_test.cc index c0bac2b0db..4584ac9205 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/gaussian_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/gaussian_distribution_test.cc @@ -54,7 +54,7 @@ using RealTypes = std::conditional, ::testing::Types>::type; -TYPED_TEST_CASE(GaussianDistributionInterfaceTest, RealTypes); +TYPED_TEST_SUITE(GaussianDistributionInterfaceTest, RealTypes); TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) { using param_type = diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/generators_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/generators_test.cc index 41725f139c..14fd24e9b6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/generators_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/generators_test.cc @@ -107,6 +107,8 @@ void TestPoisson(URBG* gen) { absl::Poisson(*gen); absl::Poisson(*gen); absl::Poisson(URBG()); + absl::Poisson(*gen); + absl::Poisson(*gen); } template @@ -126,6 +128,8 @@ void TestZipf(URBG* gen) { absl::Zipf(*gen, 1 << 10); absl::Zipf(*gen, 1 << 10); absl::Zipf(URBG(), 1 << 10); + absl::Zipf(*gen, 1 << 10); + absl::Zipf(*gen, 1 << 10); } template @@ -146,6 +150,8 @@ void TestLogNormal(URBG* gen) { absl::LogUniform(*gen, 0, 1 << 10); absl::LogUniform(*gen, 0, 1 << 10); absl::LogUniform(URBG(), 0, 1 << 10); + absl::LogUniform(*gen, 0, 1 << 10); + absl::LogUniform(*gen, 0, 1 << 10); } template diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/chi_square.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/chi_square.cc index 640d48cea6..fbe0173299 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/chi_square.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/chi_square.cc @@ -125,7 +125,8 @@ double ChiSquareValue(int dof, double p) { const double variance = 2.0 / (9 * dof); // Cannot use this method if the variance is 0. 
if (variance != 0) { - return std::pow(z * std::sqrt(variance) + mean, 3.0) * dof; + double term = z * std::sqrt(variance) + mean; + return dof * (term * term * term); } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/distribution_caller.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/distribution_caller.h index fc81b787eb..0f162a4e29 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/distribution_caller.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/distribution_caller.h @@ -18,6 +18,7 @@ #define ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_ #include +#include #include "absl/base/config.h" #include "absl/base/internal/fast_type_id.h" @@ -32,6 +33,8 @@ namespace random_internal { // to intercept such calls. template struct DistributionCaller { + static_assert(!std::is_pointer::value, + "You must pass a reference, not a pointer."); // SFINAE to detect whether the URBG type includes a member matching // bool InvokeMock(base_internal::FastTypeIdType, void*, void*). // diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq.h index e3aa31a184..25f791535f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq.h @@ -74,7 +74,7 @@ class ExplicitSeedSeq { template void generate(OutIterator begin, OutIterator end) { for (size_t index = 0; begin != end; begin++) { - *begin = state_.empty() ? 0 : little_endian::FromHost32(state_[index++]); + *begin = state_.empty() ? 0 : state_[index++]; if (index >= state_.size()) { index = 0; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc index a55ad73948..e36d5fa028 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc @@ -24,6 +24,8 @@ namespace { +using ::absl::random_internal::ExplicitSeedSeq; + template bool ConformsToInterface() { // Check that the SeedSequence can be default-constructed. @@ -64,14 +66,14 @@ TEST(SeedSequences, CheckInterfaces) { EXPECT_TRUE(ConformsToInterface()); // Abseil classes - EXPECT_TRUE(ConformsToInterface()); + EXPECT_TRUE(ConformsToInterface()); } TEST(ExplicitSeedSeq, DefaultConstructorGeneratesZeros) { const size_t kNumBlocks = 128; uint32_t outputs[kNumBlocks]; - absl::random_internal::ExplicitSeedSeq seq; + ExplicitSeedSeq seq; seq.generate(outputs, &outputs[kNumBlocks]); for (uint32_t& seed : outputs) { @@ -87,8 +89,7 @@ TEST(ExplicitSeeqSeq, SeedMaterialIsForwardedIdentically) { for (uint32_t& seed : seed_material) { seed = urandom(); } - absl::random_internal::ExplicitSeedSeq seq(seed_material, - &seed_material[kNumBlocks]); + ExplicitSeedSeq seq(seed_material, &seed_material[kNumBlocks]); // Check that output is same as seed-material provided to constructor. 
{ @@ -133,17 +134,14 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) { for (uint32_t& entry : entropy) { entry = urandom(); } - absl::random_internal::ExplicitSeedSeq seq_from_entropy(std::begin(entropy), - std::end(entropy)); + ExplicitSeedSeq seq_from_entropy(std::begin(entropy), std::end(entropy)); // Copy constructor. { - absl::random_internal::ExplicitSeedSeq seq_copy(seq_from_entropy); + ExplicitSeedSeq seq_copy(seq_from_entropy); EXPECT_EQ(seq_copy.size(), seq_from_entropy.size()); - std::vector seeds_1; - seeds_1.resize(1000, 0); - std::vector seeds_2; - seeds_2.resize(1000, 1); + std::vector seeds_1(1000, 0); + std::vector seeds_2(1000, 1); seq_from_entropy.generate(seeds_1.begin(), seeds_1.end()); seq_copy.generate(seeds_2.begin(), seeds_2.end()); @@ -155,13 +153,10 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) { for (uint32_t& entry : entropy) { entry = urandom(); } - absl::random_internal::ExplicitSeedSeq another_seq(std::begin(entropy), - std::end(entropy)); + ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy)); - std::vector seeds_1; - seeds_1.resize(1000, 0); - std::vector seeds_2; - seeds_2.resize(1000, 0); + std::vector seeds_1(1000, 0); + std::vector seeds_2(1000, 0); seq_from_entropy.generate(seeds_1.begin(), seeds_1.end()); another_seq.generate(seeds_2.begin(), seeds_2.end()); @@ -170,7 +165,15 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) { EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2))); // Apply the assignment-operator. + // GCC 12 has a false-positive -Wstringop-overflow warning here. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstringop-overflow" +#endif another_seq = seq_from_entropy; +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif // Re-generate seeds. seq_from_entropy.generate(seeds_1.begin(), seeds_1.end()); @@ -182,15 +185,13 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) { // Move constructor. { // Get seeds from seed-sequence constructed from entropy. - std::vector seeds_1; - seeds_1.resize(1000, 0); + std::vector seeds_1(1000, 0); seq_from_entropy.generate(seeds_1.begin(), seeds_1.end()); // Apply move-constructor move the sequence to another instance. absl::random_internal::ExplicitSeedSeq moved_seq( std::move(seq_from_entropy)); - std::vector seeds_2; - seeds_2.resize(1000, 1); + std::vector seeds_2(1000, 1); moved_seq.generate(seeds_2.begin(), seeds_2.end()); // Verify that seeds produced by moved-instance are the same as original. EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2)); @@ -202,3 +203,35 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) { EXPECT_THAT(seeds_1, Each(Eq(0))); } } + +TEST(ExplicitSeedSeq, StdURBGGoldenTests) { + // Verify that for std::- URBG instances the results are stable across + // platforms (these should have deterministic output). 
+ { + ExplicitSeedSeq seed_sequence{12, 34, 56}; + std::minstd_rand rng(seed_sequence); + + std::minstd_rand::result_type values[4] = {rng(), rng(), rng(), rng()}; + EXPECT_THAT(values, + testing::ElementsAre(579252, 43785881, 464353103, 1501811174)); + } + + { + ExplicitSeedSeq seed_sequence{12, 34, 56}; + std::mt19937 rng(seed_sequence); + + std::mt19937::result_type values[4] = {rng(), rng(), rng(), rng()}; + EXPECT_THAT(values, testing::ElementsAre(138416803, 151130212, 33817739, + 138416803)); + } + + { + ExplicitSeedSeq seed_sequence{12, 34, 56}; + std::mt19937_64 rng(seed_sequence); + + std::mt19937_64::result_type values[4] = {rng(), rng(), rng(), rng()}; + EXPECT_THAT(values, + testing::ElementsAre(19738651785169348, 1464811352364190456, + 18054685302720800, 19738651785169348)); + } +} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/fast_uniform_bits.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/fast_uniform_bits.h index 425aaf7d83..8d8ed04515 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/fast_uniform_bits.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/fast_uniform_bits.h @@ -22,6 +22,7 @@ #include "absl/base/config.h" #include "absl/meta/type_traits.h" +#include "absl/random/internal/traits.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -98,7 +99,7 @@ class FastUniformBits { result_type operator()(URBG& g); // NOLINT(runtime/references) private: - static_assert(std::is_unsigned::value, + static_assert(IsUnsigned::value, "Class-template FastUniformBits<> must be parameterized using " "an unsigned type."); @@ -150,7 +151,8 @@ FastUniformBits::Generate(URBG& g, // NOLINT(runtime/references) result_type r = static_cast(g() - kMin); for (size_t n = 1; n < kIters; ++n) { - r = (r << kShift) + static_cast(g() - kMin); + r = static_cast(r << kShift) + + static_cast(g() - kMin); } return r; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/generate_real.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/generate_real.h index d5fbb44c24..b569450cf7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/generate_real.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/generate_real.h @@ -50,10 +50,10 @@ struct GenerateSignedTag {}; // inputs, otherwise it never returns 0. // // When a value in U(0,1) is required, use: -// Uniform64ToReal; +// GenerateRealFromBits; // // When a value in U(-1,1) is required, use: -// Uniform64ToReal; +// GenerateRealFromBits; // // This generates more distinct values than the mathematical equivalent // `U(0, 1) * 2.0 - 1.0`. 
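The fast_uniform_bits.h hunk above wraps the shift-and-add loop in static_casts so the concatenation stays in the result type and avoids implicit integer promotion on narrow types. A minimal standalone sketch of that concatenation pattern follows; ConcatenateBits is a hypothetical name, and it assumes the generator covers a full power-of-two range so each call contributes exactly bits_per_call unbiased bits.

#include <cstdint>
#include <iostream>
#include <limits>
#include <random>

// Stitch together enough URBG outputs to fill ResultType. Each call
// contributes bits_per_call bits; min() is subtracted so the contribution
// starts at zero.
template <typename ResultType, typename URBG>
ResultType ConcatenateBits(URBG& g, int bits_per_call) {
  const int total_bits = std::numeric_limits<ResultType>::digits;
  ResultType r = 0;
  for (int produced = 0; produced < total_bits; produced += bits_per_call) {
    // Keep the shift in ResultType, mirroring the casts added in the hunk.
    r = static_cast<ResultType>(r << bits_per_call) +
        static_cast<ResultType>(g() - (URBG::min)());
  }
  return r;
}

int main() {
  std::mt19937 g(1);  // 32 bits per call over a power-of-two range
  std::cout << ConcatenateBits<uint64_t>(g, 32) << "\n";  // two calls -> 64 bits
}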
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/mock_helpers.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/mock_helpers.h index 9d6ab21ef5..882b0518ca 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/mock_helpers.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/mock_helpers.h @@ -18,6 +18,7 @@ #include #include +#include #include "absl/base/internal/fast_type_id.h" #include "absl/types/optional.h" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base.h index 730fa2ea12..c3b80335ae 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base.h @@ -17,28 +17,82 @@ #include #include -#include #include -#include -#include #include +#include #include #include "absl/base/macros.h" +#include "absl/container/inlined_vector.h" #include "absl/meta/type_traits.h" #include "absl/random/internal/pool_urbg.h" #include "absl/random/internal/salted_seed_seq.h" #include "absl/random/internal/seed_material.h" -#include "absl/types/optional.h" #include "absl/types/span.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace random_internal { +// RandenPoolSeedSeq is a custom seed sequence type where generate() fills the +// provided buffer via the RandenPool entropy source. +class RandenPoolSeedSeq { + private: + struct ContiguousTag {}; + struct BufferTag {}; + + // Generate random unsigned values directly into the buffer. + template + void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) { + const size_t n = static_cast(std::distance(begin, end)); + auto* a = &(*begin); + RandenPool::Fill( + absl::MakeSpan(reinterpret_cast(a), sizeof(*a) * n)); + } + + // Construct a buffer of size n and fill it with values, then copy + // those values into the seed iterators. + template + void generate_impl(BufferTag, RandomAccessIterator begin, + RandomAccessIterator end) { + const size_t n = std::distance(begin, end); + absl::InlinedVector data(n, 0); + RandenPool::Fill(absl::MakeSpan(data.begin(), data.end())); + std::copy(std::begin(data), std::end(data), begin); + } + + public: + using result_type = uint32_t; + + size_t size() { return 0; } + + template + void param(OutIterator) const {} + + template + void generate(RandomAccessIterator begin, RandomAccessIterator end) { + // RandomAccessIterator must be assignable from uint32_t + if (begin != end) { + using U = typename std::iterator_traits::value_type; + // ContiguousTag indicates the common case of a known contiguous buffer, + // which allows directly filling the buffer. In C++20, + // std::contiguous_iterator_tag provides a mechanism for testing this + // capability, however until Abseil's support requirements allow us to + // assume C++20, limit checks to a few common cases. + using TagType = absl::conditional_t< + (std::is_pointer::value || + std::is_same::iterator>::value), + ContiguousTag, BufferTag>; + + generate_impl(TagType{}, begin, end); + } + } +}; + // Each instance of NonsecureURBGBase will be seeded by variates produced // by a thread-unique URBG-instance. 
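The RandenPoolSeedSeq added above dispatches on the output iterator type: raw pointers and std::vector iterators are treated as contiguous and filled in place, while any other random-access iterator goes through a temporary uint32_t buffer and a copy. A simplified, self-contained sketch of that tag dispatch follows; SketchSeedSeq and FillEntropy are illustrative stand-ins rather than Abseil APIs, and the direct path is additionally restricted to uint32_t outputs to keep the example exact.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <random>
#include <type_traits>
#include <vector>

// Placeholder entropy source standing in for RandenPool<uint32_t>::Fill().
inline void FillEntropy(uint32_t* data, std::size_t n) {
  std::mt19937 g(0xA5A5A5A5u);
  std::generate(data, data + n, [&g] { return static_cast<uint32_t>(g()); });
}

struct SketchSeedSeq {
  using result_type = uint32_t;

  struct ContiguousTag {};
  struct BufferTag {};

  template <typename It>
  void generate(It begin, It end) {
    if (begin == end) return;
    using U = typename std::iterator_traits<It>::value_type;
    // Common contiguous cases: raw pointers and std::vector<U>::iterator.
    using Tag = typename std::conditional<
        std::is_same<U, uint32_t>::value &&
            (std::is_pointer<It>::value ||
             std::is_same<It, typename std::vector<U>::iterator>::value),
        ContiguousTag, BufferTag>::type;
    generate_impl(Tag{}, begin, end);
  }

 private:
  template <typename It>
  void generate_impl(ContiguousTag, It begin, It end) {
    // Fill the caller's storage directly; no intermediate buffer.
    FillEntropy(&*begin, static_cast<std::size_t>(std::distance(begin, end)));
  }

  template <typename It>
  void generate_impl(BufferTag, It begin, It end) {
    // Fill a temporary buffer, then copy-convert into the output range.
    std::vector<uint32_t> tmp(
        static_cast<std::size_t>(std::distance(begin, end)), 0);
    FillEntropy(tmp.data(), tmp.size());
    std::copy(tmp.begin(), tmp.end(), begin);
  }
};

int main() {
  SketchSeedSeq seq;
  uint32_t direct[4] = {0, 0, 0, 0};
  seq.generate(direct, direct + 4);              // contiguous path
  std::vector<uint64_t> widened(4, 0);
  seq.generate(widened.begin(), widened.end());  // buffered path
}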
-template +template class NonsecureURBGBase { public: using result_type = typename URBG::result_type; @@ -85,49 +139,6 @@ class NonsecureURBGBase { } private: - // Seeder is a custom seed sequence type where generate() fills the provided - // buffer via the RandenPool entropy source. - struct Seeder { - using result_type = uint32_t; - - size_t size() { return 0; } - - template - void param(OutIterator) const {} - - template - void generate(RandomAccessIterator begin, RandomAccessIterator end) { - if (begin != end) { - // begin, end must be random access iterators assignable from uint32_t. - generate_impl( - std::integral_constant{}, - begin, end); - } - } - - // Commonly, generate is invoked with a pointer to a buffer which - // can be cast to a uint32_t. - template - void generate_impl(std::integral_constant, - RandomAccessIterator begin, RandomAccessIterator end) { - auto buffer = absl::MakeSpan(begin, end); - auto target = absl::MakeSpan(reinterpret_cast(buffer.data()), - buffer.size()); - RandenPool::Fill(target); - } - - // The non-uint32_t case should be uncommon, and involves an extra copy, - // filling the uint32_t buffer and then mixing into the output. - template - void generate_impl(std::integral_constant, - RandomAccessIterator begin, RandomAccessIterator end) { - const size_t n = std::distance(begin, end); - absl::InlinedVector data(n, 0); - RandenPool::Fill(absl::MakeSpan(data.begin(), data.end())); - std::copy(std::begin(data), std::end(data), begin); - } - }; - static URBG ConstructURBG() { Seeder seeder; return URBG(seeder); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base_test.cc index 698027fc6e..3502243e15 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/nonsecure_base_test.cc @@ -15,6 +15,7 @@ #include "absl/random/internal/nonsecure_base.h" #include +#include #include #include #include @@ -192,54 +193,35 @@ TEST(NonsecureURBGBase, EqualSeedSequencesYieldEqualVariates) { } } -// This is a PRNG-compatible type specifically designed to test -// that NonsecureURBGBase::Seeder can correctly handle iterators -// to arbitrary non-uint32_t size types. 
-template -struct SeederTestEngine { - using result_type = T; +TEST(RandenPoolSeedSeqTest, SeederWorksForU32) { + absl::random_internal::RandenPoolSeedSeq seeder; - static constexpr result_type(min)() { - return (std::numeric_limits::min)(); - } - static constexpr result_type(max)() { - return (std::numeric_limits::max)(); - } - - template ::value>> - explicit SeederTestEngine(SeedSequence&& seq) { - seed(seq); - } - - SeederTestEngine(const SeederTestEngine&) = default; - SeederTestEngine& operator=(const SeederTestEngine&) = default; - SeederTestEngine(SeederTestEngine&&) = default; - SeederTestEngine& operator=(SeederTestEngine&&) = default; - - result_type operator()() { return state[0]; } - - template - void seed(SeedSequence&& seq) { - std::fill(std::begin(state), std::end(state), T(0)); - seq.generate(std::begin(state), std::end(state)); - } - - T state[2]; -}; - -TEST(NonsecureURBGBase, SeederWorksForU32) { - using U32 = - absl::random_internal::NonsecureURBGBase>; - U32 x; - EXPECT_NE(0, x()); + uint32_t state[2] = {0, 0}; + seeder.generate(std::begin(state), std::end(state)); + EXPECT_FALSE(state[0] == 0 && state[1] == 0); } -TEST(NonsecureURBGBase, SeederWorksForU64) { - using U64 = - absl::random_internal::NonsecureURBGBase>; +TEST(RandenPoolSeedSeqTest, SeederWorksForU64) { + absl::random_internal::RandenPoolSeedSeq seeder; - U64 x; - EXPECT_NE(0, x()); + uint64_t state[2] = {0, 0}; + seeder.generate(std::begin(state), std::end(state)); + EXPECT_FALSE(state[0] == 0 && state[1] == 0); + EXPECT_FALSE((state[0] >> 32) == 0 && (state[1] >> 32) == 0); +} + +TEST(RandenPoolSeedSeqTest, SeederWorksForS32) { + absl::random_internal::RandenPoolSeedSeq seeder; + + int32_t state[2] = {0, 0}; + seeder.generate(std::begin(state), std::end(state)); + EXPECT_FALSE(state[0] == 0 && state[1] == 0); +} + +TEST(RandenPoolSeedSeqTest, SeederWorksForVector) { + absl::random_internal::RandenPoolSeedSeq seeder; + + std::vector state(2); + seeder.generate(std::begin(state), std::end(state)); + EXPECT_FALSE(state[0] == 0 && state[1] == 0); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pcg_engine.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pcg_engine.h index 8efaf2e09a..e1f4ef3317 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pcg_engine.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pcg_engine.h @@ -221,48 +221,27 @@ class pcg_engine { template class pcg128_params { public: -#if ABSL_HAVE_INTRINSIC_INT128 - using state_type = __uint128_t; - static inline constexpr state_type make_u128(uint64_t a, uint64_t b) { - return (static_cast<__uint128_t>(a) << 64) | b; - } -#else using state_type = absl::uint128; - static inline constexpr state_type make_u128(uint64_t a, uint64_t b) { - return absl::MakeUint128(a, b); - } -#endif - static inline constexpr state_type multiplier() { - return make_u128(kMultA, kMultB); + return absl::MakeUint128(kMultA, kMultB); } static inline constexpr state_type increment() { - return make_u128(kIncA, kIncB); + return absl::MakeUint128(kIncA, kIncB); } }; // Implementation of the PCG xsl_rr_128_64 128-bit mixing function, which // accepts an input of state_type and mixes it into an output of result_type. 
struct pcg_xsl_rr_128_64 { -#if ABSL_HAVE_INTRINSIC_INT128 - using state_type = __uint128_t; -#else using state_type = absl::uint128; -#endif using result_type = uint64_t; inline uint64_t operator()(state_type state) { // This is equivalent to the xsl_rr_128_64 mixing function. -#if ABSL_HAVE_INTRINSIC_INT128 uint64_t rotate = static_cast(state >> 122u); state ^= state >> 64; uint64_t s = static_cast(state); -#else - uint64_t h = Uint128High64(state); - uint64_t rotate = h >> 58u; - uint64_t s = Uint128Low64(state) ^ h; -#endif - return rotr(s, rotate); + return rotr(s, static_cast(rotate)); } }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pool_urbg.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pool_urbg.cc index 725100a415..5aefa7d97b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pool_urbg.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/pool_urbg.cc @@ -131,7 +131,7 @@ void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) { } // Number of pooled urbg entries. -static constexpr int kPoolSize = 8; +static constexpr size_t kPoolSize = 8; // Shared pool entries. static absl::once_flag pool_once; @@ -147,15 +147,15 @@ ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize]; // on subsequent runs the order within the same program may be significantly // different. However, as other thread IDs are not assigned sequentially, // this is not expected to matter. -int GetPoolID() { +size_t GetPoolID() { static_assert(kPoolSize >= 1, "At least one urbg instance is required for PoolURBG"); - ABSL_CONST_INIT static std::atomic sequence{0}; + ABSL_CONST_INIT static std::atomic sequence{0}; #ifdef ABSL_HAVE_THREAD_LOCAL - static thread_local int my_pool_id = -1; - if (ABSL_PREDICT_FALSE(my_pool_id < 0)) { + static thread_local size_t my_pool_id = kPoolSize; + if (ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) { my_pool_id = (sequence++ % kPoolSize); } return my_pool_id; @@ -171,8 +171,8 @@ int GetPoolID() { // Store the value in the pthread_{get/set}specific. However an uninitialized // value is 0, so add +1 to distinguish from the null value. - intptr_t my_pool_id = - reinterpret_cast(pthread_getspecific(tid_key)); + uintptr_t my_pool_id = + reinterpret_cast(pthread_getspecific(tid_key)); if (ABSL_PREDICT_FALSE(my_pool_id == 0)) { // No allocated ID, allocate the next value, cache it, and return. my_pool_id = (sequence++ % kPoolSize) + 1; @@ -194,7 +194,7 @@ RandenPoolEntry* PoolAlignedAlloc() { // Not all the platforms that we build for have std::aligned_alloc, however // since we never free these objects, we can over allocate and munge the // pointers to the correct alignment. - intptr_t x = reinterpret_cast( + uintptr_t x = reinterpret_cast( new char[sizeof(RandenPoolEntry) + kAlignment]); auto y = x % kAlignment; void* aligned = reinterpret_cast(y == 0 ? 
x : (x + kAlignment - y)); @@ -215,7 +215,7 @@ void InitPoolURBG() { absl::MakeSpan(seed_material))) { random_internal::ThrowSeedGenException(); } - for (int i = 0; i < kPoolSize; i++) { + for (size_t i = 0; i < kPoolSize; i++) { shared_pools[i] = PoolAlignedAlloc(); shared_pools[i]->Init( absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize)); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen.h index 9a3840b8f1..9ff4a7a554 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen.h @@ -43,10 +43,8 @@ class Randen { // Generate updates the randen sponge. The outer portion of the sponge // (kCapacityBytes .. kStateBytes) may be consumed as PRNG state. - template - void Generate(T (&state)[N]) const { - static_assert(N * sizeof(T) == kStateBytes, - "Randen::Generate() requires kStateBytes of state"); + // REQUIRES: state points to kStateBytes of state. + inline void Generate(void* state) const { #if ABSL_RANDOM_INTERNAL_AES_DISPATCH // HW AES Dispatch. if (has_crypto_) { @@ -65,13 +63,9 @@ class Randen { // Absorb incorporates additional seed material into the randen sponge. After // absorb returns, Generate must be called before the state may be consumed. - template - void Absorb(const S (&seed)[M], T (&state)[N]) const { - static_assert(M * sizeof(S) == RandenTraits::kSeedBytes, - "Randen::Absorb() requires kSeedBytes of seed"); - - static_assert(N * sizeof(T) == RandenTraits::kStateBytes, - "Randen::Absorb() requires kStateBytes of state"); + // REQUIRES: seed points to kSeedBytes of seed. + // REQUIRES: state points to kStateBytes of state. + inline void Absorb(const void* seed, void* state) const { #if ABSL_RANDOM_INTERNAL_AES_DISPATCH // HW AES Dispatch. if (has_crypto_) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_detect.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_detect.cc index bbe7b96532..6dababa351 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_detect.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_detect.cc @@ -24,6 +24,11 @@ #include "absl/random/internal/platform.h" +#if !defined(__UCLIBC__) && defined(__GLIBC__) && \ + (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16)) +#define ABSL_HAVE_GETAUXVAL +#endif + #if defined(ABSL_ARCH_X86_64) #define ABSL_INTERNAL_USE_X86_CPUID #elif defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \ @@ -31,7 +36,7 @@ #if defined(__ANDROID__) #define ABSL_INTERNAL_USE_ANDROID_GETAUXVAL #define ABSL_INTERNAL_USE_GETAUXVAL -#elif defined(__linux__) +#elif defined(__linux__) && defined(ABSL_HAVE_GETAUXVAL) #define ABSL_INTERNAL_USE_LINUX_GETAUXVAL #define ABSL_INTERNAL_USE_GETAUXVAL #endif @@ -40,7 +45,6 @@ #if defined(ABSL_INTERNAL_USE_X86_CPUID) #if defined(_WIN32) || defined(_WIN64) #include // NOLINT(build/include_order) -#pragma intrinsic(__cpuid) #else // MSVC-equivalent __cpuid intrinsic function. 
static void __cpuid(int cpu_info[4], int info_type) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_engine.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_engine.h index 92bb8905f7..b47086649e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_engine.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_engine.h @@ -42,7 +42,7 @@ namespace random_internal { // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32. template -class alignas(16) randen_engine { +class alignas(8) randen_engine { public: // C++11 URBG interface: using result_type = T; @@ -58,7 +58,8 @@ class alignas(16) randen_engine { return (std::numeric_limits::max)(); } - explicit randen_engine(result_type seed_value = 0) { seed(seed_value); } + randen_engine() : randen_engine(0) {} + explicit randen_engine(result_type seed_value) { seed(seed_value); } template = kStateSizeT) { next_ = kCapacityT; - impl_.Generate(state_); + impl_.Generate(begin); } - - return little_endian::ToHost(state_[next_++]); + return little_endian::ToHost(begin[next_++]); } template @@ -92,9 +103,10 @@ class alignas(16) randen_engine { void seed(result_type seed_value = 0) { next_ = kStateSizeT; // Zeroes the inner state and fills the outer state with seed_value to - // mimics behaviour of reseed - std::fill(std::begin(state_), std::begin(state_) + kCapacityT, 0); - std::fill(std::begin(state_) + kCapacityT, std::end(state_), seed_value); + // mimic the behaviour of reseed + auto* begin = state(); + std::fill(begin, begin + kCapacityT, 0); + std::fill(begin + kCapacityT, begin + kStateSizeT, seed_value); } // Inserts entropy into (part of) the state. Calling this periodically with @@ -105,7 +117,6 @@ class alignas(16) randen_engine { using sequence_result_type = typename SeedSequence::result_type; static_assert(sizeof(sequence_result_type) == 4, "SeedSequence::result_type must be 32-bit"); - constexpr size_t kBufferSize = Randen::kSeedBytes / sizeof(sequence_result_type); alignas(16) sequence_result_type buffer[kBufferSize]; @@ -119,8 +130,15 @@ class alignas(16) randen_engine { if (entropy_size < kBufferSize) { // ... and only request that many values, or 256-bits, when unspecified. const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size; - std::fill(std::begin(buffer) + requested_entropy, std::end(buffer), 0); - seq.generate(std::begin(buffer), std::begin(buffer) + requested_entropy); + std::fill(buffer + requested_entropy, buffer + kBufferSize, 0); + seq.generate(buffer, buffer + requested_entropy); +#ifdef ABSL_IS_BIG_ENDIAN + // Randen expects the seed buffer to be in Little Endian; reverse it on + // Big Endian platforms. + for (sequence_result_type& e : buffer) { + e = absl::little_endian::FromHost(e); + } +#endif // The Randen paper suggests preferentially initializing even-numbered // 128-bit vectors of the randen state (there are 16 such vectors). 
// The seed data is merged into the state offset by 128-bits, which @@ -139,9 +157,9 @@ class alignas(16) randen_engine { std::swap(buffer[--dst], buffer[--src]); } } else { - seq.generate(std::begin(buffer), std::end(buffer)); + seq.generate(buffer, buffer + kBufferSize); } - impl_.Absorb(buffer, state_); + impl_.Absorb(buffer, state()); // Generate will be called when operator() is called next_ = kStateSizeT; @@ -152,9 +170,10 @@ class alignas(16) randen_engine { count -= step; constexpr uint64_t kRateT = kStateSizeT - kCapacityT; + auto* begin = state(); while (count > 0) { next_ = kCapacityT; - impl_.Generate(state_); + impl_.Generate(*reinterpret_cast(begin)); step = std::min(kRateT, count); count -= step; } @@ -162,9 +181,9 @@ class alignas(16) randen_engine { } bool operator==(const randen_engine& other) const { + const auto* begin = state(); return next_ == other.next_ && - std::equal(std::begin(state_), std::end(state_), - std::begin(other.state_)); + std::equal(begin, begin + kStateSizeT, other.state()); } bool operator!=(const randen_engine& other) const { @@ -178,11 +197,12 @@ class alignas(16) randen_engine { using numeric_type = typename random_internal::stream_format_type::type; auto saver = random_internal::make_ostream_state_saver(os); - for (const auto& elem : engine.state_) { + auto* it = engine.state(); + for (auto* end = it + kStateSizeT; it < end; ++it) { // In the case that `elem` is `uint8_t`, it must be cast to something // larger so that it prints as an integer rather than a character. For // simplicity, apply the cast all circumstances. - os << static_cast(little_endian::FromHost(elem)) + os << static_cast(little_endian::FromHost(*it)) << os.fill(); } os << engine.next_; @@ -208,7 +228,7 @@ class alignas(16) randen_engine { if (is.fail()) { return is; } - std::memcpy(engine.state_, state, sizeof(engine.state_)); + std::memcpy(engine.state(), state, sizeof(state)); engine.next_ = next; return is; } @@ -219,9 +239,21 @@ class alignas(16) randen_engine { static constexpr size_t kCapacityT = Randen::kCapacityBytes / sizeof(result_type); - // First kCapacityT are `inner', the others are accessible random bits. - alignas(16) result_type state_[kStateSizeT]; - size_t next_; // index within state_ + // Returns the state array pointer, which is aligned to 16 bytes. + // The first kCapacityT are the `inner' sponge; the remainder are available. + result_type* state() { + return reinterpret_cast( + (reinterpret_cast(&raw_state_) & 0xf) ? (raw_state_ + 8) + : raw_state_); + } + const result_type* state() const { + return const_cast(this)->state(); + } + + // raw state array, manually aligned in state(). This overallocates + // by 8 bytes since C++ does not guarantee extended heap alignment. + alignas(8) char raw_state_[Randen::kStateBytes + 8]; + size_t next_; // index within state() Randen impl_; }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_hwaes_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_hwaes_test.cc index 66ddb43fd6..2348b55c35 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_hwaes_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_hwaes_test.cc @@ -27,44 +27,39 @@ namespace { using absl::random_internal::RandenHwAes; using absl::random_internal::RandenTraits; -// Local state parameters. 
-constexpr size_t kSeedBytes = - RandenTraits::kStateBytes - RandenTraits::kCapacityBytes; -constexpr size_t kStateSizeT = RandenTraits::kStateBytes / sizeof(uint64_t); -constexpr size_t kSeedSizeT = kSeedBytes / sizeof(uint32_t); - -struct alignas(16) randen { - uint64_t state[kStateSizeT]; - uint32_t seed[kSeedSizeT]; -}; - TEST(RandenHwAesTest, Default) { EXPECT_TRUE(absl::random_internal::CPUSupportsRandenHwAes()); - constexpr uint64_t kGolden[] = { - 0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977, - 0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912, - 0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6, - 0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a, - 0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37, - 0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556, - 0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42, - 0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc, - 0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f, - 0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3, - 0x026ff374c101da7e, 0x811ef0821c3de851, + constexpr uint8_t kGolden[] = { + 0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d, + 0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3, + 0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f, + 0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0, + 0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff, + 0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2, + 0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5, + 0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c, + 0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56, + 0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4, + 0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91, + 0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5, + 0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d, + 0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58, + 0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e, + 0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49, + 0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0, + 0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6, + 0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61, + 0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35, + 0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c, + 0x82, 0xf0, 0x1e, 0x81, }; - alignas(16) randen d; - memset(d.state, 0, sizeof(d.state)); - RandenHwAes::Generate(RandenHwAes::GetKeys(), d.state); + alignas(16) uint8_t state[RandenTraits::kStateBytes]; + std::memset(state, 0, sizeof(state)); - uint64_t* id = d.state; - for (const auto& elem : kGolden) { - auto a = absl::StrFormat("%#x", elem); - auto b = absl::StrFormat("%#x", *id++); - EXPECT_EQ(a, b); - } + RandenHwAes::Generate(RandenHwAes::GetKeys(), state); + EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state))); } } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow.cc index d5c9347ba9..9bfd2a4092 100644 --- 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow.cc @@ -395,6 +395,23 @@ inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute( } } +// Enables native loads in the round loop by pre-swapping. +inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian( + absl::uint128* state) { +#ifdef ABSL_IS_BIG_ENDIAN + for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) { + uint64_t new_lo = absl::little_endian::ToHost64( + static_cast(state[block] >> 64)); + uint64_t new_hi = absl::little_endian::ToHost64( + static_cast((state[block] << 64) >> 64)); + state[block] = (static_cast(new_hi) << 64) | new_lo; + } +#else + // Avoid warning about unused variable. + (void)state; +#endif +} + } // namespace namespace absl { @@ -439,8 +456,12 @@ void RandenSlow::Generate(const void* keys_void, void* state_void) { const absl::uint128 prev_inner = state[0]; + SwapEndian(state); + Permute(state, keys); + SwapEndian(state); + // Ensure backtracking resistance. *state ^= prev_inner; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow_test.cc index 4861ffa4f1..ed6039586c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_slow_test.cc @@ -25,40 +25,37 @@ namespace { using absl::random_internal::RandenSlow; using absl::random_internal::RandenTraits; -// Local state parameters. -constexpr size_t kSeedBytes = - RandenTraits::kStateBytes - RandenTraits::kCapacityBytes; -constexpr size_t kStateSizeT = RandenTraits::kStateBytes / sizeof(uint64_t); -constexpr size_t kSeedSizeT = kSeedBytes / sizeof(uint32_t); - -struct alignas(16) randen { - uint64_t state[kStateSizeT]; - uint32_t seed[kSeedSizeT]; -}; - TEST(RandenSlowTest, Default) { - constexpr uint64_t kGolden[] = { - 0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977, - 0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912, - 0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6, - 0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a, - 0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37, - 0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556, - 0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42, - 0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc, - 0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f, - 0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3, - 0x026ff374c101da7e, 0x811ef0821c3de851, + constexpr uint8_t kGolden[] = { + 0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d, + 0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3, + 0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f, + 0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0, + 0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff, + 0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2, + 0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5, + 0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c, + 0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56, + 0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 
0x27, 0xb3, 0xf4, + 0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91, + 0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5, + 0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d, + 0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58, + 0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e, + 0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49, + 0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0, + 0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6, + 0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61, + 0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35, + 0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c, + 0x82, 0xf0, 0x1e, 0x81, }; - alignas(16) randen d; - std::memset(d.state, 0, sizeof(d.state)); - RandenSlow::Generate(RandenSlow::GetKeys(), d.state); + alignas(16) uint8_t state[RandenTraits::kStateBytes]; + std::memset(state, 0, sizeof(state)); - uint64_t* id = d.state; - for (const auto& elem : kGolden) { - EXPECT_EQ(absl::little_endian::FromHost64(elem), *id++); - } + RandenSlow::Generate(RandenSlow::GetKeys(), state); + EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state))); } } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_test.cc index c186fe0d68..92773b8d9a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/randen_test.cc @@ -23,9 +23,6 @@ namespace { using absl::random_internal::Randen; -// Local state parameters. 
-constexpr size_t kStateSizeT = Randen::kStateBytes / sizeof(uint64_t); - TEST(RandenTest, CopyAndMove) { static_assert(std::is_copy_constructible::value, "Randen must be copy constructible"); @@ -41,30 +38,38 @@ TEST(RandenTest, CopyAndMove) { } TEST(RandenTest, Default) { - constexpr uint64_t kGolden[] = { - 0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977, - 0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912, - 0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6, - 0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a, - 0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37, - 0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556, - 0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42, - 0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc, - 0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f, - 0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3, - 0x026ff374c101da7e, 0x811ef0821c3de851, + constexpr uint8_t kGolden[] = { + 0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d, + 0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3, + 0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f, + 0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0, + 0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff, + 0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2, + 0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5, + 0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c, + 0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56, + 0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4, + 0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91, + 0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5, + 0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d, + 0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58, + 0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e, + 0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49, + 0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0, + 0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6, + 0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61, + 0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35, + 0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c, + 0x82, 0xf0, 0x1e, 0x81, }; - alignas(16) uint64_t state[kStateSizeT]; + alignas(16) uint8_t state[Randen::kStateBytes]; std::memset(state, 0, sizeof(state)); Randen r; r.Generate(state); - auto id = std::begin(state); - for (const auto& elem : kGolden) { - EXPECT_EQ(elem, *id++); - } + EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state))); } } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/salted_seed_seq.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/salted_seed_seq.h index 5953a090f8..06291865e3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/salted_seed_seq.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/salted_seed_seq.h @@ -22,6 +22,7 @@ #include #include #include +#include #include "absl/container/inlined_vector.h" #include "absl/meta/type_traits.h" @@ -65,15 +66,19 @@ class SaltedSeedSeq { template void 
generate(RandomAccessIterator begin, RandomAccessIterator end) { + using U = typename std::iterator_traits::value_type; + // The common case is that generate is called with ContiguousIterators // to uint arrays. Such contiguous memory regions may be optimized, // which we detect here. - using tag = absl::conditional_t< - (std::is_pointer::value && - std::is_same, uint32_t>::value), + using TagType = absl::conditional_t< + (std::is_same::value && + (std::is_pointer::value || + std::is_same::iterator>::value)), ContiguousAndUint32Tag, DefaultTag>; if (begin != end) { - generate_impl(begin, end, tag{}); + generate_impl(TagType{}, begin, end, std::distance(begin, end)); } } @@ -89,8 +94,15 @@ class SaltedSeedSeq { struct DefaultTag {}; // Generate which requires the iterators are contiguous pointers to uint32_t. - void generate_impl(uint32_t* begin, uint32_t* end, ContiguousAndUint32Tag) { - generate_contiguous(absl::MakeSpan(begin, end)); + // Fills the initial seed buffer the underlying SSeq::generate() call, + // then mixes in the salt material. + template + void generate_impl(ContiguousAndUint32Tag, Contiguous begin, Contiguous end, + size_t n) { + seq_->generate(begin, end); + const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0); + auto span = absl::Span(&*begin, n); + MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), span); } // The uncommon case for generate is that it is called with iterators over @@ -98,27 +110,13 @@ class SaltedSeedSeq { // case we allocate a temporary 32-bit buffer and then copy-assign back // to the initial inputs. template - void generate_impl(RandomAccessIterator begin, RandomAccessIterator end, - DefaultTag) { - return generate_and_copy(std::distance(begin, end), begin); - } - - // Fills the initial seed buffer the underlying SSeq::generate() call, - // mixing in the salt material. - void generate_contiguous(absl::Span buffer) { - seq_->generate(buffer.begin(), buffer.end()); - const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0); - MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), buffer); - } - - // Allocates a seed buffer of `n` elements, generates the seed, then - // copies the result into the `out` iterator. - template - void generate_and_copy(size_t n, Iterator out) { - // Allocate a temporary buffer, generate, and then copy. + void generate_impl(DefaultTag, RandomAccessIterator begin, + RandomAccessIterator, size_t n) { + // Allocates a seed buffer of `n` elements, generates the seed, then + // copies the result into the `out` iterator. 
absl::InlinedVector data(n, 0); - generate_contiguous(absl::MakeSpan(data.data(), data.size())); - std::copy(data.begin(), data.end(), out); + generate_impl(ContiguousAndUint32Tag{}, data.begin(), data.end(), n); + std::copy(data.begin(), data.end(), begin); } // Because [rand.req.seedseq] is not required to be copy-constructible, diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/seed_material.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/seed_material.cc index c03cad8502..1041302b58 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/seed_material.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/seed_material.cc @@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(absl::Span values) { } while (success && buffer_size > 0) { - int bytes_read = read(dev_urandom, buffer, buffer_size); + ssize_t bytes_read = read(dev_urandom, buffer, buffer_size); int read_error = errno; success = (bytes_read > 0); if (success) { buffer += bytes_read; - buffer_size -= bytes_read; + buffer_size -= static_cast(bytes_read); } else if (bytes_read == -1 && read_error == EINTR) { success = true; // Need to try again. } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/traits.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/traits.h index 75772bd9ab..f874a0f78b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/traits.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/traits.h @@ -20,6 +20,8 @@ #include #include "absl/base/config.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -59,6 +61,31 @@ class is_widening_convertible { rank() <= rank(); }; +template +struct IsIntegral : std::is_integral {}; +template <> +struct IsIntegral : std::true_type {}; +template <> +struct IsIntegral : std::true_type {}; + +template +struct MakeUnsigned : std::make_unsigned {}; +template <> +struct MakeUnsigned { + using type = absl::uint128; +}; +template <> +struct MakeUnsigned { + using type = absl::uint128; +}; + +template +struct IsUnsigned : std::is_unsigned {}; +template <> +struct IsUnsigned : std::false_type {}; +template <> +struct IsUnsigned : std::true_type {}; + // unsigned_bits::type returns the unsigned int type with the indicated // number of bits. template @@ -81,19 +108,40 @@ struct unsigned_bits<64> { using type = uint64_t; }; -#ifdef ABSL_HAVE_INTRINSIC_INT128 template <> struct unsigned_bits<128> { - using type = __uint128_t; + using type = absl::uint128; +}; + +// 256-bit wrapper for wide multiplications. +struct U256 { + uint128 hi; + uint128 lo; +}; +template <> +struct unsigned_bits<256> { + using type = U256; }; -#endif template struct make_unsigned_bits { - using type = typename unsigned_bits::type>::digits>::type; + using type = typename unsigned_bits< + std::numeric_limits::type>::digits>::type; }; +template +int BitWidth(T v) { + // Workaround for bit_width not supporting int128. + // Don't hardcode `64` to make sure this code does not trigger compiler + // warnings in smaller types. 
+ constexpr int half_bits = sizeof(T) * 8 / 2; + if (sizeof(T) == 16 && (v >> half_bits) != 0) { + return bit_width(static_cast(v >> half_bits)) + half_bits; + } else { + return bit_width(static_cast(v)); + } +} + } // namespace random_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/uniform_helper.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/uniform_helper.h index 1243bc1c62..e68b82ee5c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/uniform_helper.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/uniform_helper.h @@ -100,7 +100,7 @@ using uniform_inferred_return_t = template typename absl::enable_if_t< absl::conjunction< - std::is_integral, + IsIntegral, absl::disjunction, std::is_same>>::value, IntType> @@ -131,7 +131,7 @@ uniform_lower_bound(Tag, NumType a, NumType) { template typename absl::enable_if_t< absl::conjunction< - std::is_integral, + IsIntegral, absl::disjunction, std::is_same>>::value, IntType> @@ -153,7 +153,7 @@ uniform_upper_bound(Tag, FloatType, FloatType b) { template typename absl::enable_if_t< absl::conjunction< - std::is_integral, + IsIntegral, absl::disjunction, std::is_same>>::value, IntType> @@ -201,7 +201,7 @@ is_uniform_range_valid(FloatType a, FloatType b) { } template -absl::enable_if_t::value, bool> +absl::enable_if_t::value, bool> is_uniform_range_valid(IntType a, IntType b) { return a <= b; } @@ -210,7 +210,7 @@ is_uniform_range_valid(IntType a, IntType b) { // or absl::uniform_real_distribution depending on the NumType parameter. template using UniformDistribution = - typename std::conditional::value, + typename std::conditional::value, absl::uniform_int_distribution, absl::uniform_real_distribution>::type; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply.h index b6e6c4b6aa..891e3630b7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply.h @@ -34,43 +34,6 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace random_internal { -// Helper object to multiply two 64-bit values to a 128-bit value. -// MultiplyU64ToU128 multiplies two 64-bit values to a 128-bit value. -// If an intrinsic is available, it is used, otherwise use native 32-bit -// multiplies to construct the result. -inline absl::uint128 MultiplyU64ToU128(uint64_t a, uint64_t b) { -#if defined(ABSL_HAVE_INTRINSIC_INT128) - return absl::uint128(static_cast<__uint128_t>(a) * b); -#elif defined(ABSL_INTERNAL_USE_UMUL128) - // uint64_t * uint64_t => uint128 multiply using imul intrinsic on MSVC. - uint64_t high = 0; - const uint64_t low = _umul128(a, b, &high); - return absl::MakeUint128(high, low); -#else - // uint128(a) * uint128(b) in emulated mode computes a full 128-bit x 128-bit - // multiply. However there are many cases where that is not necessary, and it - // is only necessary to support a 64-bit x 64-bit = 128-bit multiply. This is - // for those cases. 
- const uint64_t a00 = static_cast(a); - const uint64_t a32 = a >> 32; - const uint64_t b00 = static_cast(b); - const uint64_t b32 = b >> 32; - - const uint64_t c00 = a00 * b00; - const uint64_t c32a = a00 * b32; - const uint64_t c32b = a32 * b00; - const uint64_t c64 = a32 * b32; - - const uint32_t carry = - static_cast(((c00 >> 32) + static_cast(c32a) + - static_cast(c32b)) >> - 32); - - return absl::MakeUint128(c64 + (c32a >> 32) + (c32b >> 32) + carry, - c00 + (c32a << 32) + (c32b << 32)); -#endif -} - // wide_multiply multiplies two N-bit values to a 2N-bit result. template struct wide_multiply { @@ -82,27 +45,49 @@ struct wide_multiply { return static_cast(a) * b; } - static input_type hi(result_type r) { return r >> kN; } - static input_type lo(result_type r) { return r; } + static input_type hi(result_type r) { + return static_cast(r >> kN); + } + static input_type lo(result_type r) { return static_cast(r); } static_assert(std::is_unsigned::value, "Class-template wide_multiply<> argument must be unsigned."); }; -#ifndef ABSL_HAVE_INTRINSIC_INT128 -template <> -struct wide_multiply { - using input_type = uint64_t; - using result_type = absl::uint128; +// MultiplyU128ToU256 multiplies two 128-bit values to a 256-bit value. +inline U256 MultiplyU128ToU256(uint128 a, uint128 b) { + const uint128 a00 = static_cast(a); + const uint128 a64 = a >> 64; + const uint128 b00 = static_cast(b); + const uint128 b64 = b >> 64; - static result_type multiply(uint64_t a, uint64_t b) { - return MultiplyU64ToU128(a, b); + const uint128 c00 = a00 * b00; + const uint128 c64a = a00 * b64; + const uint128 c64b = a64 * b00; + const uint128 c128 = a64 * b64; + + const uint64_t carry = + static_cast(((c00 >> 64) + static_cast(c64a) + + static_cast(c64b)) >> + 64); + + return {c128 + (c64a >> 64) + (c64b >> 64) + carry, + c00 + (c64a << 64) + (c64b << 64)}; +} + + +template <> +struct wide_multiply { + using input_type = uint128; + using result_type = U256; + + static result_type multiply(input_type a, input_type b) { + return MultiplyU128ToU256(a, b); } - static uint64_t hi(result_type r) { return absl::Uint128High64(r); } - static uint64_t lo(result_type r) { return absl::Uint128Low64(r); } + static input_type hi(result_type r) { return r.hi; } + static input_type lo(result_type r) { return r.lo; } }; -#endif } // namespace random_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply_test.cc index e276cb51cf..f8ee35c03e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/internal/wide_multiply_test.cc @@ -14,52 +14,106 @@ #include "absl/random/internal/wide_multiply.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/numeric/int128.h" -using absl::random_internal::MultiplyU64ToU128; +using absl::random_internal::MultiplyU128ToU256; +using absl::random_internal::U256; namespace { -TEST(WideMultiplyTest, MultiplyU64ToU128Test) { - constexpr uint64_t k1 = 1; - constexpr uint64_t kMax = ~static_cast(0); +U256 LeftShift(U256 v, int s) { + if (s == 0) { + return v; + } else if (s < 128) { + return {(v.hi << s) | (v.lo >> (128 - s)), v.lo << s}; + } else { + return {v.lo << (s - 128), 0}; + } +} - EXPECT_EQ(absl::uint128(0), MultiplyU64ToU128(0, 0)); +MATCHER_P2(Eq256, hi, lo, "") { return 
arg.hi == hi && arg.lo == lo; } +MATCHER_P(Eq256, v, "") { return arg.hi == v.hi && arg.lo == v.lo; } - // Max uint64_t - EXPECT_EQ(MultiplyU64ToU128(kMax, kMax), - absl::MakeUint128(0xfffffffffffffffe, 0x0000000000000001)); - EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(kMax, 1)); - EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(1, kMax)); +TEST(WideMultiplyTest, MultiplyU128ToU256Test) { + using absl::uint128; + constexpr uint128 k1 = 1; + constexpr uint128 kMax = ~static_cast(0); + + EXPECT_THAT(MultiplyU128ToU256(0, 0), Eq256(0, 0)); + + // Max uin128_t + EXPECT_THAT(MultiplyU128ToU256(kMax, kMax), Eq256(kMax << 1, 1)); + EXPECT_THAT(MultiplyU128ToU256(kMax, 1), Eq256(0, kMax)); + EXPECT_THAT(MultiplyU128ToU256(1, kMax), Eq256(0, kMax)); for (int i = 0; i < 64; ++i) { - EXPECT_EQ(absl::MakeUint128(0, kMax) << i, - MultiplyU64ToU128(kMax, k1 << i)); - EXPECT_EQ(absl::MakeUint128(0, kMax) << i, - MultiplyU64ToU128(k1 << i, kMax)); + SCOPED_TRACE(i); + EXPECT_THAT(MultiplyU128ToU256(kMax, k1 << i), + Eq256(LeftShift({0, kMax}, i))); + EXPECT_THAT(MultiplyU128ToU256(k1 << i, kMax), + Eq256(LeftShift({0, kMax}, i))); } // 1-bit x 1-bit. for (int i = 0; i < 64; ++i) { for (int j = 0; j < 64; ++j) { - EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j), - MultiplyU64ToU128(k1 << i, k1 << j)); - EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j), - MultiplyU64ToU128(k1 << i, k1 << j)); + EXPECT_THAT(MultiplyU128ToU256(k1 << i, k1 << j), + Eq256(LeftShift({0, 1}, i + j))); } } // Verified multiplies - EXPECT_EQ(MultiplyU64ToU128(0xffffeeeeddddcccc, 0xbbbbaaaa99998888), - absl::MakeUint128(0xbbbb9e2692c5dddc, 0xc28f7531048d2c60)); - EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfedcba9876543210), - absl::MakeUint128(0x0121fa00ad77d742, 0x2236d88fe5618cf0)); - EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfdb97531eca86420), - absl::MakeUint128(0x0120ae99d26725fc, 0xce197f0ecac319e0)); - EXPECT_EQ(MultiplyU64ToU128(0x97a87f4f261ba3f2, 0xfedcba9876543210), - absl::MakeUint128(0x96fbf1a8ae78d0ba, 0x5a6dd4b71f278320)); - EXPECT_EQ(MultiplyU64ToU128(0xfedcba9876543210, 0xfdb97531eca86420), - absl::MakeUint128(0xfc98c6981a413e22, 0x342d0bbf48948200)); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0xc502da0d6ea99fe8, 0xfa3c9141a1f50912), + absl::MakeUint128(0x96bcf1ac37f97bd6, 0x27e2cdeb5fb2299e)), + Eq256(absl::MakeUint128(0x740113d838f96a64, 0x22e8cfa4d71f89ea), + absl::MakeUint128(0x19184a345c62e993, 0x237871b630337b1c))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0x6f29e670cee07230, 0xc3d8e6c3e4d86759), + absl::MakeUint128(0x3227d29fa6386db1, 0x231682bb1e4b764f)), + Eq256(absl::MakeUint128(0x15c779d9d5d3b07c, 0xd7e6c827f0c81cbe), + absl::MakeUint128(0xf88e3914f7fa287a, 0x15b79975137dea77))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0xafb77107215646e1, 0x3b844cb1ac5769e7), + absl::MakeUint128(0x1ff7b2d888b62479, 0x92f758ae96fcba0b)), + Eq256(absl::MakeUint128(0x15f13b70181f6985, 0x2adb36bbabce7d02), + absl::MakeUint128(0x6c470d72e13aad04, 0x63fba3f5841762ed))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0xd85d5558d67ac905, 0xf88c70654dae19b1), + absl::MakeUint128(0x17252c6727db3738, 0x399ff658c511eedc)), + Eq256(absl::MakeUint128(0x138fcdaf8b0421ee, 0x1b465ddf2a0d03f6), + absl::MakeUint128(0x8f573ba68296860f, 0xf327d2738741a21c))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0x46f0421a37ff6bee, 0xa61df89f09d140b1), + absl::MakeUint128(0x3d712ec9f37ca2e1, 0x9658a2cba47ef4b1)), + Eq256(absl::MakeUint128(0x11069cc48ee7c95d, 
0xd35fb1c7aa91c978), + absl::MakeUint128(0xbe2f4a6de874b015, 0xd2f7ac1b76746e61))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0x730d27c72d58fa49, 0x3ebeda7498f8827c), + absl::MakeUint128(0xa2c959eca9f503af, 0x189c687eb842bbd8)), + Eq256(absl::MakeUint128(0x4928d0ea356ba022, 0x1546d34a2963393), + absl::MakeUint128(0x7481531e1e0a16d1, 0xdd8025015cf6aca0))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0x6ca41020f856d2f1, 0xb9b0838c04a7f4aa), + absl::MakeUint128(0x9cf41d28a8396f54, 0x1d681695e377ffe6)), + Eq256(absl::MakeUint128(0x429b92934d9be6f1, 0xea182877157c1e7), + absl::MakeUint128(0x7135c23f0a4a475, 0xc1adc366f4a126bc))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0x57472833797c332, 0x6c79272fdec4687a), + absl::MakeUint128(0xb5f022ea3838e46b, 0x16face2f003e27a6)), + Eq256(absl::MakeUint128(0x3e072e0962b3400, 0x5d9fe8fdc3d0e1f4), + absl::MakeUint128(0x7dc0df47cedafd62, 0xbe6501f1acd2551c))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0xf0fb4198322eb1c2, 0xfe7f5f31f3885938), + absl::MakeUint128(0xd99012b71bb7aa31, 0xac7a6f9eb190789)), + Eq256(absl::MakeUint128(0xcccc998cf075ca01, 0x642d144322fb873a), + absl::MakeUint128(0xc79dc12b69d91ed4, 0xa83459132ce046f8))); + EXPECT_THAT(MultiplyU128ToU256( + absl::MakeUint128(0xb5c04120848cdb47, 0x8aa62a827bf52635), + absl::MakeUint128(0x8d07a359be2f1380, 0x467bb90d59da0dea)), + Eq256(absl::MakeUint128(0x64205019d139a9ce, 0x99425c5fb6e7a977), + absl::MakeUint128(0xd3e99628a9e5fca7, 0x9c7824cb7279d72))); } } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution.h index 43e101169c..4afff8f604 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution.h @@ -69,10 +69,8 @@ class log_uniform_int_distribution { if (base_ == 2) { // Determine where the first set bit is on range(), giving a log2(range) // value which can be used to construct bounds. - log_range_ = - (std::min)(bit_width(range()), - static_cast( - std::numeric_limits::digits)); + log_range_ = (std::min)(random_internal::BitWidth(range()), + std::numeric_limits::digits); } else { // NOTE: Computing the logN(x) introduces error from 2 sources: // 1. Conversion of int to double loses precision for values >= @@ -83,7 +81,7 @@ class log_uniform_int_distribution { // // Thus a result which should equal K may equal K +/- epsilon, // which can eliminate some values depending on where the bounds fall. 
- const double inv_log_base = 1.0 / std::log(base_); + const double inv_log_base = 1.0 / std::log(static_cast(base_)); const double log_range = std::log(static_cast(range()) + 0.5); log_range_ = static_cast(std::ceil(inv_log_base * log_range)); } @@ -113,7 +111,7 @@ class log_uniform_int_distribution { unsigned_type range_; // max - min int log_range_; // ceil(logN(range_)) - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Class-template absl::log_uniform_int_distribution<> must be " "parameterized using an integral type."); }; @@ -139,7 +137,7 @@ class log_uniform_int_distribution { template result_type operator()(URBG& g, // NOLINT(runtime/references) const param_type& p) { - return (p.min)() + Generate(g, p); + return static_cast((p.min)() + Generate(g, p)); } result_type(min)() const { return (param_.min)(); } @@ -193,8 +191,8 @@ log_uniform_int_distribution::Generate( ? (std::numeric_limits::max)() : (static_cast(1) << e) - 1; } else { - const double r = std::pow(p.base(), d); - const double s = (r * p.base()) - 1.0; + const double r = std::pow(static_cast(p.base()), d); + const double s = (r * static_cast(p.base())) - 1.0; base_e = (r > static_cast((std::numeric_limits::max)())) @@ -211,7 +209,8 @@ log_uniform_int_distribution::Generate( const unsigned_type hi = (top_e >= p.range()) ? p.range() : top_e; // choose uniformly over [lo, hi] - return absl::uniform_int_distribution(lo, hi)(g); + return absl::uniform_int_distribution( + static_cast(lo), static_cast(hi))(g); } template diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc index 5e780d96d3..0d0fcb9597 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc @@ -42,7 +42,7 @@ class LogUniformIntDistributionTypeTest : public ::testing::Test {}; using IntTypes = ::testing::Types; -TYPED_TEST_CASE(LogUniformIntDistributionTypeTest, IntTypes); +TYPED_TEST_SUITE(LogUniformIntDistributionTypeTest, IntTypes); TYPED_TEST(LogUniformIntDistributionTypeTest, SerializeTest) { using param_type = diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/mocking_bit_gen.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/mocking_bit_gen.h index 7b2b80eb35..89fa5a47a6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/mocking_bit_gen.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/mocking_bit_gen.h @@ -87,7 +87,7 @@ class BitGenRef; // // ON_CALL(absl::MockUniform(), Call(bitgen, testing::_, testing::_)) // .WillByDefault([] (int low, int high) { -// return (low + high) / 2; +// return low + (high - low) / 2; // }); // // EXPECT_EQ(absl::Uniform(gen, 0, 10), 5); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution.h index cb5f5d5d0f..f4573082e1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution.h @@ -26,6 +26,7 @@ #include "absl/random/internal/fastmath.h" #include 
"absl/random/internal/generate_real.h" #include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/traits.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -80,7 +81,7 @@ class poisson_distribution { double log_k_; int split_; - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Class-template absl::poisson_distribution<> must be " "parameterized using an integral type."); }; @@ -133,7 +134,8 @@ template poisson_distribution::param_type::param_type(double mean) : mean_(mean), split_(0) { assert(mean >= 0); - assert(mean <= (std::numeric_limits::max)()); + assert(mean <= + static_cast((std::numeric_limits::max)())); // As a defensive measure, avoid large values of the mean. The rejection // algorithm used does not support very large values well. It my be worth // changing algorithms to better deal with these cases. @@ -222,8 +224,9 @@ poisson_distribution::operator()( // clang-format on const double lhs = 2.0 * std::log(u) + p.log_k_ + s; if (lhs < rhs) { - return x > (max)() ? (max)() - : static_cast(x); // f(x)/k >= u^2 + return x > static_cast((max)()) + ? (max)() + : static_cast(x); // f(x)/k >= u^2 } } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution_test.cc index 8baabd1118..4f585b9b2b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/poisson_distribution_test.cc @@ -73,7 +73,7 @@ class PoissonDistributionInterfaceTest : public ::testing::Test {}; using IntTypes = ::testing::Types; -TYPED_TEST_CASE(PoissonDistributionInterfaceTest, IntTypes); +TYPED_TEST_SUITE(PoissonDistributionInterfaceTest, IntTypes); TYPED_TEST(PoissonDistributionInterfaceTest, SerializeTest) { using param_type = typename absl::poisson_distribution::param_type; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/seed_sequences.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/seed_sequences.h index ff1340cc8e..c3af4b00a4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/seed_sequences.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/seed_sequences.h @@ -28,6 +28,7 @@ #include #include +#include "absl/base/config.h" #include "absl/random/internal/salted_seed_seq.h" #include "absl/random/internal/seed_material.h" #include "absl/random/seed_gen_exception.h" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution.h index c1f54ccebc..fae80252e3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution.h @@ -97,7 +97,7 @@ class uniform_int_distribution { result_type lo_; unsigned_type range_; - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Class-template absl::uniform_int_distribution<> must be " "parameterized using an integral type."); }; // param_type @@ -125,7 +125,7 @@ class uniform_int_distribution { template result_type operator()( URBG& gen, const param_type& param) { // NOLINT(runtime/references) - return param.a() + Generate(gen, 
param.range()); + return static_cast(param.a() + Generate(gen, param.range())); } result_type a() const { return param_.a(); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution_test.cc index 276d72ad20..a830117aeb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_int_distribution_test.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "gmock/gmock.h" @@ -136,7 +137,7 @@ TYPED_TEST(UniformIntDistributionTest, TestMoments) { typename absl::uniform_int_distribution::param_type; // We use a fixed bit generator for distribution accuracy tests. This allows - // these tests to be deterministic, while still testing the qualify of the + // these tests to be deterministic, while still testing the quality of the // implementation. absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6}; @@ -172,7 +173,7 @@ TYPED_TEST(UniformIntDistributionTest, ChiSquaredTest50) { using absl::random_internal::kChiSquared; constexpr size_t kTrials = 1000; - constexpr int kBuckets = 50; // inclusive, so actally +1 + constexpr int kBuckets = 50; // inclusive, so actually +1 constexpr double kExpected = static_cast(kTrials) / static_cast(kBuckets); @@ -184,7 +185,7 @@ TYPED_TEST(UniformIntDistributionTest, ChiSquaredTest50) { const TypeParam max = min + kBuckets; // We use a fixed bit generator for distribution accuracy tests. This allows - // these tests to be deterministic, while still testing the qualify of the + // these tests to be deterministic, while still testing the quality of the // implementation. absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6}; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution.h index 5ba17b2341..196833415e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution.h @@ -73,12 +73,12 @@ class uniform_real_distribution { : lo_(lo), hi_(hi), range_(hi - lo) { // [rand.dist.uni.real] preconditions 2 & 3 assert(lo <= hi); + // NOTE: For integral types, we can promote the range to an unsigned type, // which gives full width of the range. However for real (fp) types, this // is not possible, so value generation cannot use the full range of the // real type. 
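For the uniform_real_distribution precondition touched just below, a minimal sketch (assuming IEEE-754 double with round-to-nearest) of why the range can overflow even for finite bounds, which is what the ViolatesPreconditionsDeathTest later in this patch exercises:

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const double lo = std::numeric_limits<double>::lowest();  // -DBL_MAX
  const double hi = std::numeric_limits<double>::max();     //  DBL_MAX
  const double range = hi - lo;  // 2 * DBL_MAX overflows to +infinity
  assert(std::isinf(range));
  // Hence the assert on range_ below: unlike the integral case, a real-valued
  // range cannot be promoted to a wider type to sidestep the overflow.
  return 0;
}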
assert(range_ <= (std::numeric_limits::max)()); - assert(std::isfinite(range_)); } result_type a() const { return lo_; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution_test.cc index 035bd284d1..07f199d34c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/uniform_real_distribution_test.cc @@ -78,62 +78,74 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) { GTEST_SKIP() << "Skipping the test because we detected x87 floating-point semantics"; #endif + using DistributionType = absl::uniform_real_distribution; + using real_type = TypeParam; + using param_type = typename DistributionType::param_type; - using param_type = - typename absl::uniform_real_distribution::param_type; + constexpr const real_type kMax = std::numeric_limits::max(); + constexpr const real_type kMin = std::numeric_limits::min(); + constexpr const real_type kEpsilon = + std::numeric_limits::epsilon(); + constexpr const real_type kLowest = + std::numeric_limits::lowest(); // -max - constexpr const TypeParam a{1152921504606846976}; + const real_type kDenormMax = std::nextafter(kMin, real_type{0}); + const real_type kOneMinusE = + std::nextafter(real_type{1}, real_type{0}); // 1 - epsilon + + constexpr const real_type kTwo60{1152921504606846976}; // 2^60 constexpr int kCount = 1000; absl::InsecureBitGen gen; for (const auto& param : { param_type(), - param_type(TypeParam(2.0), TypeParam(2.0)), // Same - param_type(TypeParam(-0.1), TypeParam(0.1)), - param_type(TypeParam(0.05), TypeParam(0.12)), - param_type(TypeParam(-0.05), TypeParam(0.13)), - param_type(TypeParam(-0.05), TypeParam(-0.02)), + param_type(real_type{0}, real_type{1}), + param_type(real_type(-0.1), real_type(0.1)), + param_type(real_type(0.05), real_type(0.12)), + param_type(real_type(-0.05), real_type(0.13)), + param_type(real_type(-0.05), real_type(-0.02)), + // range = 0 + param_type(real_type(2.0), real_type(2.0)), // Same // double range = 0 // 2^60 , 2^60 + 2^6 - param_type(a, TypeParam(1152921504606847040)), + param_type(kTwo60, real_type(1152921504606847040)), // 2^60 , 2^60 + 2^7 - param_type(a, TypeParam(1152921504606847104)), + param_type(kTwo60, real_type(1152921504606847104)), // double range = 2^8 // 2^60 , 2^60 + 2^8 - param_type(a, TypeParam(1152921504606847232)), + param_type(kTwo60, real_type(1152921504606847232)), // float range = 0 // 2^60 , 2^60 + 2^36 - param_type(a, TypeParam(1152921573326323712)), + param_type(kTwo60, real_type(1152921573326323712)), // 2^60 , 2^60 + 2^37 - param_type(a, TypeParam(1152921642045800448)), + param_type(kTwo60, real_type(1152921642045800448)), // float range = 2^38 // 2^60 , 2^60 + 2^38 - param_type(a, TypeParam(1152921779484753920)), + param_type(kTwo60, real_type(1152921779484753920)), // Limits - param_type(0, std::numeric_limits::max()), - param_type(std::numeric_limits::lowest(), 0), - param_type(0, std::numeric_limits::epsilon()), - param_type(-std::numeric_limits::epsilon(), - std::numeric_limits::epsilon()), - param_type(std::numeric_limits::epsilon(), - 2 * std::numeric_limits::epsilon()), + param_type(0, kMax), + param_type(kLowest, 0), + param_type(0, kMin), + param_type(0, kEpsilon), + param_type(-kEpsilon, kEpsilon), + param_type(0, kOneMinusE), + param_type(0, kDenormMax), }) { // Validate 
parameters. const auto a = param.a(); const auto b = param.b(); - absl::uniform_real_distribution before(a, b); + DistributionType before(a, b); EXPECT_EQ(before.a(), param.a()); EXPECT_EQ(before.b(), param.b()); { - absl::uniform_real_distribution via_param(param); + DistributionType via_param(param); EXPECT_EQ(via_param, before); } std::stringstream ss; ss << before; - absl::uniform_real_distribution after(TypeParam(1.0), - TypeParam(3.1)); + DistributionType after(real_type(1.0), real_type(3.1)); EXPECT_NE(before.a(), after.a()); EXPECT_NE(before.b(), after.b()); @@ -168,7 +180,7 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) { } } - if (!std::is_same::value) { + if (!std::is_same::value) { // static_cast(long double) can overflow. std::string msg = absl::StrCat("Range: ", static_cast(sample_min), ", ", static_cast(sample_max)); @@ -182,33 +194,52 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) { #pragma warning(disable:4756) // Constant arithmetic overflow. #endif TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) { + using DistributionType = absl::uniform_real_distribution; + using real_type = TypeParam; + #if GTEST_HAS_DEATH_TEST // Hi < Lo - EXPECT_DEBUG_DEATH( - { absl::uniform_real_distribution dist(10.0, 1.0); }, ""); + EXPECT_DEBUG_DEATH({ DistributionType dist(10.0, 1.0); }, ""); // Hi - Lo > numeric_limits<>::max() EXPECT_DEBUG_DEATH( { - absl::uniform_real_distribution dist( - std::numeric_limits::lowest(), - std::numeric_limits::max()); + DistributionType dist(std::numeric_limits::lowest(), + std::numeric_limits::max()); }, ""); + + // kEpsilon guarantees that max + kEpsilon = inf. + const auto kEpsilon = std::nexttoward( + (std::numeric_limits::max() - + std::nexttoward(std::numeric_limits::max(), 0.0)) / + 2, + std::numeric_limits::max()); + EXPECT_DEBUG_DEATH( + { + DistributionType dist(-kEpsilon, std::numeric_limits::max()); + }, + ""); + EXPECT_DEBUG_DEATH( + { + DistributionType dist(std::numeric_limits::lowest(), + kEpsilon); + }, + ""); + #endif // GTEST_HAS_DEATH_TEST #if defined(NDEBUG) // opt-mode, for invalid parameters, will generate a garbage value, // but should not enter an infinite loop. absl::InsecureBitGen gen; { - absl::uniform_real_distribution dist(10.0, 1.0); + DistributionType dist(10.0, 1.0); auto x = dist(gen); EXPECT_FALSE(std::isnan(x)) << x; } { - absl::uniform_real_distribution dist( - std::numeric_limits::lowest(), - std::numeric_limits::max()); + DistributionType dist(std::numeric_limits::lowest(), + std::numeric_limits::max()); auto x = dist(gen); // Infinite result. EXPECT_FALSE(std::isfinite(x)) << x; @@ -220,6 +251,8 @@ TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) { #endif TYPED_TEST(UniformRealDistributionTest, TestMoments) { + using DistributionType = absl::uniform_real_distribution; + constexpr int kSize = 1000000; std::vector values(kSize); @@ -228,7 +261,7 @@ TYPED_TEST(UniformRealDistributionTest, TestMoments) { // implementation. 
absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6}; - absl::uniform_real_distribution dist; + DistributionType dist; for (int i = 0; i < kSize; i++) { values[i] = dist(rng); } @@ -242,9 +275,10 @@ TYPED_TEST(UniformRealDistributionTest, TestMoments) { } TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) { + using DistributionType = absl::uniform_real_distribution; + using param_type = typename DistributionType::param_type; + using absl::random_internal::kChiSquared; - using param_type = - typename absl::uniform_real_distribution::param_type; constexpr size_t kTrials = 100000; constexpr int kBuckets = 50; @@ -269,7 +303,7 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) { const double factor = kBuckets / (max_val - min_val); std::vector counts(kBuckets, 0); - absl::uniform_real_distribution dist(param); + DistributionType dist(param); for (size_t i = 0; i < kTrials; i++) { auto x = dist(rng); auto bucket = static_cast((x - min_val) * factor); @@ -297,8 +331,11 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) { } TYPED_TEST(UniformRealDistributionTest, StabilityTest) { + using DistributionType = absl::uniform_real_distribution; + using real_type = TypeParam; + // absl::uniform_real_distribution stability relies only on - // random_internal::RandU64ToDouble and random_internal::RandU64ToFloat. + // random_internal::GenerateRealFromBits. absl::random_internal::sequence_urbg urbg( {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull, 0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull, @@ -307,9 +344,9 @@ TYPED_TEST(UniformRealDistributionTest, StabilityTest) { std::vector output(12); - absl::uniform_real_distribution dist; + DistributionType dist; std::generate(std::begin(output), std::end(output), [&] { - return static_cast(TypeParam(1000000) * dist(urbg)); + return static_cast(real_type(1000000) * dist(urbg)); }); EXPECT_THAT( diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution.h index 22ebc756cf..03497b1b26 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution.h @@ -23,13 +23,14 @@ #include #include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/traits.h" #include "absl/random/uniform_real_distribution.h" namespace absl { ABSL_NAMESPACE_BEGIN // absl::zipf_distribution produces random integer-values in the range [0, k], -// distributed according to the discrete probability function: +// distributed according to the unnormalized discrete probability function: // // P(x) = (v + x) ^ -q // @@ -94,7 +95,7 @@ class zipf_distribution { double hxm_; // h(k + 0.5) double hx0_minus_hxm_; // h(x0) - h(k + 0.5) - static_assert(std::is_integral::value, + static_assert(random_internal::IsIntegral::value, "Class-template absl::zipf_distribution<> must be " "parameterized using an integral type."); }; @@ -221,7 +222,7 @@ zipf_distribution::operator()( const double u = p.hxm_ + v * p.hx0_minus_hxm_; const double x = p.hinv(u); k = rint(x); // std::floor(x + 0.5); - if (k > p.k()) continue; // reject k > max_k + if (k > static_cast(p.k())) continue; // reject k > max_k if (k - x <= p.s_) break; const double h = p.h(k + 0.5); const double r = p.pow_negative_q(p.v_ + k); diff --git 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution_test.cc index f8cf70e0dd..c8bb89db2e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/random/zipf_distribution_test.cc @@ -44,7 +44,7 @@ class ZipfDistributionTypedTest : public ::testing::Test {}; using IntTypes = ::testing::Types; -TYPED_TEST_CASE(ZipfDistributionTypedTest, IntTypes); +TYPED_TEST_SUITE(ZipfDistributionTypedTest, IntTypes); TYPED_TEST(ZipfDistributionTypedTest, SerializeTest) { using param_type = typename absl::zipf_distribution::param_type; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/CMakeLists.txt index 438985649f..15db36af68 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/CMakeLists.txt @@ -28,15 +28,17 @@ absl_cc_library( DEPS absl::atomic_hook absl::config - absl::core_headers - absl::raw_logging_internal - absl::inlined_vector - absl::stacktrace - absl::symbolize - absl::strings absl::cord - absl::str_format + absl::core_headers + absl::function_ref + absl::inlined_vector absl::optional + absl::raw_logging_internal + absl::stacktrace + absl::str_format + absl::strerror + absl::strings + absl::symbolize PUBLIC ) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/internal/status_internal.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/internal/status_internal.h index ac12940a6d..873eb5c245 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/internal/status_internal.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/internal/status_internal.h @@ -14,8 +14,11 @@ #ifndef ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_ #define ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_ +#include #include +#include +#include "absl/base/attributes.h" #include "absl/container/inlined_vector.h" #include "absl/strings/cord.h" @@ -25,7 +28,14 @@ namespace absl { ABSL_NAMESPACE_BEGIN // Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs // as part of a class definitions (b/6995610), so we use a forward declaration. +// +// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict +// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available. +#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) +class [[nodiscard]] Status; +#else class ABSL_MUST_USE_RESULT Status; +#endif ABSL_NAMESPACE_END } // namespace absl #endif // !SWIG @@ -61,6 +71,14 @@ struct StatusRep { }; absl::StatusCode MapToLocalCode(int value); + +// Returns a pointer to a newly-allocated string with the given `prefix`, +// suitable for output as an error message in assertion/`CHECK()` failures. +// +// This is an internal implementation detail for Abseil logging. 
+std::string* MakeCheckFailString(const absl::Status* status, + const char* prefix); + } // namespace status_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.cc index 53c198e19b..bbf2335d85 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.cc @@ -13,9 +13,13 @@ // limitations under the License. #include "absl/status/status.h" +#include + #include +#include #include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/strerror.h" #include "absl/debugging/stacktrace.h" #include "absl/debugging/symbolize.h" #include "absl/status/status_payload_printer.h" @@ -74,15 +78,17 @@ std::ostream& operator<<(std::ostream& os, StatusCode code) { namespace status_internal { -static int FindPayloadIndexByUrl(const Payloads* payloads, - absl::string_view type_url) { - if (payloads == nullptr) return -1; +static absl::optional FindPayloadIndexByUrl( + const Payloads* payloads, + absl::string_view type_url) { + if (payloads == nullptr) + return absl::nullopt; for (size_t i = 0; i < payloads->size(); ++i) { if ((*payloads)[i].type_url == type_url) return i; } - return -1; + return absl::nullopt; } // Convert canonical code to a value known to this binary. @@ -116,8 +122,10 @@ absl::StatusCode MapToLocalCode(int value) { absl::optional Status::GetPayload( absl::string_view type_url) const { const auto* payloads = GetPayloads(); - int index = status_internal::FindPayloadIndexByUrl(payloads, type_url); - if (index != -1) return (*payloads)[index].payload; + absl::optional index = + status_internal::FindPayloadIndexByUrl(payloads, type_url); + if (index.has_value()) + return (*payloads)[index.value()].payload; return absl::nullopt; } @@ -132,10 +140,10 @@ void Status::SetPayload(absl::string_view type_url, absl::Cord payload) { rep->payloads = absl::make_unique(); } - int index = + absl::optional index = status_internal::FindPayloadIndexByUrl(rep->payloads.get(), type_url); - if (index != -1) { - (*rep->payloads)[index].payload = std::move(payload); + if (index.has_value()) { + (*rep->payloads)[index.value()].payload = std::move(payload); return; } @@ -143,10 +151,11 @@ void Status::SetPayload(absl::string_view type_url, absl::Cord payload) { } bool Status::ErasePayload(absl::string_view type_url) { - int index = status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url); - if (index != -1) { + absl::optional index = + status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url); + if (index.has_value()) { PrepareToModify(); - GetPayloads()->erase(GetPayloads()->begin() + index); + GetPayloads()->erase(GetPayloads()->begin() + index.value()); if (GetPayloads()->empty() && message().empty()) { // Special case: If this can be represented inlined, it MUST be // inlined (EqualsSlow depends on this behavior). 
@@ -161,7 +170,7 @@ bool Status::ErasePayload(absl::string_view type_url) { } void Status::ForEachPayload( - const std::function& visitor) + absl::FunctionRef visitor) const { if (auto* payloads = GetPayloads()) { bool in_reverse = @@ -185,11 +194,16 @@ void Status::ForEachPayload( } const std::string* Status::EmptyString() { - static std::string* empty_string = new std::string(); - return empty_string; + static union EmptyString { + std::string str; + ~EmptyString() {} + } empty = {{}}; + return &empty.str; } +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr const char Status::kMovedFromString[]; +#endif const std::string* Status::MovedFromString() { static std::string* moved_from_string = new std::string(kMovedFromString); @@ -440,5 +454,169 @@ bool IsUnknown(const Status& status) { return status.code() == absl::StatusCode::kUnknown; } +StatusCode ErrnoToStatusCode(int error_number) { + switch (error_number) { + case 0: + return StatusCode::kOk; + case EINVAL: // Invalid argument + case ENAMETOOLONG: // Filename too long + case E2BIG: // Argument list too long + case EDESTADDRREQ: // Destination address required + case EDOM: // Mathematics argument out of domain of function + case EFAULT: // Bad address + case EILSEQ: // Illegal byte sequence + case ENOPROTOOPT: // Protocol not available + case ENOSTR: // Not a STREAM + case ENOTSOCK: // Not a socket + case ENOTTY: // Inappropriate I/O control operation + case EPROTOTYPE: // Protocol wrong type for socket + case ESPIPE: // Invalid seek + return StatusCode::kInvalidArgument; + case ETIMEDOUT: // Connection timed out + case ETIME: // Timer expired + return StatusCode::kDeadlineExceeded; + case ENODEV: // No such device + case ENOENT: // No such file or directory +#ifdef ENOMEDIUM + case ENOMEDIUM: // No medium found +#endif + case ENXIO: // No such device or address + case ESRCH: // No such process + return StatusCode::kNotFound; + case EEXIST: // File exists + case EADDRNOTAVAIL: // Address not available + case EALREADY: // Connection already in progress +#ifdef ENOTUNIQ + case ENOTUNIQ: // Name not unique on network +#endif + return StatusCode::kAlreadyExists; + case EPERM: // Operation not permitted + case EACCES: // Permission denied +#ifdef ENOKEY + case ENOKEY: // Required key not available +#endif + case EROFS: // Read only file system + return StatusCode::kPermissionDenied; + case ENOTEMPTY: // Directory not empty + case EISDIR: // Is a directory + case ENOTDIR: // Not a directory + case EADDRINUSE: // Address already in use + case EBADF: // Invalid file descriptor +#ifdef EBADFD + case EBADFD: // File descriptor in bad state +#endif + case EBUSY: // Device or resource busy + case ECHILD: // No child processes + case EISCONN: // Socket is connected +#ifdef EISNAM + case EISNAM: // Is a named type file +#endif +#ifdef ENOTBLK + case ENOTBLK: // Block device required +#endif + case ENOTCONN: // The socket is not connected + case EPIPE: // Broken pipe +#ifdef ESHUTDOWN + case ESHUTDOWN: // Cannot send after transport endpoint shutdown +#endif + case ETXTBSY: // Text file busy +#ifdef EUNATCH + case EUNATCH: // Protocol driver not attached +#endif + return StatusCode::kFailedPrecondition; + case ENOSPC: // No space left on device +#ifdef EDQUOT + case EDQUOT: // Disk quota exceeded +#endif + case EMFILE: // Too many open files + case EMLINK: // Too many links + case ENFILE: // Too many open files in system + case ENOBUFS: // No buffer space available + case ENODATA: // No message is available on the STREAM read queue + case 
ENOMEM: // Not enough space + case ENOSR: // No STREAM resources +#ifdef EUSERS + case EUSERS: // Too many users +#endif + return StatusCode::kResourceExhausted; +#ifdef ECHRNG + case ECHRNG: // Channel number out of range +#endif + case EFBIG: // File too large + case EOVERFLOW: // Value too large to be stored in data type + case ERANGE: // Result too large + return StatusCode::kOutOfRange; +#ifdef ENOPKG + case ENOPKG: // Package not installed +#endif + case ENOSYS: // Function not implemented + case ENOTSUP: // Operation not supported + case EAFNOSUPPORT: // Address family not supported +#ifdef EPFNOSUPPORT + case EPFNOSUPPORT: // Protocol family not supported +#endif + case EPROTONOSUPPORT: // Protocol not supported +#ifdef ESOCKTNOSUPPORT + case ESOCKTNOSUPPORT: // Socket type not supported +#endif + case EXDEV: // Improper link + return StatusCode::kUnimplemented; + case EAGAIN: // Resource temporarily unavailable +#ifdef ECOMM + case ECOMM: // Communication error on send +#endif + case ECONNREFUSED: // Connection refused + case ECONNABORTED: // Connection aborted + case ECONNRESET: // Connection reset + case EINTR: // Interrupted function call +#ifdef EHOSTDOWN + case EHOSTDOWN: // Host is down +#endif + case EHOSTUNREACH: // Host is unreachable + case ENETDOWN: // Network is down + case ENETRESET: // Connection aborted by network + case ENETUNREACH: // Network unreachable + case ENOLCK: // No locks available + case ENOLINK: // Link has been severed +#ifdef ENONET + case ENONET: // Machine is not on the network +#endif + return StatusCode::kUnavailable; + case EDEADLK: // Resource deadlock avoided +#ifdef ESTALE + case ESTALE: // Stale file handle +#endif + return StatusCode::kAborted; + case ECANCELED: // Operation cancelled + return StatusCode::kCancelled; + default: + return StatusCode::kUnknown; + } +} + +namespace { +std::string MessageForErrnoToStatus(int error_number, + absl::string_view message) { + return absl::StrCat(message, ": ", + absl::base_internal::StrError(error_number)); +} +} // namespace + +Status ErrnoToStatus(int error_number, absl::string_view message) { + return Status(ErrnoToStatusCode(error_number), + MessageForErrnoToStatus(error_number, message)); +} + +namespace status_internal { + +std::string* MakeCheckFailString(const absl::Status* status, + const char* prefix) { + return new std::string( + absl::StrCat(prefix, " (", + status->ToString(StatusToStringMode::kWithEverything), ")")); +} + +} // namespace status_internal + ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.h index c5fe0a70f6..4e8292fc0e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status.h @@ -24,11 +24,11 @@ // * A set of helper functions for creating status codes and checking their // values // -// Within Google, `absl::Status` is the primary mechanism for gracefully -// handling errors across API boundaries (and in particular across RPC -// boundaries). Some of these errors may be recoverable, but others may not. -// Most functions that can produce a recoverable error should be designed to -// return an `absl::Status` (or `absl::StatusOr`). 
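As a usage sketch for the ErrnoToStatus() helper added above — the wrapper function and its name here are illustrative, not part of the patch:

#include <cerrno>
#include <cstdio>
#include "absl/status/status.h"

// Hypothetical call site: map a failed fopen() into an absl::Status whose
// message gets ": <strerror text>" appended by ErrnoToStatus().
absl::Status OpenForRead(const char* path, std::FILE** out) {
  *out = std::fopen(path, "rb");
  if (*out == nullptr) {
    return absl::ErrnoToStatus(errno, "cannot open file");
  }
  return absl::OkStatus();
}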
+// Within Google, `absl::Status` is the primary mechanism for communicating +// errors in C++, and is used to represent error state in both in-process +// library calls as well as RPC calls. Some of these errors may be recoverable, +// but others may not. Most functions that can produce a recoverable error +// should be designed to return an `absl::Status` (or `absl::StatusOr`). // // Example: // @@ -51,10 +51,11 @@ #ifndef ABSL_STATUS_STATUS_H_ #define ABSL_STATUS_STATUS_H_ -#include +#include #include +#include -#include "absl/container/inlined_vector.h" +#include "absl/functional/function_ref.h" #include "absl/status/internal/status_internal.h" #include "absl/strings/cord.h" #include "absl/strings/string_view.h" @@ -346,7 +347,7 @@ inline StatusToStringMode& operator^=(StatusToStringMode& lhs, // API developers should construct their functions to return `absl::OkStatus()` // upon success, or an `absl::StatusCode` upon another type of error (e.g // an `absl::StatusCode::kInvalidArgument` error). The API provides convenience -// functions to constuct each status code. +// functions to construct each status code. // // Example: // @@ -468,8 +469,9 @@ class Status final { // Status::ok() // - // Returns `true` if `this->ok()`. Prefer checking for an OK status using this - // member function. + // Returns `true` if `this->code()` == `absl::StatusCode::kOk`, + // indicating the absence of an error. + // Prefer checking for an OK status using this member function. ABSL_MUST_USE_RESULT bool ok() const; // Status::code() @@ -494,7 +496,7 @@ class Status final { // Returns the error message associated with this error code, if available. // Note that this message rarely describes the error code. It is not unusual // for the error message to be the empty string. As a result, prefer - // `Status::ToString()` for debug logging. + // `operator<<` or `Status::ToString()` for debug logging. absl::string_view message() const; friend bool operator==(const Status&, const Status&); @@ -531,7 +533,7 @@ class Status final { //---------------------------------------------------------------------------- // A payload may be attached to a status to provide additional context to an - // error that may not be satisifed by an existing `absl::StatusCode`. + // error that may not be satisfied by an existing `absl::StatusCode`. // Typically, this payload serves one of several purposes: // // * It may provide more fine-grained semantic information about the error @@ -590,7 +592,7 @@ class Status final { // NOTE: Any mutation on the same 'absl::Status' object during visitation is // forbidden and could result in undefined behavior. void ForEachPayload( - const std::function& visitor) + absl::FunctionRef visitor) const; private: @@ -611,10 +613,6 @@ class Status final { const status_internal::Payloads* GetPayloads() const; status_internal::Payloads* GetPayloads(); - // Takes ownership of payload. - static uintptr_t NewRep( - absl::StatusCode code, absl::string_view msg, - std::unique_ptr payload); static bool EqualsSlow(const absl::Status& a, const absl::Status& b); // MSVC 14.0 limitation requires the const. @@ -740,6 +738,19 @@ Status UnavailableError(absl::string_view message); Status UnimplementedError(absl::string_view message); Status UnknownError(absl::string_view message); +// ErrnoToStatusCode() +// +// Returns the StatusCode for `error_number`, which should be an `errno` value. +// See https://en.cppreference.com/w/cpp/error/errno_macros and similar +// references. 
+absl::StatusCode ErrnoToStatusCode(int error_number); + +// ErrnoToStatus() +// +// Convenience function that creates a `absl::Status` using an `error_number`, +// which should be an `errno` value. +Status ErrnoToStatus(int error_number, absl::string_view message); + //------------------------------------------------------------------------------ // Implementation details follow //------------------------------------------------------------------------------ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status_test.cc index 1b038f6d98..89cce7dfd7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/status_test.cc @@ -14,6 +14,8 @@ #include "absl/status/status.h" +#include + #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/strings/str_cat.h" @@ -485,4 +487,22 @@ TEST(Status, Swap) { test_swap(no_payload, with_payload); test_swap(with_payload, no_payload); } + +TEST(StatusErrno, ErrnoToStatusCode) { + EXPECT_EQ(absl::ErrnoToStatusCode(0), absl::StatusCode::kOk); + + // Spot-check a few errno values. + EXPECT_EQ(absl::ErrnoToStatusCode(EINVAL), + absl::StatusCode::kInvalidArgument); + EXPECT_EQ(absl::ErrnoToStatusCode(ENOENT), absl::StatusCode::kNotFound); + + // We'll pick a very large number so it hopefully doesn't collide to errno. + EXPECT_EQ(absl::ErrnoToStatusCode(19980927), absl::StatusCode::kUnknown); +} + +TEST(StatusErrno, ErrnoToStatus) { + absl::Status status = absl::ErrnoToStatus(ENOENT, "Cannot open 'path'"); + EXPECT_EQ(status.code(), absl::StatusCode::kNotFound); + EXPECT_EQ(status.message(), "Cannot open 'path': No such file or directory"); +} } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor.h index 7fa623fda9..a76e720153 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor.h @@ -106,7 +106,13 @@ class BadStatusOrAccess : public std::exception { // Returned StatusOr objects may not be ignored. template +#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) +// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict +// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available. +class [[nodiscard]] StatusOr; +#else class ABSL_MUST_USE_RESULT StatusOr; +#endif // ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) // absl::StatusOr // @@ -156,8 +162,8 @@ class ABSL_MUST_USE_RESULT StatusOr; // A `absl::StatusOr` can be constructed from a null pointer like any other // pointer value, and the result will be that `ok()` returns `true` and // `value()` returns `nullptr`. Checking the value of pointer in an -// `absl::StatusOr` generally requires a bit more care, to ensure both that a -// value is present and that value is not null: +// `absl::StatusOr` generally requires a bit more care, to ensure both that +// a value is present and that value is not null: // // StatusOr> result = FooFactory::MakeNewFoo(arg); // if (!result.ok()) { @@ -429,8 +435,8 @@ class StatusOr : private internal_statusor::StatusOrData, // if `T` can be constructed from a `U`. Can accept move or copy constructors. // // This constructor is explicit if `U` is not convertible to `T`. 
To avoid - // ambiguity, this constuctor is disabled if `U` is a `StatusOr`, where `J` - // is convertible to `T`. + // ambiguity, this constructor is disabled if `U` is a `StatusOr`, where + // `J` is convertible to `T`. template < typename U = T, absl::enable_if_t< @@ -471,7 +477,7 @@ class StatusOr : private internal_statusor::StatusOrData, // StatusOr::ok() // // Returns whether or not this `absl::StatusOr` holds a `T` value. This - // member function is analagous to `absl::Status::ok()` and should be used + // member function is analogous to `absl::Status::ok()` and should be used // similarly to check the status of return values. // // Example: diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor_test.cc index 7cae90e185..2902154367 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/status/statusor_test.cc @@ -1521,7 +1521,7 @@ static absl::StatusOr MakeStatus() { return 100; } TEST(StatusOr, TestIgnoreError) { MakeStatus().IgnoreError(); } TEST(StatusOr, EqualityOperator) { - constexpr int kNumCases = 4; + constexpr size_t kNumCases = 4; std::array, kNumCases> group1 = { absl::StatusOr(1), absl::StatusOr(2), absl::StatusOr(absl::InvalidArgumentError("msg")), @@ -1530,8 +1530,8 @@ TEST(StatusOr, EqualityOperator) { absl::StatusOr(1), absl::StatusOr(2), absl::StatusOr(absl::InvalidArgumentError("msg")), absl::StatusOr(absl::InternalError("msg"))}; - for (int i = 0; i < kNumCases; ++i) { - for (int j = 0; j < kNumCases; ++j) { + for (size_t i = 0; i < kNumCases; ++i) { + for (size_t j = 0; j < kNumCases; ++j) { if (i == j) { EXPECT_TRUE(group1[i] == group2[j]); EXPECT_FALSE(group1[i] != group2[j]); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/CMakeLists.txt index 8ad5f9c7f9..7e91ebf21b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/CMakeLists.txt @@ -21,7 +21,9 @@ absl_cc_library( "ascii.h" "charconv.h" "escaping.h" + "internal/damerau_levenshtein_distance.h" "internal/string_constant.h" + "internal/has_absl_stringify.h" "match.h" "numbers.h" "str_cat.h" @@ -39,8 +41,11 @@ absl_cc_library( "internal/charconv_bigint.h" "internal/charconv_parse.cc" "internal/charconv_parse.h" + "internal/damerau_levenshtein_distance.cc" "internal/memutil.cc" "internal/memutil.h" + "internal/stringify_sink.h" + "internal/stringify_sink.cc" "internal/stl_type_traits.h" "internal/str_join_internal.h" "internal/str_split_internal.h" @@ -68,6 +73,7 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME strings_internal @@ -131,6 +137,19 @@ absl_cc_test( GTest::gmock_main ) +absl_cc_test( + NAME + damerau_levenshtein_distance_test + SRCS + "internal/damerau_levenshtein_distance_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::strings + absl::base + GTest::gmock_main +) + absl_cc_test( NAME memutil_test @@ -280,6 +299,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::strings + absl::str_format absl::core_headers GTest::gmock_main ) @@ -385,6 +405,7 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME str_format_internal @@ -412,6 +433,7 @@ absl_cc_library( absl::core_headers absl::numeric_representation absl::type_traits + absl::utility absl::int128 absl::span ) @@ -492,6 +514,7 @@ absl_cc_test( DEPS absl::strings absl::str_format_internal + absl::core_headers absl::raw_logging_internal absl::int128 GTest::gmock_main @@ -523,6 +546,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME pow10_helper @@ -550,14 +574,17 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cord_internal HDRS + "internal/cord_data_edge.h" "internal/cord_internal.h" "internal/cord_rep_btree.h" "internal/cord_rep_btree_navigator.h" "internal/cord_rep_btree_reader.h" + "internal/cord_rep_crc.h" "internal/cord_rep_consume.h" "internal/cord_rep_flat.h" "internal/cord_rep_ring.h" @@ -567,6 +594,7 @@ absl_cc_library( "internal/cord_rep_btree.cc" "internal/cord_rep_btree_navigator.cc" "internal/cord_rep_btree_reader.cc" + "internal/cord_rep_crc.cc" "internal/cord_rep_consume.cc" "internal/cord_rep_ring.cc" COPTS @@ -585,6 +613,7 @@ absl_cc_library( absl::type_traits ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_update_tracker @@ -611,6 +640,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_functions @@ -639,6 +669,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_statistics @@ -653,6 +684,7 @@ absl_cc_library( absl::synchronization ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_handle @@ -686,6 +718,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_info @@ -753,6 +786,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_sample_token @@ -791,6 +825,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cordz_update_scope @@ -829,8 +864,12 @@ absl_cc_library( cord HDRS "cord.h" + "cord_buffer.h" SRCS "cord.cc" + "cord_analysis.cc" + "cord_analysis.h" + "cord_buffer.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS @@ -848,11 +887,13 @@ absl_cc_library( absl::inlined_vector absl::optional absl::raw_logging_internal + absl::span absl::strings absl::type_traits PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME cord_rep_test_util @@ -881,8 +922,10 @@ absl_cc_library( absl::cord_internal absl::strings TESTONLY + PUBLIC ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME cordz_test_helpers @@ -920,6 +963,8 @@ absl_cc_test( absl::cordz_test_helpers absl::core_headers absl::endian + absl::hash + absl::random_random absl::raw_logging_internal absl::fixed_array GTest::gmock_main @@ -927,18 +972,17 @@ absl_cc_test( absl_cc_test( NAME - cord_rep_consume_test + cord_data_edge_test SRCS - "internal/cord_rep_consume_test.cc" + "internal/cord_data_edge_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::config absl::cord_internal + absl::cord_rep_test_util absl::core_headers - absl::function_ref - absl::raw_logging_internal absl::strings GTest::gmock_main ) @@ -998,6 +1042,20 @@ absl_cc_test( GTest::gmock_main ) +absl_cc_test( + NAME + cord_rep_crc_test + SRCS + "internal/cord_rep_crc_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::cord_internal + absl::cord_rep_test_util + GTest::gmock_main +) + absl_cc_test( NAME cord_ring_test diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.cc index 93bb03e958..868df2d102 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.cc @@ -157,13 +157,13 @@ ABSL_DLL const char kToUpper[256] = { void AsciiStrToLower(std::string* s) { for (auto& ch : *s) { - ch = absl::ascii_tolower(ch); + ch = absl::ascii_tolower(static_cast(ch)); } } void AsciiStrToUpper(std::string* s) { for (auto& ch : *s) { - ch = absl::ascii_toupper(ch); + ch = absl::ascii_toupper(static_cast(ch)); } } @@ -183,17 +183,17 @@ void RemoveExtraAsciiWhitespace(std::string* str) { for (; input_it < input_end; ++input_it) { if (is_ws) { // Consecutive whitespace? Keep only the last. - is_ws = absl::ascii_isspace(*input_it); + is_ws = absl::ascii_isspace(static_cast(*input_it)); if (is_ws) --output_it; } else { - is_ws = absl::ascii_isspace(*input_it); + is_ws = absl::ascii_isspace(static_cast(*input_it)); } *output_it = *input_it; ++output_it; } - str->erase(output_it - &(*str)[0]); + str->erase(static_cast(output_it - &(*str)[0])); } ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.h index b46bc71f35..42eadaea6c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii.h @@ -133,7 +133,7 @@ inline bool ascii_isdigit(unsigned char c) { return c >= '0' && c <= '9'; } // ascii_isprint() // -// Determines whether the given character is printable, including whitespace. +// Determines whether the given character is printable, including spaces. inline bool ascii_isprint(unsigned char c) { return c >= 32 && c < 127; } // ascii_isgraph() @@ -197,7 +197,7 @@ ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(absl::string_view s) { ABSL_MUST_USE_RESULT inline absl::string_view StripLeadingAsciiWhitespace( absl::string_view str) { auto it = std::find_if_not(str.begin(), str.end(), absl::ascii_isspace); - return str.substr(it - str.begin()); + return str.substr(static_cast(it - str.begin())); } // Strips in place whitespace from the beginning of the given string. 
@@ -211,13 +211,13 @@ inline void StripLeadingAsciiWhitespace(std::string* str) { ABSL_MUST_USE_RESULT inline absl::string_view StripTrailingAsciiWhitespace( absl::string_view str) { auto it = std::find_if_not(str.rbegin(), str.rend(), absl::ascii_isspace); - return str.substr(0, str.rend() - it); + return str.substr(0, static_cast(str.rend() - it)); } // Strips in place whitespace from the end of the given string inline void StripTrailingAsciiWhitespace(std::string* str) { auto it = std::find_if_not(str->rbegin(), str->rend(), absl::ascii_isspace); - str->erase(str->rend() - it); + str->erase(static_cast(str->rend() - it)); } // Returns absl::string_view with whitespace stripped from both ends of the diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii_test.cc index 83af7825e1..dfed114c21 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/ascii_test.cc @@ -27,103 +27,99 @@ namespace { TEST(AsciiIsFoo, All) { for (int i = 0; i < 256; i++) { - if ((i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z')) - EXPECT_TRUE(absl::ascii_isalpha(i)) << ": failed on " << i; + const auto c = static_cast(i); + if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) + EXPECT_TRUE(absl::ascii_isalpha(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isalpha(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isalpha(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { - if ((i >= '0' && i <= '9')) - EXPECT_TRUE(absl::ascii_isdigit(i)) << ": failed on " << i; + const auto c = static_cast(i); + if ((c >= '0' && c <= '9')) + EXPECT_TRUE(absl::ascii_isdigit(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isdigit(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isdigit(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { - if (absl::ascii_isalpha(i) || absl::ascii_isdigit(i)) - EXPECT_TRUE(absl::ascii_isalnum(i)) << ": failed on " << i; + const auto c = static_cast(i); + if (absl::ascii_isalpha(c) || absl::ascii_isdigit(c)) + EXPECT_TRUE(absl::ascii_isalnum(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isalnum(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isalnum(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i != '\0' && strchr(" \r\n\t\v\f", i)) - EXPECT_TRUE(absl::ascii_isspace(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_isspace(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isspace(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isspace(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i >= 32 && i < 127) - EXPECT_TRUE(absl::ascii_isprint(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_isprint(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isprint(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isprint(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { - if (absl::ascii_isprint(i) && !absl::ascii_isspace(i) && - !absl::ascii_isalnum(i)) - EXPECT_TRUE(absl::ascii_ispunct(i)) << ": failed on " << i; - else - EXPECT_TRUE(!absl::ascii_ispunct(i)) << ": failed on " << i; + const auto c = static_cast(i); + if (absl::ascii_isprint(c) && !absl::ascii_isspace(c) && + !absl::ascii_isalnum(c)) { + EXPECT_TRUE(absl::ascii_ispunct(c)) << ": failed on " << c; + 
} else { + EXPECT_TRUE(!absl::ascii_ispunct(c)) << ": failed on " << c; + } } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i == ' ' || i == '\t') - EXPECT_TRUE(absl::ascii_isblank(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_isblank(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isblank(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isblank(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i < 32 || i == 127) - EXPECT_TRUE(absl::ascii_iscntrl(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_iscntrl(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_iscntrl(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_iscntrl(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { - if (absl::ascii_isdigit(i) || (i >= 'A' && i <= 'F') || - (i >= 'a' && i <= 'f')) - EXPECT_TRUE(absl::ascii_isxdigit(i)) << ": failed on " << i; - else - EXPECT_TRUE(!absl::ascii_isxdigit(i)) << ": failed on " << i; + const auto c = static_cast(i); + if (absl::ascii_isdigit(c) || (i >= 'A' && i <= 'F') || + (i >= 'a' && i <= 'f')) { + EXPECT_TRUE(absl::ascii_isxdigit(c)) << ": failed on " << c; + } else { + EXPECT_TRUE(!absl::ascii_isxdigit(c)) << ": failed on " << c; + } } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i > 32 && i < 127) - EXPECT_TRUE(absl::ascii_isgraph(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_isgraph(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isgraph(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isgraph(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i >= 'A' && i <= 'Z') - EXPECT_TRUE(absl::ascii_isupper(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_isupper(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_isupper(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_isupper(c)) << ": failed on " << c; } for (int i = 0; i < 256; i++) { + const auto c = static_cast(i); if (i >= 'a' && i <= 'z') - EXPECT_TRUE(absl::ascii_islower(i)) << ": failed on " << i; + EXPECT_TRUE(absl::ascii_islower(c)) << ": failed on " << c; else - EXPECT_TRUE(!absl::ascii_islower(i)) << ": failed on " << i; + EXPECT_TRUE(!absl::ascii_islower(c)) << ": failed on " << c; } - for (int i = 0; i < 128; i++) { - EXPECT_TRUE(absl::ascii_isascii(i)) << ": failed on " << i; + for (unsigned char c = 0; c < 128; c++) { + EXPECT_TRUE(absl::ascii_isascii(c)) << ": failed on " << c; } for (int i = 128; i < 256; i++) { - EXPECT_TRUE(!absl::ascii_isascii(i)) << ": failed on " << i; - } - - // The official is* functions don't accept negative signed chars, but - // our absl::ascii_is* functions do. 
- for (int i = 0; i < 256; i++) { - signed char sc = static_cast(static_cast(i)); - EXPECT_EQ(absl::ascii_isalpha(i), absl::ascii_isalpha(sc)) << i; - EXPECT_EQ(absl::ascii_isdigit(i), absl::ascii_isdigit(sc)) << i; - EXPECT_EQ(absl::ascii_isalnum(i), absl::ascii_isalnum(sc)) << i; - EXPECT_EQ(absl::ascii_isspace(i), absl::ascii_isspace(sc)) << i; - EXPECT_EQ(absl::ascii_ispunct(i), absl::ascii_ispunct(sc)) << i; - EXPECT_EQ(absl::ascii_isblank(i), absl::ascii_isblank(sc)) << i; - EXPECT_EQ(absl::ascii_iscntrl(i), absl::ascii_iscntrl(sc)) << i; - EXPECT_EQ(absl::ascii_isxdigit(i), absl::ascii_isxdigit(sc)) << i; - EXPECT_EQ(absl::ascii_isprint(i), absl::ascii_isprint(sc)) << i; - EXPECT_EQ(absl::ascii_isgraph(i), absl::ascii_isgraph(sc)) << i; - EXPECT_EQ(absl::ascii_isupper(i), absl::ascii_isupper(sc)) << i; - EXPECT_EQ(absl::ascii_islower(i), absl::ascii_islower(sc)) << i; - EXPECT_EQ(absl::ascii_isascii(i), absl::ascii_isascii(sc)) << i; + const auto c = static_cast(i); + EXPECT_TRUE(!absl::ascii_isascii(c)) << ": failed on " << c; } } @@ -137,19 +133,20 @@ TEST(AsciiIsFoo, SameAsIsFoo) { #endif for (int i = 0; i < 256; i++) { - EXPECT_EQ(isalpha(i) != 0, absl::ascii_isalpha(i)) << i; - EXPECT_EQ(isdigit(i) != 0, absl::ascii_isdigit(i)) << i; - EXPECT_EQ(isalnum(i) != 0, absl::ascii_isalnum(i)) << i; - EXPECT_EQ(isspace(i) != 0, absl::ascii_isspace(i)) << i; - EXPECT_EQ(ispunct(i) != 0, absl::ascii_ispunct(i)) << i; - EXPECT_EQ(isblank(i) != 0, absl::ascii_isblank(i)) << i; - EXPECT_EQ(iscntrl(i) != 0, absl::ascii_iscntrl(i)) << i; - EXPECT_EQ(isxdigit(i) != 0, absl::ascii_isxdigit(i)) << i; - EXPECT_EQ(isprint(i) != 0, absl::ascii_isprint(i)) << i; - EXPECT_EQ(isgraph(i) != 0, absl::ascii_isgraph(i)) << i; - EXPECT_EQ(isupper(i) != 0, absl::ascii_isupper(i)) << i; - EXPECT_EQ(islower(i) != 0, absl::ascii_islower(i)) << i; - EXPECT_EQ(isascii(i) != 0, absl::ascii_isascii(i)) << i; + const auto c = static_cast(i); + EXPECT_EQ(isalpha(c) != 0, absl::ascii_isalpha(c)) << c; + EXPECT_EQ(isdigit(c) != 0, absl::ascii_isdigit(c)) << c; + EXPECT_EQ(isalnum(c) != 0, absl::ascii_isalnum(c)) << c; + EXPECT_EQ(isspace(c) != 0, absl::ascii_isspace(c)) << c; + EXPECT_EQ(ispunct(c) != 0, absl::ascii_ispunct(c)) << c; + EXPECT_EQ(isblank(c) != 0, absl::ascii_isblank(c)) << c; + EXPECT_EQ(iscntrl(c) != 0, absl::ascii_iscntrl(c)) << c; + EXPECT_EQ(isxdigit(c) != 0, absl::ascii_isxdigit(c)) << c; + EXPECT_EQ(isprint(c) != 0, absl::ascii_isprint(c)) << c; + EXPECT_EQ(isgraph(c) != 0, absl::ascii_isgraph(c)) << c; + EXPECT_EQ(isupper(c) != 0, absl::ascii_isupper(c)) << c; + EXPECT_EQ(islower(c) != 0, absl::ascii_islower(c)) << c; + EXPECT_EQ(isascii(c) != 0, absl::ascii_isascii(c)) << c; } #ifndef __ANDROID__ @@ -166,25 +163,20 @@ TEST(AsciiToFoo, All) { #endif for (int i = 0; i < 256; i++) { - if (absl::ascii_islower(i)) - EXPECT_EQ(absl::ascii_toupper(i), 'A' + (i - 'a')) << i; + const auto c = static_cast(i); + if (absl::ascii_islower(c)) + EXPECT_EQ(absl::ascii_toupper(c), 'A' + (i - 'a')) << c; else - EXPECT_EQ(absl::ascii_toupper(i), static_cast(i)) << i; + EXPECT_EQ(absl::ascii_toupper(c), static_cast(i)) << c; - if (absl::ascii_isupper(i)) - EXPECT_EQ(absl::ascii_tolower(i), 'a' + (i - 'A')) << i; + if (absl::ascii_isupper(c)) + EXPECT_EQ(absl::ascii_tolower(c), 'a' + (i - 'A')) << c; else - EXPECT_EQ(absl::ascii_tolower(i), static_cast(i)) << i; + EXPECT_EQ(absl::ascii_tolower(c), static_cast(i)) << c; // These CHECKs only hold in a C locale. 
- EXPECT_EQ(static_cast(tolower(i)), absl::ascii_tolower(i)) << i; - EXPECT_EQ(static_cast(toupper(i)), absl::ascii_toupper(i)) << i; - - // The official to* functions don't accept negative signed chars, but - // our absl::ascii_to* functions do. - signed char sc = static_cast(static_cast(i)); - EXPECT_EQ(absl::ascii_tolower(i), absl::ascii_tolower(sc)) << i; - EXPECT_EQ(absl::ascii_toupper(i), absl::ascii_toupper(sc)) << i; + EXPECT_EQ(static_cast(tolower(i)), absl::ascii_tolower(c)) << c; + EXPECT_EQ(static_cast(toupper(i)), absl::ascii_toupper(c)) << c; } #ifndef __ANDROID__ // restore the old locale. diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/atod_manual_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/atod_manual_test.cc new file mode 100644 index 0000000000..6cf28b0d61 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/atod_manual_test.cc @@ -0,0 +1,193 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This program tests the absl::SimpleAtod and absl::SimpleAtof functions. Run +// it as "atod_manual_test pnftd/data/*.txt" where the pnftd directory is a +// local checkout of the https://github.com/nigeltao/parse-number-fxx-test-data +// repository. The test suite lives in a separate repository because its more +// than 5 million test cases weigh over several hundred megabytes and because +// the test cases are also useful to other software projects, not just Abseil. +// Its data/*.txt files contain one test case per line, like: +// +// 3C00 3F800000 3FF0000000000000 1 +// 3D00 3FA00000 3FF4000000000000 1.25 +// 3D9A 3FB33333 3FF6666666666666 1.4 +// 57B7 42F6E979 405EDD2F1A9FBE77 123.456 +// 622A 44454000 4088A80000000000 789 +// 7C00 7F800000 7FF0000000000000 123.456e789 +// +// For each line (and using 0-based column indexes), columns [5..13] and +// [14..30] contain the 32-bit float and 64-bit double result of parsing +// columns [31..]. +// +// For example, parsing "1.4" as a float gives the bits 0x3FB33333. +// +// In this 6-line example, the final line's float and double values are all +// infinity. The largest finite float and double values are approximately +// 3.40e+38 and 1.80e+308. 
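To make the fixed column layout described in that header comment concrete, a tiny standalone sketch (hypothetical, separate from the test program that follows) slicing one of the sample lines:

#include <cassert>
#include <string>

int main() {
  const std::string line = "3D9A 3FB33333 3FF6666666666666 1.4";
  const std::string float_bits_hex  = line.substr(5, 8);    // 8 hex digits starting at column 5
  const std::string double_bits_hex = line.substr(14, 16);  // 16 hex digits starting at column 14
  const std::string decimal_input   = line.substr(31);      // the text actually parsed
  assert(float_bits_hex == "3FB33333");
  assert(double_bits_hex == "3FF6666666666666");
  assert(decimal_input == "1.4");
  return 0;
}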
+ +#include +#include +#include + +#include "absl/base/casts.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +static constexpr uint8_t kUnhex[256] = { + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, // '0' ..= '7' + 0x8, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // '8' ..= '9' + + 0x0, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0x0, // 'A' ..= 'F' + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // +}; + +static absl::optional ReadFileToString(const char* filename) { + FILE* f = fopen(filename, "rb"); + if (!f) { + return absl::nullopt; + } + fseek(f, 0, SEEK_END); + size_t size = ftell(f); + fseek(f, 0, SEEK_SET); + std::string s(size, '\x00'); + size_t n = fread(&s[0], 1, size, f); + fclose(f); + if (n != size) { + return absl::nullopt; + } + return s; +} + +static bool ProcessOneTestFile(const char* filename) { + absl::optional contents = ReadFileToString(filename); + if (!contents) { + absl::FPrintF(stderr, "Invalid file: %s\n", filename); + return false; + } + + int num_cases = 0; + for (absl::string_view v(*contents); !v.empty();) { + size_t new_line = v.find('\n'); + if ((new_line == absl::string_view::npos) || (new_line < 32)) { + break; + } + absl::string_view input = v.substr(31, new_line - 31); + + // Test absl::SimpleAtof. + { + float f; + if (!absl::SimpleAtof(input, &f)) { + absl::FPrintF(stderr, "Could not parse \"%s\" in %s\n", input, + filename); + return false; + } + uint32_t have32 = absl::bit_cast(f); + + uint32_t want32 = 0; + for (int i = 0; i < 8; i++) { + want32 = (want32 << 4) | kUnhex[static_cast(v[5 + i])]; + } + + if (have32 != want32) { + absl::FPrintF(stderr, + "absl::SimpleAtof failed parsing \"%s\" in %s\n have " + "%08X\n want %08X\n", + input, filename, have32, want32); + return false; + } + } + + // Test absl::SimpleAtod. 
+ { + double d; + if (!absl::SimpleAtod(input, &d)) { + absl::FPrintF(stderr, "Could not parse \"%s\" in %s\n", input, + filename); + return false; + } + uint64_t have64 = absl::bit_cast(d); + + uint64_t want64 = 0; + for (int i = 0; i < 16; i++) { + want64 = (want64 << 4) | kUnhex[static_cast(v[14 + i])]; + } + + if (have64 != want64) { + absl::FPrintF(stderr, + "absl::SimpleAtod failed parsing \"%s\" in %s\n have " + "%016X\n want %016X\n", + input, filename, have64, want64); + return false; + } + } + + num_cases++; + v = v.substr(new_line + 1); + } + printf("%8d OK in %s\n", num_cases, filename); + return true; +} + +int main(int argc, char** argv) { + if (argc < 2) { + absl::FPrintF( + stderr, + "Usage: %s pnftd/data/*.txt\nwhere the pnftd directory is a local " + "checkout of " + "the\nhttps://github.com/nigeltao/parse-number-fxx-test-data " + "repository.\n", + argv[0]); + return 1; + } + + for (int i = 1; i < argc; i++) { + if (!ProcessOneTestFile(argv[i])) { + return 1; + } + } + return 0; +} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.cc index fefcfc90a5..69d420bcea 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include "absl/base/casts.h" #include "absl/numeric/bits.h" @@ -65,6 +66,14 @@ struct FloatTraits; template <> struct FloatTraits { + using mantissa_t = uint64_t; + + // The number of bits in the given float type. + static constexpr int kTargetBits = 64; + + // The number of exponent bits in the given float type. + static constexpr int kTargetExponentBits = 11; + // The number of mantissa bits in the given float type. This includes the // implied high bit. static constexpr int kTargetMantissaBits = 53; @@ -83,6 +92,31 @@ struct FloatTraits { // m * 2**kMinNormalExponent is exactly equal to DBL_MIN. static constexpr int kMinNormalExponent = -1074; + // The IEEE exponent bias. It equals ((1 << (kTargetExponentBits - 1)) - 1). + static constexpr int kExponentBias = 1023; + + // The Eisel-Lemire "Shifting to 54/25 Bits" adjustment. It equals (63 - 1 - + // kTargetMantissaBits). + static constexpr int kEiselLemireShift = 9; + + // The Eisel-Lemire high64_mask. It equals ((1 << kEiselLemireShift) - 1). + static constexpr uint64_t kEiselLemireMask = uint64_t{0x1FF}; + + // The smallest negative integer N (smallest negative means furthest from + // zero) such that parsing 9999999999999999999eN, with 19 nines, is still + // positive. Parsing a smaller (more negative) N will produce zero. + // + // Adjusting the decimal point and exponent, without adjusting the value, + // 9999999999999999999eN equals 9.999999999999999999eM where M = N + 18. + // + // 9999999999999999999, with 19 nines but no decimal point, is the largest + // "repeated nines" integer that fits in a uint64_t. + static constexpr int kEiselLemireMinInclusiveExp10 = -324 - 18; + + // The smallest positive integer N such that parsing 1eN produces infinity. + // Parsing a smaller N will produce something finite. + static constexpr int kEiselLemireMaxExclusiveExp10 = 309; + static double MakeNan(const char* tagp) { // Support nan no matter which namespace it's in. Some platforms // incorrectly don't put it in namespace std. 
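The constants added to FloatTraits above are documented in terms of one another: kExponentBias equals ((1 << (kTargetExponentBits - 1)) - 1), kEiselLemireShift equals (63 - 1 - kTargetMantissaBits), and kEiselLemireMask equals ((1 << kEiselLemireShift) - 1). A small self-contained sketch, separate from the patch, that re-derives the double-precision values from those stated formulas and checks them against the literals in the hunk:

#include <cstdint>

// Double-precision parameters named in the hunk above.
constexpr int kTargetExponentBits = 11;
constexpr int kTargetMantissaBits = 53;

// Re-derive the added constants from the formulas given in their comments.
constexpr int kExponentBias = (1 << (kTargetExponentBits - 1)) - 1;
constexpr int kEiselLemireShift = 63 - 1 - kTargetMantissaBits;
constexpr uint64_t kEiselLemireMask = (uint64_t{1} << kEiselLemireShift) - 1;

static_assert(kExponentBias == 1023, "matches the value in the patch");
static_assert(kEiselLemireShift == 9, "matches the value in the patch");
static_assert(kEiselLemireMask == uint64_t{0x1FF},
              "matches the value in the patch");

int main() { return 0; }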
@@ -103,7 +137,7 @@ struct FloatTraits { // a normal value is made, or it must be less narrow than that, in which case // `exponent` must be exactly kMinNormalExponent, and a subnormal value is // made. - static double Make(uint64_t mantissa, int exponent, bool sign) { + static double Make(mantissa_t mantissa, int exponent, bool sign) { #ifndef ABSL_BIT_PACK_FLOATS // Support ldexp no matter which namespace it's in. Some platforms // incorrectly don't put it in namespace std. @@ -116,8 +150,10 @@ struct FloatTraits { if (mantissa > kMantissaMask) { // Normal value. // Adjust by 1023 for the exponent representation bias, and an additional - // 52 due to the implied decimal point in the IEEE mantissa represenation. - dbl += uint64_t{exponent + 1023u + kTargetMantissaBits - 1} << 52; + // 52 due to the implied decimal point in the IEEE mantissa + // representation. + dbl += static_cast(exponent + 1023 + kTargetMantissaBits - 1) + << 52; mantissa &= kMantissaMask; } else { // subnormal value @@ -134,16 +170,27 @@ struct FloatTraits { // members and methods. template <> struct FloatTraits { + using mantissa_t = uint32_t; + + static constexpr int kTargetBits = 32; + static constexpr int kTargetExponentBits = 8; static constexpr int kTargetMantissaBits = 24; static constexpr int kMaxExponent = 104; static constexpr int kMinNormalExponent = -149; + static constexpr int kExponentBias = 127; + static constexpr int kEiselLemireShift = 38; + static constexpr uint64_t kEiselLemireMask = uint64_t{0x3FFFFFFFFF}; + static constexpr int kEiselLemireMinInclusiveExp10 = -46 - 18; + static constexpr int kEiselLemireMaxExclusiveExp10 = 39; + static float MakeNan(const char* tagp) { // Support nanf no matter which namespace it's in. Some platforms // incorrectly don't put it in namespace std. using namespace std; // NOLINT return nanf(tagp); } - static float Make(uint32_t mantissa, int exponent, bool sign) { + + static float Make(mantissa_t mantissa, int exponent, bool sign) { #ifndef ABSL_BIT_PACK_FLOATS // Support ldexpf no matter which namespace it's in. Some platforms // incorrectly don't put it in namespace std. @@ -157,7 +204,8 @@ struct FloatTraits { // Normal value. // Adjust by 127 for the exponent representation bias, and an additional // 23 due to the implied decimal point in the IEEE mantissa represenation. - flt += uint32_t{exponent + 127u + kTargetMantissaBits - 1} << 23; + flt += static_cast(exponent + 127 + kTargetMantissaBits - 1) + << 23; mantissa &= kMantissaMask; } else { // subnormal value @@ -181,39 +229,45 @@ struct FloatTraits { // // 2**63 <= Power10Mantissa(n) < 2**64. // +// See the "Table of powers of 10" comment below for a "1e60" example. +// // Lookups into the power-of-10 table must first check the Power10Overflow() and // Power10Underflow() functions, to avoid out-of-bounds table access. // -// Indexes into these tables are biased by -kPower10TableMin, and the table has -// values in the range [kPower10TableMin, kPower10TableMax]. -extern const uint64_t kPower10MantissaTable[]; -extern const int16_t kPower10ExponentTable[]; +// Indexes into these tables are biased by -kPower10TableMinInclusive. Valid +// indexes range from kPower10TableMinInclusive to kPower10TableMaxExclusive. +extern const uint64_t kPower10MantissaHighTable[]; // High 64 of 128 bits. +extern const uint64_t kPower10MantissaLowTable[]; // Low 64 of 128 bits. -// The smallest allowed value for use with the Power10Mantissa() and -// Power10Exponent() functions below. 
(If a smaller exponent is needed in +// The smallest (inclusive) allowed value for use with the Power10Mantissa() +// and Power10Exponent() functions below. (If a smaller exponent is needed in // calculations, the end result is guaranteed to underflow.) -constexpr int kPower10TableMin = -342; +constexpr int kPower10TableMinInclusive = -342; -// The largest allowed value for use with the Power10Mantissa() and -// Power10Exponent() functions below. (If a smaller exponent is needed in -// calculations, the end result is guaranteed to overflow.) -constexpr int kPower10TableMax = 308; +// The largest (exclusive) allowed value for use with the Power10Mantissa() and +// Power10Exponent() functions below. (If a larger-or-equal exponent is needed +// in calculations, the end result is guaranteed to overflow.) +constexpr int kPower10TableMaxExclusive = 309; uint64_t Power10Mantissa(int n) { - return kPower10MantissaTable[n - kPower10TableMin]; + return kPower10MantissaHighTable[n - kPower10TableMinInclusive]; } int Power10Exponent(int n) { - return kPower10ExponentTable[n - kPower10TableMin]; + // The 217706 etc magic numbers encode the results as a formula instead of a + // table. Their equivalence (over the kPower10TableMinInclusive .. + // kPower10TableMaxExclusive range) is confirmed by + // https://github.com/google/wuffs/blob/315b2e52625ebd7b02d8fac13e3cd85ea374fb80/script/print-mpb-powers-of-10.go + return (217706 * n >> 16) - 63; } // Returns true if n is large enough that 10**n always results in an IEEE // overflow. -bool Power10Overflow(int n) { return n > kPower10TableMax; } +bool Power10Overflow(int n) { return n >= kPower10TableMaxExclusive; } // Returns true if n is small enough that 10**n times a ParsedFloat mantissa // always results in an IEEE underflow. -bool Power10Underflow(int n) { return n < kPower10TableMin; } +bool Power10Underflow(int n) { return n < kPower10TableMinInclusive; } // Returns true if Power10Mantissa(n) * 2**Power10Exponent(n) is exactly equal // to 10**n numerically. Put another way, this returns true if there is no @@ -242,9 +296,11 @@ struct CalculatedFloat { // Returns the bit width of the given uint128. (Equivalently, returns 128 // minus the number of leading zero bits.) -unsigned BitWidth(uint128 value) { +int BitWidth(uint128 value) { if (Uint128High64(value) == 0) { - return static_cast(bit_width(Uint128Low64(value))); + // This static_cast is only needed when using a std::bit_width() + // implementation that does not have the fix for LWG 3656 applied. + return static_cast(bit_width(Uint128Low64(value))); } return 128 - countl_zero(Uint128High64(value)); } @@ -285,14 +341,19 @@ template bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative, FloatType* value) { if (input.type == strings_internal::FloatType::kNan) { - // A bug in both clang and gcc would cause the compiler to optimize away the - // buffer we are building below. Declaring the buffer volatile avoids the - // issue, and has no measurable performance impact in microbenchmarks. + // A bug in both clang < 7 and gcc would cause the compiler to optimize + // away the buffer we are building below. Declaring the buffer volatile + // avoids the issue, and has no measurable performance impact in + // microbenchmarks. 
// // https://bugs.llvm.org/show_bug.cgi?id=37778 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86113 constexpr ptrdiff_t kNanBufferSize = 128; +#if defined(__GNUC__) || (defined(__clang__) && __clang_major__ < 7) volatile char n_char_sequence[kNanBufferSize]; +#else + char n_char_sequence[kNanBufferSize]; +#endif if (input.subrange_begin == nullptr) { n_char_sequence[0] = '\0'; } else { @@ -337,8 +398,10 @@ void EncodeResult(const CalculatedFloat& calculated, bool negative, *value = negative ? -0.0 : 0.0; return; } - *value = FloatTraits::Make(calculated.mantissa, - calculated.exponent, negative); + *value = FloatTraits::Make( + static_cast::mantissa_t>( + calculated.mantissa), + calculated.exponent, negative); } // Returns the given uint128 shifted to the right by `shift` bits, and rounds @@ -519,7 +582,9 @@ CalculatedFloat CalculateFromParsedHexadecimal( const strings_internal::ParsedFloat& parsed_hex) { uint64_t mantissa = parsed_hex.mantissa; int exponent = parsed_hex.exponent; - auto mantissa_width = static_cast(bit_width(mantissa)); + // This static_cast is only needed when using a std::bit_width() + // implementation that does not have the fix for LWG 3656 applied. + int mantissa_width = static_cast(bit_width(mantissa)); const int shift = NormalizedShiftSize(mantissa_width, exponent); bool result_exact; exponent += shift; @@ -595,6 +660,185 @@ CalculatedFloat CalculateFromParsedDecimal( binary_exponent); } +// As discussed in https://nigeltao.github.io/blog/2020/eisel-lemire.html the +// primary goal of the Eisel-Lemire algorithm is speed, for 99+% of the cases, +// not 100% coverage. As long as Eisel-Lemire doesn’t claim false positives, +// the combined approach (falling back to an alternative implementation when +// this function returns false) is both fast and correct. +template +bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative, + FloatType* value, std::errc* ec) { + uint64_t man = input.mantissa; + int exp10 = input.exponent; + if (exp10 < FloatTraits::kEiselLemireMinInclusiveExp10) { + *value = negative ? -0.0 : 0.0; + *ec = std::errc::result_out_of_range; + return true; + } else if (exp10 >= FloatTraits::kEiselLemireMaxExclusiveExp10) { + // Return max (a finite value) consistent with from_chars and DR 3081. For + // SimpleAtod and SimpleAtof, post-processing will return infinity. + *value = negative ? -std::numeric_limits::max() + : std::numeric_limits::max(); + *ec = std::errc::result_out_of_range; + return true; + } + + // Assert kPower10TableMinInclusive <= exp10 < kPower10TableMaxExclusive. + // Equivalently, !Power10Underflow(exp10) and !Power10Overflow(exp10). + static_assert( + FloatTraits::kEiselLemireMinInclusiveExp10 >= + kPower10TableMinInclusive, + "(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds"); + static_assert( + FloatTraits::kEiselLemireMaxExclusiveExp10 <= + kPower10TableMaxExclusive, + "(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds"); + + // The terse (+) comments in this function body refer to sections of the + // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post. + // + // That blog post discusses double precision (11 exponent bits with a -1023 + // bias, 52 mantissa bits), but the same approach applies to single precision + // (8 exponent bits with a -127 bias, 23 mantissa bits). Either way, the + // computation here happens with 64-bit values (e.g. man) or 128-bit values + // (e.g. x) before finally converting to 64- or 32-bit floating point. 
+ // + // See also "Number Parsing at a Gigabyte per Second, Software: Practice and + // Experience 51 (8), 2021" (https://arxiv.org/abs/2101.11408) for detail. + + // (+) Normalization. + int clz = countl_zero(man); + man <<= static_cast(clz); + // The 217706 etc magic numbers are from the Power10Exponent function. + uint64_t ret_exp2 = + static_cast((217706 * exp10 >> 16) + 64 + + FloatTraits::kExponentBias - clz); + + // (+) Multiplication. + uint128 x = static_cast(man) * + static_cast( + kPower10MantissaHighTable[exp10 - kPower10TableMinInclusive]); + + // (+) Wider Approximation. + static constexpr uint64_t high64_mask = + FloatTraits::kEiselLemireMask; + if (((Uint128High64(x) & high64_mask) == high64_mask) && + (man > (std::numeric_limits::max() - Uint128Low64(x)))) { + uint128 y = + static_cast(man) * + static_cast( + kPower10MantissaLowTable[exp10 - kPower10TableMinInclusive]); + x += Uint128High64(y); + // For example, parsing "4503599627370497.5" will take the if-true + // branch here (for double precision), since: + // - x = 0x8000000000000BFF_FFFFFFFFFFFFFFFF + // - y = 0x8000000000000BFF_7FFFFFFFFFFFF400 + // - man = 0xA000000000000F00 + // Likewise, when parsing "0.0625" for single precision: + // - x = 0x7FFFFFFFFFFFFFFF_FFFFFFFFFFFFFFFF + // - y = 0x813FFFFFFFFFFFFF_8A00000000000000 + // - man = 0x9C40000000000000 + if (((Uint128High64(x) & high64_mask) == high64_mask) && + ((Uint128Low64(x) + 1) == 0) && + (man > (std::numeric_limits::max() - Uint128Low64(y)))) { + return false; + } + } + + // (+) Shifting to 54 Bits (or for single precision, to 25 bits). + uint64_t msb = Uint128High64(x) >> 63; + uint64_t ret_man = + Uint128High64(x) >> (msb + FloatTraits::kEiselLemireShift); + ret_exp2 -= 1 ^ msb; + + // (+) Half-way Ambiguity. + // + // For example, parsing "1e+23" will take the if-true branch here (for double + // precision), since: + // - x = 0x54B40B1F852BDA00_0000000000000000 + // - ret_man = 0x002A5A058FC295ED + // Likewise, when parsing "20040229.0" for single precision: + // - x = 0x4C72894000000000_0000000000000000 + // - ret_man = 0x000000000131CA25 + if ((Uint128Low64(x) == 0) && ((Uint128High64(x) & high64_mask) == 0) && + ((ret_man & 3) == 1)) { + return false; + } + + // (+) From 54 to 53 Bits (or for single precision, from 25 to 24 bits). + ret_man += ret_man & 1; // Line From54a. + ret_man >>= 1; // Line From54b. + // Incrementing ret_man (at line From54a) may have overflowed 54 bits (53 + // bits after the right shift by 1 at line From54b), so adjust for that. + // + // For example, parsing "9223372036854775807" will take the if-true branch + // here (for double precision), since: + // - ret_man = 0x0020000000000000 = (1 << 53) + // Likewise, when parsing "2147483647.0" for single precision: + // - ret_man = 0x0000000001000000 = (1 << 24) + if ((ret_man >> FloatTraits::kTargetMantissaBits) > 0) { + ret_exp2 += 1; + // Conceptually, we need a "ret_man >>= 1" in this if-block to balance + // incrementing ret_exp2 in the line immediately above. However, we only + // get here when line From54a overflowed (after adding a 1), so ret_man + // here is (1 << 53). Its low 53 bits are therefore all zeroes. The only + // remaining use of ret_man is to mask it with ((1 << 52) - 1), so only its + // low 52 bits matter. A "ret_man >>= 1" would have no effect in practice. 
+ // + // We omit the "ret_man >>= 1", even if it is cheap (and this if-branch is + // rarely taken) and technically 'more correct', so that mutation tests + // that would otherwise modify or omit that "ret_man >>= 1" don't complain + // that such code mutations have no observable effect. + } + + // ret_exp2 is a uint64_t. Zero or underflow means that we're in subnormal + // space. max_exp2 (0x7FF for double precision, 0xFF for single precision) or + // above means that we're in Inf/NaN space. + // + // The if block is equivalent to (but has fewer branches than): + // if ((ret_exp2 <= 0) || (ret_exp2 >= max_exp2)) { etc } + // + // For example, parsing "4.9406564584124654e-324" will take the if-true + // branch here, since ret_exp2 = -51. + static constexpr uint64_t max_exp2 = + (1 << FloatTraits::kTargetExponentBits) - 1; + if ((ret_exp2 - 1) >= (max_exp2 - 1)) { + return false; + } + +#ifndef ABSL_BIT_PACK_FLOATS + if (FloatTraits::kTargetBits == 64) { + *value = FloatTraits::Make( + (ret_man & 0x000FFFFFFFFFFFFFu) | 0x0010000000000000u, + static_cast(ret_exp2) - 1023 - 52, negative); + return true; + } else if (FloatTraits::kTargetBits == 32) { + *value = FloatTraits::Make( + (static_cast(ret_man) & 0x007FFFFFu) | 0x00800000u, + static_cast(ret_exp2) - 127 - 23, negative); + return true; + } +#else + if (FloatTraits::kTargetBits == 64) { + uint64_t ret_bits = (ret_exp2 << 52) | (ret_man & 0x000FFFFFFFFFFFFFu); + if (negative) { + ret_bits |= 0x8000000000000000u; + } + *value = absl::bit_cast(ret_bits); + return true; + } else if (FloatTraits::kTargetBits == 32) { + uint32_t ret_bits = (static_cast(ret_exp2) << 23) | + (static_cast(ret_man) & 0x007FFFFFu); + if (negative) { + ret_bits |= 0x80000000u; + } + *value = absl::bit_cast(ret_bits); + return true; + } +#endif // ABSL_BIT_PACK_FLOATS + return false; +} + template from_chars_result FromCharsImpl(const char* first, const char* last, FloatType& value, chars_format fmt_flags) { @@ -668,6 +912,12 @@ from_chars_result FromCharsImpl(const char* first, const char* last, if (HandleEdgeCase(decimal_parse, negative, &value)) { return result; } + // A nullptr subrange_begin means that the decimal_parse.mantissa is exact + // (not truncated), a precondition of the Eisel-Lemire algorithm. + if ((decimal_parse.subrange_begin == nullptr) && + EiselLemire(decimal_parse, negative, &value, &result.ec)) { + return result; + } CalculatedFloat calculated = CalculateFromParsedDecimal(decimal_parse); EncodeResult(calculated, negative, &result, &value); @@ -688,15 +938,46 @@ from_chars_result from_chars(const char* first, const char* last, float& value, namespace { -// Table of powers of 10, from kPower10TableMin to kPower10TableMax. +// Table of powers of 10, from kPower10TableMinInclusive to +// kPower10TableMaxExclusive. // -// kPower10MantissaTable[i - kPower10TableMin] stores the 64-bit mantissa (high -// bit always on), and kPower10ExponentTable[i - kPower10TableMin] stores the -// power-of-two exponent. For a given number i, this gives the unique mantissa -// and exponent such that mantissa * 2**exponent <= 10**i < (mantissa + 1) * -// 2**exponent. +// kPower10MantissaHighTable[i - kPower10TableMinInclusive] stores the 64-bit +// mantissa. The high bit is always on. +// +// kPower10MantissaLowTable extends that 64-bit mantissa to 128 bits. +// +// Power10Exponent(i) calculates the power-of-two exponent. 
+// +// For a number i, this gives the unique mantissaHigh and exponent such that +// (mantissaHigh * 2**exponent) <= 10**i < ((mantissaHigh + 1) * 2**exponent). +// +// For example, Python can confirm that the exact hexadecimal value of 1e60 is: +// >>> a = 1000000000000000000000000000000000000000000000000000000000000 +// >>> hex(a) +// '0x9f4f2726179a224501d762422c946590d91000000000000000' +// Adding underscores at every 8th hex digit shows 50 hex digits: +// '0x9f4f2726_179a2245_01d76242_2c946590_d9100000_00000000_00'. +// In this case, the high bit of the first hex digit, 9, is coincidentally set, +// so we do not have to do further shifting to deduce the 128-bit mantissa: +// - kPower10MantissaHighTable[60 - kP10TMI] = 0x9f4f2726179a2245U +// - kPower10MantissaLowTable[ 60 - kP10TMI] = 0x01d762422c946590U +// where kP10TMI is kPower10TableMinInclusive. The low 18 of those 50 hex +// digits are truncated. +// +// 50 hex digits (with the high bit set) is 200 bits and mantissaHigh holds 64 +// bits, so Power10Exponent(60) = 200 - 64 = 136. Again, Python can confirm: +// >>> b = 0x9f4f2726179a2245 +// >>> ((b+0)<<136) <= a +// True +// >>> ((b+1)<<136) <= a +// False +// +// The tables were generated by +// https://github.com/google/wuffs/blob/315b2e52625ebd7b02d8fac13e3cd85ea374fb80/script/print-mpb-powers-of-10.go +// after re-formatting its output into two arrays of N uint64_t values (instead +// of an N element array of uint64_t pairs). -const uint64_t kPower10MantissaTable[] = { +const uint64_t kPower10MantissaHighTable[] = { 0xeef453d6923bd65aU, 0x9558b4661b6565f8U, 0xbaaee17fa23ebf76U, 0xe95a99df8ace6f53U, 0x91d8a02bb6c10594U, 0xb64ec836a47146f9U, 0xe3e27a444d8d98b7U, 0x8e6d8c6ab0787f72U, 0xb208ef855c969f4fU, @@ -916,67 +1197,224 @@ const uint64_t kPower10MantissaTable[] = { 0xb6472e511c81471dU, 0xe3d8f9e563a198e5U, 0x8e679c2f5e44ff8fU, }; -const int16_t kPower10ExponentTable[] = { - -1200, -1196, -1193, -1190, -1186, -1183, -1180, -1176, -1173, -1170, -1166, - -1163, -1160, -1156, -1153, -1150, -1146, -1143, -1140, -1136, -1133, -1130, - -1127, -1123, -1120, -1117, -1113, -1110, -1107, -1103, -1100, -1097, -1093, - -1090, -1087, -1083, -1080, -1077, -1073, -1070, -1067, -1063, -1060, -1057, - -1053, -1050, -1047, -1043, -1040, -1037, -1034, -1030, -1027, -1024, -1020, - -1017, -1014, -1010, -1007, -1004, -1000, -997, -994, -990, -987, -984, - -980, -977, -974, -970, -967, -964, -960, -957, -954, -950, -947, - -944, -940, -937, -934, -931, -927, -924, -921, -917, -914, -911, - -907, -904, -901, -897, -894, -891, -887, -884, -881, -877, -874, - -871, -867, -864, -861, -857, -854, -851, -847, -844, -841, -838, - -834, -831, -828, -824, -821, -818, -814, -811, -808, -804, -801, - -798, -794, -791, -788, -784, -781, -778, -774, -771, -768, -764, - -761, -758, -754, -751, -748, -744, -741, -738, -735, -731, -728, - -725, -721, -718, -715, -711, -708, -705, -701, -698, -695, -691, - -688, -685, -681, -678, -675, -671, -668, -665, -661, -658, -655, - -651, -648, -645, -642, -638, -635, -632, -628, -625, -622, -618, - -615, -612, -608, -605, -602, -598, -595, -592, -588, -585, -582, - -578, -575, -572, -568, -565, -562, -558, -555, -552, -549, -545, - -542, -539, -535, -532, -529, -525, -522, -519, -515, -512, -509, - -505, -502, -499, -495, -492, -489, -485, -482, -479, -475, -472, - -469, -465, -462, -459, -455, -452, -449, -446, -442, -439, -436, - -432, -429, -426, -422, -419, -416, -412, -409, -406, -402, -399, - -396, -392, -389, -386, -382, -379, -376, -372, -369, -366, -362, - 
-359, -356, -353, -349, -346, -343, -339, -336, -333, -329, -326, - -323, -319, -316, -313, -309, -306, -303, -299, -296, -293, -289, - -286, -283, -279, -276, -273, -269, -266, -263, -259, -256, -253, - -250, -246, -243, -240, -236, -233, -230, -226, -223, -220, -216, - -213, -210, -206, -203, -200, -196, -193, -190, -186, -183, -180, - -176, -173, -170, -166, -163, -160, -157, -153, -150, -147, -143, - -140, -137, -133, -130, -127, -123, -120, -117, -113, -110, -107, - -103, -100, -97, -93, -90, -87, -83, -80, -77, -73, -70, - -67, -63, -60, -57, -54, -50, -47, -44, -40, -37, -34, - -30, -27, -24, -20, -17, -14, -10, -7, -4, 0, 3, - 6, 10, 13, 16, 20, 23, 26, 30, 33, 36, 39, - 43, 46, 49, 53, 56, 59, 63, 66, 69, 73, 76, - 79, 83, 86, 89, 93, 96, 99, 103, 106, 109, 113, - 116, 119, 123, 126, 129, 132, 136, 139, 142, 146, 149, - 152, 156, 159, 162, 166, 169, 172, 176, 179, 182, 186, - 189, 192, 196, 199, 202, 206, 209, 212, 216, 219, 222, - 226, 229, 232, 235, 239, 242, 245, 249, 252, 255, 259, - 262, 265, 269, 272, 275, 279, 282, 285, 289, 292, 295, - 299, 302, 305, 309, 312, 315, 319, 322, 325, 328, 332, - 335, 338, 342, 345, 348, 352, 355, 358, 362, 365, 368, - 372, 375, 378, 382, 385, 388, 392, 395, 398, 402, 405, - 408, 412, 415, 418, 422, 425, 428, 431, 435, 438, 441, - 445, 448, 451, 455, 458, 461, 465, 468, 471, 475, 478, - 481, 485, 488, 491, 495, 498, 501, 505, 508, 511, 515, - 518, 521, 524, 528, 531, 534, 538, 541, 544, 548, 551, - 554, 558, 561, 564, 568, 571, 574, 578, 581, 584, 588, - 591, 594, 598, 601, 604, 608, 611, 614, 617, 621, 624, - 627, 631, 634, 637, 641, 644, 647, 651, 654, 657, 661, - 664, 667, 671, 674, 677, 681, 684, 687, 691, 694, 697, - 701, 704, 707, 711, 714, 717, 720, 724, 727, 730, 734, - 737, 740, 744, 747, 750, 754, 757, 760, 764, 767, 770, - 774, 777, 780, 784, 787, 790, 794, 797, 800, 804, 807, - 810, 813, 817, 820, 823, 827, 830, 833, 837, 840, 843, - 847, 850, 853, 857, 860, 863, 867, 870, 873, 877, 880, - 883, 887, 890, 893, 897, 900, 903, 907, 910, 913, 916, - 920, 923, 926, 930, 933, 936, 940, 943, 946, 950, 953, - 956, 960, +const uint64_t kPower10MantissaLowTable[] = { + 0x113faa2906a13b3fU, 0x4ac7ca59a424c507U, 0x5d79bcf00d2df649U, + 0xf4d82c2c107973dcU, 0x79071b9b8a4be869U, 0x9748e2826cdee284U, + 0xfd1b1b2308169b25U, 0xfe30f0f5e50e20f7U, 0xbdbd2d335e51a935U, + 0xad2c788035e61382U, 0x4c3bcb5021afcc31U, 0xdf4abe242a1bbf3dU, + 0xd71d6dad34a2af0dU, 0x8672648c40e5ad68U, 0x680efdaf511f18c2U, + 0x0212bd1b2566def2U, 0x014bb630f7604b57U, 0x419ea3bd35385e2dU, + 0x52064cac828675b9U, 0x7343efebd1940993U, 0x1014ebe6c5f90bf8U, + 0xd41a26e077774ef6U, 0x8920b098955522b4U, 0x55b46e5f5d5535b0U, + 0xeb2189f734aa831dU, 0xa5e9ec7501d523e4U, 0x47b233c92125366eU, + 0x999ec0bb696e840aU, 0xc00670ea43ca250dU, 0x380406926a5e5728U, + 0xc605083704f5ecf2U, 0xf7864a44c633682eU, 0x7ab3ee6afbe0211dU, + 0x5960ea05bad82964U, 0x6fb92487298e33bdU, 0xa5d3b6d479f8e056U, + 0x8f48a4899877186cU, 0x331acdabfe94de87U, 0x9ff0c08b7f1d0b14U, + 0x07ecf0ae5ee44dd9U, 0xc9e82cd9f69d6150U, 0xbe311c083a225cd2U, + 0x6dbd630a48aaf406U, 0x092cbbccdad5b108U, 0x25bbf56008c58ea5U, + 0xaf2af2b80af6f24eU, 0x1af5af660db4aee1U, 0x50d98d9fc890ed4dU, + 0xe50ff107bab528a0U, 0x1e53ed49a96272c8U, 0x25e8e89c13bb0f7aU, + 0x77b191618c54e9acU, 0xd59df5b9ef6a2417U, 0x4b0573286b44ad1dU, + 0x4ee367f9430aec32U, 0x229c41f793cda73fU, 0x6b43527578c1110fU, + 0x830a13896b78aaa9U, 0x23cc986bc656d553U, 0x2cbfbe86b7ec8aa8U, + 0x7bf7d71432f3d6a9U, 0xdaf5ccd93fb0cc53U, 0xd1b3400f8f9cff68U, + 0x23100809b9c21fa1U, 
0xabd40a0c2832a78aU, 0x16c90c8f323f516cU, + 0xae3da7d97f6792e3U, 0x99cd11cfdf41779cU, 0x40405643d711d583U, + 0x482835ea666b2572U, 0xda3243650005eecfU, 0x90bed43e40076a82U, + 0x5a7744a6e804a291U, 0x711515d0a205cb36U, 0x0d5a5b44ca873e03U, + 0xe858790afe9486c2U, 0x626e974dbe39a872U, 0xfb0a3d212dc8128fU, + 0x7ce66634bc9d0b99U, 0x1c1fffc1ebc44e80U, 0xa327ffb266b56220U, + 0x4bf1ff9f0062baa8U, 0x6f773fc3603db4a9U, 0xcb550fb4384d21d3U, + 0x7e2a53a146606a48U, 0x2eda7444cbfc426dU, 0xfa911155fefb5308U, + 0x793555ab7eba27caU, 0x4bc1558b2f3458deU, 0x9eb1aaedfb016f16U, + 0x465e15a979c1cadcU, 0x0bfacd89ec191ec9U, 0xcef980ec671f667bU, + 0x82b7e12780e7401aU, 0xd1b2ecb8b0908810U, 0x861fa7e6dcb4aa15U, + 0x67a791e093e1d49aU, 0xe0c8bb2c5c6d24e0U, 0x58fae9f773886e18U, + 0xaf39a475506a899eU, 0x6d8406c952429603U, 0xc8e5087ba6d33b83U, + 0xfb1e4a9a90880a64U, 0x5cf2eea09a55067fU, 0xf42faa48c0ea481eU, + 0xf13b94daf124da26U, 0x76c53d08d6b70858U, 0x54768c4b0c64ca6eU, + 0xa9942f5dcf7dfd09U, 0xd3f93b35435d7c4cU, 0xc47bc5014a1a6dafU, + 0x359ab6419ca1091bU, 0xc30163d203c94b62U, 0x79e0de63425dcf1dU, + 0x985915fc12f542e4U, 0x3e6f5b7b17b2939dU, 0xa705992ceecf9c42U, + 0x50c6ff782a838353U, 0xa4f8bf5635246428U, 0x871b7795e136be99U, + 0x28e2557b59846e3fU, 0x331aeada2fe589cfU, 0x3ff0d2c85def7621U, + 0x0fed077a756b53a9U, 0xd3e8495912c62894U, 0x64712dd7abbbd95cU, + 0xbd8d794d96aacfb3U, 0xecf0d7a0fc5583a0U, 0xf41686c49db57244U, + 0x311c2875c522ced5U, 0x7d633293366b828bU, 0xae5dff9c02033197U, + 0xd9f57f830283fdfcU, 0xd072df63c324fd7bU, 0x4247cb9e59f71e6dU, + 0x52d9be85f074e608U, 0x67902e276c921f8bU, 0x00ba1cd8a3db53b6U, + 0x80e8a40eccd228a4U, 0x6122cd128006b2cdU, 0x796b805720085f81U, + 0xcbe3303674053bb0U, 0xbedbfc4411068a9cU, 0xee92fb5515482d44U, + 0x751bdd152d4d1c4aU, 0xd262d45a78a0635dU, 0x86fb897116c87c34U, + 0xd45d35e6ae3d4da0U, 0x8974836059cca109U, 0x2bd1a438703fc94bU, + 0x7b6306a34627ddcfU, 0x1a3bc84c17b1d542U, 0x20caba5f1d9e4a93U, + 0x547eb47b7282ee9cU, 0xe99e619a4f23aa43U, 0x6405fa00e2ec94d4U, + 0xde83bc408dd3dd04U, 0x9624ab50b148d445U, 0x3badd624dd9b0957U, + 0xe54ca5d70a80e5d6U, 0x5e9fcf4ccd211f4cU, 0x7647c3200069671fU, + 0x29ecd9f40041e073U, 0xf468107100525890U, 0x7182148d4066eeb4U, + 0xc6f14cd848405530U, 0xb8ada00e5a506a7cU, 0xa6d90811f0e4851cU, + 0x908f4a166d1da663U, 0x9a598e4e043287feU, 0x40eff1e1853f29fdU, + 0xd12bee59e68ef47cU, 0x82bb74f8301958ceU, 0xe36a52363c1faf01U, + 0xdc44e6c3cb279ac1U, 0x29ab103a5ef8c0b9U, 0x7415d448f6b6f0e7U, + 0x111b495b3464ad21U, 0xcab10dd900beec34U, 0x3d5d514f40eea742U, + 0x0cb4a5a3112a5112U, 0x47f0e785eaba72abU, 0x59ed216765690f56U, + 0x306869c13ec3532cU, 0x1e414218c73a13fbU, 0xe5d1929ef90898faU, + 0xdf45f746b74abf39U, 0x6b8bba8c328eb783U, 0x066ea92f3f326564U, + 0xc80a537b0efefebdU, 0xbd06742ce95f5f36U, 0x2c48113823b73704U, + 0xf75a15862ca504c5U, 0x9a984d73dbe722fbU, 0xc13e60d0d2e0ebbaU, + 0x318df905079926a8U, 0xfdf17746497f7052U, 0xfeb6ea8bedefa633U, + 0xfe64a52ee96b8fc0U, 0x3dfdce7aa3c673b0U, 0x06bea10ca65c084eU, + 0x486e494fcff30a62U, 0x5a89dba3c3efccfaU, 0xf89629465a75e01cU, + 0xf6bbb397f1135823U, 0x746aa07ded582e2cU, 0xa8c2a44eb4571cdcU, + 0x92f34d62616ce413U, 0x77b020baf9c81d17U, 0x0ace1474dc1d122eU, + 0x0d819992132456baU, 0x10e1fff697ed6c69U, 0xca8d3ffa1ef463c1U, + 0xbd308ff8a6b17cb2U, 0xac7cb3f6d05ddbdeU, 0x6bcdf07a423aa96bU, + 0x86c16c98d2c953c6U, 0xe871c7bf077ba8b7U, 0x11471cd764ad4972U, + 0xd598e40d3dd89bcfU, 0x4aff1d108d4ec2c3U, 0xcedf722a585139baU, + 0xc2974eb4ee658828U, 0x733d226229feea32U, 0x0806357d5a3f525fU, + 0xca07c2dcb0cf26f7U, 0xfc89b393dd02f0b5U, 0xbbac2078d443ace2U, + 
0xd54b944b84aa4c0dU, 0x0a9e795e65d4df11U, 0x4d4617b5ff4a16d5U, + 0x504bced1bf8e4e45U, 0xe45ec2862f71e1d6U, 0x5d767327bb4e5a4cU, + 0x3a6a07f8d510f86fU, 0x890489f70a55368bU, 0x2b45ac74ccea842eU, + 0x3b0b8bc90012929dU, 0x09ce6ebb40173744U, 0xcc420a6a101d0515U, + 0x9fa946824a12232dU, 0x47939822dc96abf9U, 0x59787e2b93bc56f7U, + 0x57eb4edb3c55b65aU, 0xede622920b6b23f1U, 0xe95fab368e45ecedU, + 0x11dbcb0218ebb414U, 0xd652bdc29f26a119U, 0x4be76d3346f0495fU, + 0x6f70a4400c562ddbU, 0xcb4ccd500f6bb952U, 0x7e2000a41346a7a7U, + 0x8ed400668c0c28c8U, 0x728900802f0f32faU, 0x4f2b40a03ad2ffb9U, + 0xe2f610c84987bfa8U, 0x0dd9ca7d2df4d7c9U, 0x91503d1c79720dbbU, + 0x75a44c6397ce912aU, 0xc986afbe3ee11abaU, 0xfbe85badce996168U, + 0xfae27299423fb9c3U, 0xdccd879fc967d41aU, 0x5400e987bbc1c920U, + 0x290123e9aab23b68U, 0xf9a0b6720aaf6521U, 0xf808e40e8d5b3e69U, + 0xb60b1d1230b20e04U, 0xb1c6f22b5e6f48c2U, 0x1e38aeb6360b1af3U, + 0x25c6da63c38de1b0U, 0x579c487e5a38ad0eU, 0x2d835a9df0c6d851U, + 0xf8e431456cf88e65U, 0x1b8e9ecb641b58ffU, 0xe272467e3d222f3fU, + 0x5b0ed81dcc6abb0fU, 0x98e947129fc2b4e9U, 0x3f2398d747b36224U, + 0x8eec7f0d19a03aadU, 0x1953cf68300424acU, 0x5fa8c3423c052dd7U, + 0x3792f412cb06794dU, 0xe2bbd88bbee40bd0U, 0x5b6aceaeae9d0ec4U, + 0xf245825a5a445275U, 0xeed6e2f0f0d56712U, 0x55464dd69685606bU, + 0xaa97e14c3c26b886U, 0xd53dd99f4b3066a8U, 0xe546a8038efe4029U, + 0xde98520472bdd033U, 0x963e66858f6d4440U, 0xdde7001379a44aa8U, + 0x5560c018580d5d52U, 0xaab8f01e6e10b4a6U, 0xcab3961304ca70e8U, + 0x3d607b97c5fd0d22U, 0x8cb89a7db77c506aU, 0x77f3608e92adb242U, + 0x55f038b237591ed3U, 0x6b6c46dec52f6688U, 0x2323ac4b3b3da015U, + 0xabec975e0a0d081aU, 0x96e7bd358c904a21U, 0x7e50d64177da2e54U, + 0xdde50bd1d5d0b9e9U, 0x955e4ec64b44e864U, 0xbd5af13bef0b113eU, + 0xecb1ad8aeacdd58eU, 0x67de18eda5814af2U, 0x80eacf948770ced7U, + 0xa1258379a94d028dU, 0x096ee45813a04330U, 0x8bca9d6e188853fcU, + 0x775ea264cf55347dU, 0x95364afe032a819dU, 0x3a83ddbd83f52204U, + 0xc4926a9672793542U, 0x75b7053c0f178293U, 0x5324c68b12dd6338U, + 0xd3f6fc16ebca5e03U, 0x88f4bb1ca6bcf584U, 0x2b31e9e3d06c32e5U, + 0x3aff322e62439fcfU, 0x09befeb9fad487c2U, 0x4c2ebe687989a9b3U, + 0x0f9d37014bf60a10U, 0x538484c19ef38c94U, 0x2865a5f206b06fb9U, + 0xf93f87b7442e45d3U, 0xf78f69a51539d748U, 0xb573440e5a884d1bU, + 0x31680a88f8953030U, 0xfdc20d2b36ba7c3dU, 0x3d32907604691b4cU, + 0xa63f9a49c2c1b10fU, 0x0fcf80dc33721d53U, 0xd3c36113404ea4a8U, + 0x645a1cac083126e9U, 0x3d70a3d70a3d70a3U, 0xccccccccccccccccU, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U, + 0x0000000000000000U, 0x4000000000000000U, 0x5000000000000000U, + 0xa400000000000000U, 0x4d00000000000000U, 0xf020000000000000U, + 0x6c28000000000000U, 0xc732000000000000U, 0x3c7f400000000000U, + 0x4b9f100000000000U, 0x1e86d40000000000U, 0x1314448000000000U, + 0x17d955a000000000U, 0x5dcfab0800000000U, 0x5aa1cae500000000U, + 0xf14a3d9e40000000U, 0x6d9ccd05d0000000U, 0xe4820023a2000000U, + 0xdda2802c8a800000U, 0xd50b2037ad200000U, 0x4526f422cc340000U, + 0x9670b12b7f410000U, 0x3c0cdd765f114000U, 
0xa5880a69fb6ac800U, + 0x8eea0d047a457a00U, 0x72a4904598d6d880U, 0x47a6da2b7f864750U, + 0x999090b65f67d924U, 0xfff4b4e3f741cf6dU, 0xbff8f10e7a8921a4U, + 0xaff72d52192b6a0dU, 0x9bf4f8a69f764490U, 0x02f236d04753d5b4U, + 0x01d762422c946590U, 0x424d3ad2b7b97ef5U, 0xd2e0898765a7deb2U, + 0x63cc55f49f88eb2fU, 0x3cbf6b71c76b25fbU, 0x8bef464e3945ef7aU, + 0x97758bf0e3cbb5acU, 0x3d52eeed1cbea317U, 0x4ca7aaa863ee4bddU, + 0x8fe8caa93e74ef6aU, 0xb3e2fd538e122b44U, 0x60dbbca87196b616U, + 0xbc8955e946fe31cdU, 0x6babab6398bdbe41U, 0xc696963c7eed2dd1U, + 0xfc1e1de5cf543ca2U, 0x3b25a55f43294bcbU, 0x49ef0eb713f39ebeU, + 0x6e3569326c784337U, 0x49c2c37f07965404U, 0xdc33745ec97be906U, + 0x69a028bb3ded71a3U, 0xc40832ea0d68ce0cU, 0xf50a3fa490c30190U, + 0x792667c6da79e0faU, 0x577001b891185938U, 0xed4c0226b55e6f86U, + 0x544f8158315b05b4U, 0x696361ae3db1c721U, 0x03bc3a19cd1e38e9U, + 0x04ab48a04065c723U, 0x62eb0d64283f9c76U, 0x3ba5d0bd324f8394U, + 0xca8f44ec7ee36479U, 0x7e998b13cf4e1ecbU, 0x9e3fedd8c321a67eU, + 0xc5cfe94ef3ea101eU, 0xbba1f1d158724a12U, 0x2a8a6e45ae8edc97U, + 0xf52d09d71a3293bdU, 0x593c2626705f9c56U, 0x6f8b2fb00c77836cU, + 0x0b6dfb9c0f956447U, 0x4724bd4189bd5eacU, 0x58edec91ec2cb657U, + 0x2f2967b66737e3edU, 0xbd79e0d20082ee74U, 0xecd8590680a3aa11U, + 0xe80e6f4820cc9495U, 0x3109058d147fdcddU, 0xbd4b46f0599fd415U, + 0x6c9e18ac7007c91aU, 0x03e2cf6bc604ddb0U, 0x84db8346b786151cU, + 0xe612641865679a63U, 0x4fcb7e8f3f60c07eU, 0xe3be5e330f38f09dU, + 0x5cadf5bfd3072cc5U, 0x73d9732fc7c8f7f6U, 0x2867e7fddcdd9afaU, + 0xb281e1fd541501b8U, 0x1f225a7ca91a4226U, 0x3375788de9b06958U, + 0x0052d6b1641c83aeU, 0xc0678c5dbd23a49aU, 0xf840b7ba963646e0U, + 0xb650e5a93bc3d898U, 0xa3e51f138ab4cebeU, 0xc66f336c36b10137U, + 0xb80b0047445d4184U, 0xa60dc059157491e5U, 0x87c89837ad68db2fU, + 0x29babe4598c311fbU, 0xf4296dd6fef3d67aU, 0x1899e4a65f58660cU, + 0x5ec05dcff72e7f8fU, 0x76707543f4fa1f73U, 0x6a06494a791c53a8U, + 0x0487db9d17636892U, 0x45a9d2845d3c42b6U, 0x0b8a2392ba45a9b2U, + 0x8e6cac7768d7141eU, 0x3207d795430cd926U, 0x7f44e6bd49e807b8U, + 0x5f16206c9c6209a6U, 0x36dba887c37a8c0fU, 0xc2494954da2c9789U, + 0xf2db9baa10b7bd6cU, 0x6f92829494e5acc7U, 0xcb772339ba1f17f9U, + 0xff2a760414536efbU, 0xfef5138519684abaU, 0x7eb258665fc25d69U, + 0xef2f773ffbd97a61U, 0xaafb550ffacfd8faU, 0x95ba2a53f983cf38U, + 0xdd945a747bf26183U, 0x94f971119aeef9e4U, 0x7a37cd5601aab85dU, + 0xac62e055c10ab33aU, 0x577b986b314d6009U, 0xed5a7e85fda0b80bU, + 0x14588f13be847307U, 0x596eb2d8ae258fc8U, 0x6fca5f8ed9aef3bbU, + 0x25de7bb9480d5854U, 0xaf561aa79a10ae6aU, 0x1b2ba1518094da04U, + 0x90fb44d2f05d0842U, 0x353a1607ac744a53U, 0x42889b8997915ce8U, + 0x69956135febada11U, 0x43fab9837e699095U, 0x94f967e45e03f4bbU, + 0x1d1be0eebac278f5U, 0x6462d92a69731732U, 0x7d7b8f7503cfdcfeU, + 0x5cda735244c3d43eU, 0x3a0888136afa64a7U, 0x088aaa1845b8fdd0U, + 0x8aad549e57273d45U, 0x36ac54e2f678864bU, 0x84576a1bb416a7ddU, + 0x656d44a2a11c51d5U, 0x9f644ae5a4b1b325U, 0x873d5d9f0dde1feeU, + 0xa90cb506d155a7eaU, 0x09a7f12442d588f2U, 0x0c11ed6d538aeb2fU, + 0x8f1668c8a86da5faU, 0xf96e017d694487bcU, 0x37c981dcc395a9acU, + 0x85bbe253f47b1417U, 0x93956d7478ccec8eU, 0x387ac8d1970027b2U, + 0x06997b05fcc0319eU, 0x441fece3bdf81f03U, 0xd527e81cad7626c3U, + 0x8a71e223d8d3b074U, 0xf6872d5667844e49U, 0xb428f8ac016561dbU, + 0xe13336d701beba52U, 0xecc0024661173473U, 0x27f002d7f95d0190U, + 0x31ec038df7b441f4U, 0x7e67047175a15271U, 0x0f0062c6e984d386U, + 0x52c07b78a3e60868U, 0xa7709a56ccdf8a82U, 0x88a66076400bb691U, + 0x6acff893d00ea435U, 0x0583f6b8c4124d43U, 0xc3727a337a8b704aU, + 0x744f18c0592e4c5cU, 
0x1162def06f79df73U, 0x8addcb5645ac2ba8U, + 0x6d953e2bd7173692U, 0xc8fa8db6ccdd0437U, 0x1d9c9892400a22a2U, + 0x2503beb6d00cab4bU, 0x2e44ae64840fd61dU, 0x5ceaecfed289e5d2U, + 0x7425a83e872c5f47U, 0xd12f124e28f77719U, 0x82bd6b70d99aaa6fU, + 0x636cc64d1001550bU, 0x3c47f7e05401aa4eU, 0x65acfaec34810a71U, + 0x7f1839a741a14d0dU, 0x1ede48111209a050U, 0x934aed0aab460432U, + 0xf81da84d5617853fU, 0x36251260ab9d668eU, 0xc1d72b7c6b426019U, + 0xb24cf65b8612f81fU, 0xdee033f26797b627U, 0x169840ef017da3b1U, + 0x8e1f289560ee864eU, 0xf1a6f2bab92a27e2U, 0xae10af696774b1dbU, + 0xacca6da1e0a8ef29U, 0x17fd090a58d32af3U, 0xddfc4b4cef07f5b0U, + 0x4abdaf101564f98eU, 0x9d6d1ad41abe37f1U, 0x84c86189216dc5edU, + 0x32fd3cf5b4e49bb4U, 0x3fbc8c33221dc2a1U, 0x0fabaf3feaa5334aU, + 0x29cb4d87f2a7400eU, 0x743e20e9ef511012U, 0x914da9246b255416U, + 0x1ad089b6c2f7548eU, 0xa184ac2473b529b1U, 0xc9e5d72d90a2741eU, + 0x7e2fa67c7a658892U, 0xddbb901b98feeab7U, 0x552a74227f3ea565U, + 0xd53a88958f87275fU, 0x8a892abaf368f137U, 0x2d2b7569b0432d85U, + 0x9c3b29620e29fc73U, 0x8349f3ba91b47b8fU, 0x241c70a936219a73U, + 0xed238cd383aa0110U, 0xf4363804324a40aaU, 0xb143c6053edcd0d5U, + 0xdd94b7868e94050aU, 0xca7cf2b4191c8326U, 0xfd1c2f611f63a3f0U, + 0xbc633b39673c8cecU, 0xd5be0503e085d813U, 0x4b2d8644d8a74e18U, + 0xddf8e7d60ed1219eU, 0xcabb90e5c942b503U, 0x3d6a751f3b936243U, + 0x0cc512670a783ad4U, 0x27fb2b80668b24c5U, 0xb1f9f660802dedf6U, + 0x5e7873f8a0396973U, 0xdb0b487b6423e1e8U, 0x91ce1a9a3d2cda62U, + 0x7641a140cc7810fbU, 0xa9e904c87fcb0a9dU, 0x546345fa9fbdcd44U, + 0xa97c177947ad4095U, 0x49ed8eabcccc485dU, 0x5c68f256bfff5a74U, + 0x73832eec6fff3111U, 0xc831fd53c5ff7eabU, 0xba3e7ca8b77f5e55U, + 0x28ce1bd2e55f35ebU, 0x7980d163cf5b81b3U, 0xd7e105bcc332621fU, + 0x8dd9472bf3fefaa7U, 0xb14f98f6f0feb951U, 0x6ed1bf9a569f33d3U, + 0x0a862f80ec4700c8U, 0xcd27bb612758c0faU, 0x8038d51cb897789cU, + 0xe0470a63e6bd56c3U, 0x1858ccfce06cac74U, 0x0f37801e0c43ebc8U, + 0xd30560258f54e6baU, 0x47c6b82ef32a2069U, 0x4cdc331d57fa5441U, + 0xe0133fe4adf8e952U, 0x58180fddd97723a6U, 0x570f09eaa7ea7648U, }; } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.h index e04be32f95..7c50981245 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/charconv.h @@ -64,8 +64,9 @@ struct from_chars_result { // the result in `value`. // // The matching pattern format is almost the same as that of strtod(), except -// that C locale is not respected, and an initial '+' character in the input -// range will never be matched. +// that (1) C locale is not respected, (2) an initial '+' character in the +// input range will never be matched, and (3) leading whitespaces are not +// ignored. // // If `fmt` is set, it must be one of the enumerator values of the chars_format. // (This is despite the fact that chars_format is a bitmask type.) 
If set to diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.cc index 29af9782ce..92822c0588 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -34,9 +35,12 @@ #include "absl/base/port.h" #include "absl/container/fixed_array.h" #include "absl/container/inlined_vector.h" +#include "absl/strings/cord_buffer.h" #include "absl/strings/escaping.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_crc.h" #include "absl/strings/internal/cord_rep_flat.h" #include "absl/strings/internal/cordz_statistics.h" #include "absl/strings/internal/cordz_update_scope.h" @@ -52,7 +56,7 @@ ABSL_NAMESPACE_BEGIN using ::absl::cord_internal::CordRep; using ::absl::cord_internal::CordRepBtree; -using ::absl::cord_internal::CordRepConcat; +using ::absl::cord_internal::CordRepCrc; using ::absl::cord_internal::CordRepExternal; using ::absl::cord_internal::CordRepFlat; using ::absl::cord_internal::CordRepSubstring; @@ -64,56 +68,6 @@ using ::absl::cord_internal::kMinFlatLength; using ::absl::cord_internal::kInlinedVectorSize; using ::absl::cord_internal::kMaxBytesToCopy; -constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) { - return n == 0 ? a : Fibonacci(n - 1, b, a + b); -} - -static_assert(Fibonacci(63) == 6557470319842, - "Fibonacci values computed incorrectly"); - -// Minimum length required for a given depth tree -- a tree is considered -// balanced if -// length(t) >= min_length[depth(t)] -// The root node depth is allowed to become twice as large to reduce rebalancing -// for larger strings (see IsRootBalanced). -static constexpr uint64_t min_length[] = { - Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5), - Fibonacci(6), Fibonacci(7), Fibonacci(8), Fibonacci(9), - Fibonacci(10), Fibonacci(11), Fibonacci(12), Fibonacci(13), - Fibonacci(14), Fibonacci(15), Fibonacci(16), Fibonacci(17), - Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21), - Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25), - Fibonacci(26), Fibonacci(27), Fibonacci(28), Fibonacci(29), - Fibonacci(30), Fibonacci(31), Fibonacci(32), Fibonacci(33), - Fibonacci(34), Fibonacci(35), Fibonacci(36), Fibonacci(37), - Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41), - Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45), - Fibonacci(46), Fibonacci(47), - 0xffffffffffffffffull, // Avoid overflow -}; - -static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length); - -static inline bool btree_enabled() { - return cord_internal::cord_btree_enabled.load( - std::memory_order_relaxed); -} - -static inline bool IsRootBalanced(CordRep* node) { - if (!node->IsConcat()) { - return true; - } else if (node->concat()->depth() <= 15) { - return true; - } else if (node->concat()->depth() > kMinLengthSize) { - return false; - } else { - // Allow depth to become twice as large as implied by fibonacci rule to - // reduce rebalancing for larger strings. 
- return (node->length >= min_length[node->concat()->depth() / 2]); - } -} - -static CordRep* Rebalance(CordRep* node); static void DumpNode(CordRep* rep, bool include_data, std::ostream* os, int indent = 0); static bool VerifyNode(CordRep* root, CordRep* start_node, @@ -135,75 +89,6 @@ static inline CordRep* VerifyTree(CordRep* node) { return node; } -// Return the depth of a node -static int Depth(const CordRep* rep) { - if (rep->IsConcat()) { - return rep->concat()->depth(); - } else { - return 0; - } -} - -static void SetConcatChildren(CordRepConcat* concat, CordRep* left, - CordRep* right) { - concat->left = left; - concat->right = right; - - concat->length = left->length + right->length; - concat->set_depth(1 + std::max(Depth(left), Depth(right))); -} - -// Create a concatenation of the specified nodes. -// Does not change the refcounts of "left" and "right". -// The returned node has a refcount of 1. -static CordRep* RawConcat(CordRep* left, CordRep* right) { - // Avoid making degenerate concat nodes (one child is empty) - if (left == nullptr) return right; - if (right == nullptr) return left; - if (left->length == 0) { - CordRep::Unref(left); - return right; - } - if (right->length == 0) { - CordRep::Unref(right); - return left; - } - - CordRepConcat* rep = new CordRepConcat(); - rep->tag = cord_internal::CONCAT; - SetConcatChildren(rep, left, right); - - return rep; -} - -static CordRep* Concat(CordRep* left, CordRep* right) { - CordRep* rep = RawConcat(left, right); - if (rep != nullptr && !IsRootBalanced(rep)) { - rep = Rebalance(rep); - } - return VerifyTree(rep); -} - -// Make a balanced tree out of an array of leaf nodes. -static CordRep* MakeBalancedTree(CordRep** reps, size_t n) { - // Make repeated passes over the array, merging adjacent pairs - // until we are left with just a single node. - while (n > 1) { - size_t dst = 0; - for (size_t src = 0; src < n; src += 2) { - if (src + 1 < n) { - reps[dst] = Concat(reps[src], reps[src + 1]); - } else { - reps[dst] = reps[src]; - } - dst++; - } - n = dst; - } - - return reps[0]; -} - static CordRepFlat* CreateFlat(const char* data, size_t length, size_t alloc_hint) { CordRepFlat* flat = CordRepFlat::New(length + alloc_hint); @@ -229,21 +114,7 @@ static CordRep* NewBtree(const char* data, size_t length, size_t alloc_hint) { // The returned node has a refcount of 1. 
static CordRep* NewTree(const char* data, size_t length, size_t alloc_hint) { if (length == 0) return nullptr; - if (btree_enabled()) { - return NewBtree(data, length, alloc_hint); - } - absl::FixedArray reps((length - 1) / kMaxFlatLength + 1); - size_t n = 0; - do { - const size_t len = std::min(length, kMaxFlatLength); - CordRepFlat* rep = CordRepFlat::New(len + alloc_hint); - rep->length = len; - memcpy(rep->Data(), data, len); - reps[n++] = VerifyTree(rep); - data += len; - length -= len; - } while (length != 0); - return MakeBalancedTree(reps.data(), n); + return NewBtree(data, length, alloc_hint); } namespace cord_internal { @@ -258,22 +129,6 @@ void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep) { } // namespace cord_internal -static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) { - // Never create empty substring nodes - if (length == 0) { - CordRep::Unref(child); - return nullptr; - } else { - CordRepSubstring* rep = new CordRepSubstring(); - assert((offset + length) <= child->length); - rep->length = length; - rep->tag = cord_internal::SUBSTRING; - rep->start = offset; - rep->child = child; - return VerifyTree(rep); - } -} - // Creates a CordRep from the provided string. If the string is large enough, // and not wasteful, we move the string into an external cord rep, preserving // the already allocated string contents. @@ -306,13 +161,14 @@ static CordRep* CordRepFromString(std::string&& src) { // -------------------------------------------------------------------- // Cord::InlineRep functions +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr unsigned char Cord::InlineRep::kMaxInline; +#endif -inline void Cord::InlineRep::set_data(const char* data, size_t n, - bool nullify_tail) { +inline void Cord::InlineRep::set_data(const char* data, size_t n) { static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15"); - cord_internal::SmallMemmove(data_.as_chars(), data, n, nullify_tail); + cord_internal::SmallMemmove(data_.as_chars(), data, n); set_inline_size(n); } @@ -329,7 +185,7 @@ inline void Cord::InlineRep::reduce_size(size_t n) { assert(tag >= n); tag -= n; memset(data_.as_chars() + tag, 0, n); - set_inline_size(static_cast(tag)); + set_inline_size(tag); } inline void Cord::InlineRep::remove_prefix(size_t n) { @@ -341,7 +197,9 @@ inline void Cord::InlineRep::remove_prefix(size_t n) { // Returns `rep` converted into a CordRepBtree. // Directly returns `rep` if `rep` is already a CordRepBtree. static CordRepBtree* ForceBtree(CordRep* rep) { - return rep->IsBtree() ? rep->btree() : CordRepBtree::Create(rep); + return rep->IsBtree() + ? 
rep->btree() + : CordRepBtree::Create(cord_internal::RemoveCrcNode(rep)); } void Cord::InlineRep::AppendTreeToInlined(CordRep* tree, @@ -349,11 +207,7 @@ void Cord::InlineRep::AppendTreeToInlined(CordRep* tree, assert(!is_tree()); if (!data_.is_empty()) { CordRepFlat* flat = MakeFlatWithExtraCapacity(0); - if (btree_enabled()) { - tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree); - } else { - tree = Concat(flat, tree); - } + tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree); } EmplaceTree(tree, method); } @@ -361,16 +215,14 @@ void Cord::InlineRep::AppendTreeToInlined(CordRep* tree, void Cord::InlineRep::AppendTreeToTree(CordRep* tree, MethodIdentifier method) { assert(is_tree()); const CordzUpdateScope scope(data_.cordz_info(), method); - if (btree_enabled()) { - tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree); - } else { - tree = Concat(data_.as_tree(), tree); - } + tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree); SetTree(tree, scope); } void Cord::InlineRep::AppendTree(CordRep* tree, MethodIdentifier method) { - if (tree == nullptr) return; + assert(tree != nullptr); + assert(tree->length != 0); + assert(!tree->IsCrc()); if (data_.is_tree()) { AppendTreeToTree(tree, method); } else { @@ -383,11 +235,7 @@ void Cord::InlineRep::PrependTreeToInlined(CordRep* tree, assert(!is_tree()); if (!data_.is_empty()) { CordRepFlat* flat = MakeFlatWithExtraCapacity(0); - if (btree_enabled()) { - tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree); - } else { - tree = Concat(tree, flat); - } + tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree); } EmplaceTree(tree, method); } @@ -396,16 +244,14 @@ void Cord::InlineRep::PrependTreeToTree(CordRep* tree, MethodIdentifier method) { assert(is_tree()); const CordzUpdateScope scope(data_.cordz_info(), method); - if (btree_enabled()) { - tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree); - } else { - tree = Concat(tree, data_.as_tree()); - } + tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree); SetTree(tree, scope); } void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) { assert(tree != nullptr); + assert(tree->length != 0); + assert(!tree->IsCrc()); if (data_.is_tree()) { PrependTreeToTree(tree, method); } else { @@ -428,12 +274,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region, } } - // Search down the right-hand path for a non-full FLAT node. CordRep* dst = root; - while (dst->IsConcat() && dst->refcount.IsOne()) { - dst = dst->concat()->right; - } - if (!dst->IsFlat() || !dst->refcount.IsOne()) { *region = nullptr; *size = 0; @@ -448,12 +289,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region, return false; } - size_t size_increase = std::min(capacity - in_use, max_length); - - // We need to update the length fields for all nodes, including the leaf node. - for (CordRep* rep = root; rep != dst; rep = rep->concat()->right) { - rep->length += size_increase; - } + const size_t size_increase = std::min(capacity - in_use, max_length); dst->length += size_increase; *region = dst->flat()->Data() + in_use; @@ -461,90 +297,6 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region, return true; } -template -void Cord::InlineRep::GetAppendRegion(char** region, size_t* size, - size_t length) { - auto constexpr method = CordzUpdateTracker::kGetAppendRegion; - - CordRep* root = tree(); - size_t sz = root ? 
root->length : inline_size(); - if (root == nullptr) { - size_t available = kMaxInline - sz; - if (available >= (has_length ? length : 1)) { - *region = data_.as_chars() + sz; - *size = has_length ? length : available; - set_inline_size(has_length ? sz + length : kMaxInline); - return; - } - } - - size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength); - CordRep* rep = root ? root : MakeFlatWithExtraCapacity(extra); - CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method); - if (PrepareAppendRegion(rep, region, size, length)) { - CommitTree(root, rep, scope, method); - return; - } - - // Allocate new node. - CordRepFlat* new_node = CordRepFlat::New(extra); - new_node->length = std::min(new_node->Capacity(), length); - *region = new_node->Data(); - *size = new_node->length; - - if (btree_enabled()) { - rep = CordRepBtree::Append(ForceBtree(rep), new_node); - } else { - rep = Concat(rep, new_node); - } - CommitTree(root, rep, scope, method); -} - -// Computes the memory side of the provided edge which must be a valid data edge -// for a btrtee, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL node. -static bool RepMemoryUsageDataEdge(const CordRep* rep, - size_t* total_mem_usage) { - size_t maybe_sub_size = 0; - if (ABSL_PREDICT_FALSE(rep->IsSubstring())) { - maybe_sub_size = sizeof(cord_internal::CordRepSubstring); - rep = rep->substring()->child; - } - if (rep->IsFlat()) { - *total_mem_usage += maybe_sub_size + rep->flat()->AllocatedSize(); - return true; - } - if (rep->IsExternal()) { - // We don't know anything about the embedded / bound data, but we can safely - // assume it is 'at least' a word / pointer to data. In the future we may - // choose to use the 'data' byte as a tag to identify the types of some - // well-known externals, such as a std::string instance. - *total_mem_usage += maybe_sub_size + - sizeof(cord_internal::CordRepExternalImpl) + - rep->length; - return true; - } - return false; -} - -// If the rep is a leaf, this will increment the value at total_mem_usage and -// will return true. -static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) { - if (rep->IsFlat()) { - *total_mem_usage += rep->flat()->AllocatedSize(); - return true; - } - if (rep->IsExternal()) { - // We don't know anything about the embedded / bound data, but we can safely - // assume it is 'at least' a word / pointer to data. In the future we may - // choose to use the 'data' byte as a tag to identify the types of some - // well-known externals, such as a std::string instance. 
- *total_mem_usage += - sizeof(cord_internal::CordRepExternalImpl) + rep->length; - return true; - } - return false; -} - void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) { assert(&src != this); assert(is_tree() || src.is_tree()); @@ -581,7 +333,7 @@ Cord::Cord(absl::string_view src, MethodIdentifier method) : contents_(InlineData::kDefaultInit) { const size_t n = src.size(); if (n <= InlineRep::kMaxInline) { - contents_.set_data(src.data(), n, true); + contents_.set_data(src.data(), n); } else { CordRep* rep = NewTree(src.data(), n, 0); contents_.EmplaceTree(rep, method); @@ -591,7 +343,7 @@ Cord::Cord(absl::string_view src, MethodIdentifier method) template > Cord::Cord(T&& src) : contents_(InlineData::kDefaultInit) { if (src.size() <= InlineRep::kMaxInline) { - contents_.set_data(src.data(), src.size(), true); + contents_.set_data(src.data(), src.size()); } else { CordRep* rep = CordRepFromString(std::forward(src)); contents_.EmplaceTree(rep, CordzUpdateTracker::kConstructorString); @@ -642,7 +394,7 @@ Cord& Cord::operator=(absl::string_view src) { // - MaybeUntrackCord must be called before set_data() clobbers cordz_info. // - set_data() must be called before Unref(tree) as it may reference tree. if (tree != nullptr) CordzInfo::MaybeUntrackCord(contents_.cordz_info()); - contents_.set_data(data, length, true); + contents_.set_data(data, length); if (tree != nullptr) CordRep::Unref(tree); return *this; } @@ -668,6 +420,7 @@ Cord& Cord::operator=(absl::string_view src) { // we keep it here to make diffs easier. void Cord::InlineRep::AppendArray(absl::string_view src, MethodIdentifier method) { + MaybeRemoveEmptyCrcNode(); if (src.empty()) return; // memcpy(_, nullptr, 0) is undefined. size_t appended = 0; @@ -675,6 +428,7 @@ void Cord::InlineRep::AppendArray(absl::string_view src, const CordRep* const root = rep; CordzUpdateScope scope(root ? cordz_info() : nullptr, method); if (root != nullptr) { + rep = cord_internal::RemoveCrcNode(rep); char* region; if (PrepareAppendRegion(rep, ®ion, &appended, src.size())) { memcpy(region, src.data(), appended); @@ -705,27 +459,11 @@ void Cord::InlineRep::AppendArray(absl::string_view src, return; } - if (btree_enabled()) { - // TODO(b/192061034): keep legacy 10% growth rate: consider other rates. - rep = ForceBtree(rep); - const size_t min_growth = std::max(rep->length / 10, src.size()); - rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size()); - } else { - // Use new block(s) for any remaining bytes that were not handled above. - // Alloc extra memory only if the right child of the root of the new tree - // is going to be a FLAT node, which will permit further inplace appends. - size_t length = src.size(); - if (src.size() < kMaxFlatLength) { - // The new length is either - // - old size + 10% - // - old_size + src.size() - // This will cause a reasonable conservative step-up in size that is - // still large enough to avoid excessive amounts of small fragments - // being added. - length = std::max(rep->length / 10, src.size()); - } - rep = Concat(rep, NewTree(src.data(), src.size(), length - src.size())); - } + // TODO(b/192061034): keep legacy 10% growth rate: consider other rates. 
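// A minimal sketch of the growth policy the btree append path below relies on,
// assuming only what the surrounding code shows: appended data is padded out to
// at least 10% of the current tree length so that repeated small appends
// amortize their copying cost. The helper name and parameters are hypothetical.
#include <algorithm>
#include <cstddef>

inline size_t ExtraCapacityForAppend(size_t tree_length, size_t append_size) {
  const size_t min_growth = (std::max)(tree_length / 10, append_size);
  return min_growth - append_size;  // passed as `extra` to CordRepBtree::Append
}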
+ rep = ForceBtree(rep); + const size_t min_growth = std::max(rep->length / 10, src.size()); + rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size()); + CommitTree(root, rep, scope, method); } @@ -742,11 +480,16 @@ inline CordRep* Cord::TakeRep() && { template inline void Cord::AppendImpl(C&& src) { auto constexpr method = CordzUpdateTracker::kAppendCord; + + contents_.MaybeRemoveEmptyCrcNode(); + if (src.empty()) return; + if (empty()) { // Since destination is empty, we can avoid allocating a node, if (src.contents_.is_tree()) { // by taking the tree directly - CordRep* rep = std::forward(src).TakeRep(); + CordRep* rep = + cord_internal::RemoveCrcNode(std::forward(src).TakeRep()); contents_.EmplaceTree(rep, method); } else { // or copying over inline data @@ -782,10 +525,56 @@ inline void Cord::AppendImpl(C&& src) { } // Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize) - CordRep* rep = std::forward(src).TakeRep(); + CordRep* rep = cord_internal::RemoveCrcNode(std::forward(src).TakeRep()); contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord); } +static CordRep::ExtractResult ExtractAppendBuffer(CordRep* rep, + size_t min_capacity) { + switch (rep->tag) { + case cord_internal::BTREE: + return CordRepBtree::ExtractAppendBuffer(rep->btree(), min_capacity); + default: + if (rep->IsFlat() && rep->refcount.IsOne() && + rep->flat()->Capacity() - rep->length >= min_capacity) { + return {nullptr, rep}; + } + return {rep, nullptr}; + } +} + +static CordBuffer CreateAppendBuffer(InlineData& data, size_t block_size, + size_t capacity) { + // Watch out for overflow, people can ask for size_t::max(). + const size_t size = data.inline_size(); + const size_t max_capacity = std::numeric_limits::max() - size; + capacity = (std::min)(max_capacity, capacity) + size; + CordBuffer buffer = + block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity) + : CordBuffer::CreateWithDefaultLimit(capacity); + cord_internal::SmallMemmove(buffer.data(), data.as_chars(), size); + buffer.SetLength(size); + data = {}; + return buffer; +} + +CordBuffer Cord::GetAppendBufferSlowPath(size_t block_size, size_t capacity, + size_t min_capacity) { + auto constexpr method = CordzUpdateTracker::kGetAppendBuffer; + CordRep* tree = contents_.tree(); + if (tree != nullptr) { + CordzUpdateScope scope(contents_.cordz_info(), method); + CordRep::ExtractResult result = ExtractAppendBuffer(tree, min_capacity); + if (result.extracted != nullptr) { + contents_.SetTreeOrEmpty(result.tree, scope); + return CordBuffer(result.extracted->flat()); + } + return block_size ? 
CordBuffer::CreateWithCustomLimit(block_size, capacity) + : CordBuffer::CreateWithDefaultLimit(capacity); + } + return CreateAppendBuffer(contents_.data_, block_size, capacity); +} + void Cord::Append(const Cord& src) { AppendImpl(src); } @@ -807,10 +596,14 @@ void Cord::Append(T&& src) { template void Cord::Append(std::string&& src); void Cord::Prepend(const Cord& src) { + contents_.MaybeRemoveEmptyCrcNode(); + if (src.empty()) return; + CordRep* src_tree = src.contents_.tree(); if (src_tree != nullptr) { CordRep::Ref(src_tree); - contents_.PrependTree(src_tree, CordzUpdateTracker::kPrependCord); + contents_.PrependTree(cord_internal::RemoveCrcNode(src_tree), + CordzUpdateTracker::kPrependCord); return; } @@ -819,22 +612,51 @@ void Cord::Prepend(const Cord& src) { return Prepend(src_contents); } -void Cord::Prepend(absl::string_view src) { +void Cord::PrependArray(absl::string_view src, MethodIdentifier method) { + contents_.MaybeRemoveEmptyCrcNode(); if (src.empty()) return; // memcpy(_, nullptr, 0) is undefined. + if (!contents_.is_tree()) { size_t cur_size = contents_.inline_size(); if (cur_size + src.size() <= InlineRep::kMaxInline) { // Use embedded storage. - char data[InlineRep::kMaxInline + 1] = {0}; - memcpy(data, src.data(), src.size()); - memcpy(data + src.size(), contents_.data(), cur_size); - memcpy(contents_.data_.as_chars(), data, InlineRep::kMaxInline + 1); - contents_.set_inline_size(cur_size + src.size()); + InlineData data; + memcpy(data.as_chars(), src.data(), src.size()); + memcpy(data.as_chars() + src.size(), contents_.data(), cur_size); + data.set_inline_size(cur_size + src.size()); + contents_.data_ = data; return; } } CordRep* rep = NewTree(src.data(), src.size(), 0); - contents_.PrependTree(rep, CordzUpdateTracker::kPrependString); + contents_.PrependTree(rep, method); +} + +void Cord::AppendPrecise(absl::string_view src, MethodIdentifier method) { + assert(!src.empty()); + assert(src.size() <= cord_internal::kMaxFlatLength); + if (contents_.remaining_inline_capacity() >= src.size()) { + const size_t inline_length = contents_.inline_size(); + memcpy(contents_.data_.as_chars() + inline_length, src.data(), src.size()); + contents_.set_inline_size(inline_length + src.size()); + } else { + contents_.AppendTree(CordRepFlat::Create(src), method); + } +} + +void Cord::PrependPrecise(absl::string_view src, MethodIdentifier method) { + assert(!src.empty()); + assert(src.size() <= cord_internal::kMaxFlatLength); + if (contents_.remaining_inline_capacity() >= src.size()) { + const size_t cur_size = contents_.inline_size(); + InlineData data; + memcpy(data.as_chars(), src.data(), src.size()); + memcpy(data.as_chars() + src.size(), contents_.data(), cur_size); + data.set_inline_size(cur_size + src.size()); + contents_.data_ = data; + } else { + contents_.PrependTree(CordRepFlat::Create(src), method); + } } template > @@ -849,109 +671,32 @@ inline void Cord::Prepend(T&& src) { template void Cord::Prepend(std::string&& src); -static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { - if (n >= node->length) return nullptr; - if (n == 0) return CordRep::Ref(node); - absl::InlinedVector rhs_stack; - - while (node->IsConcat()) { - assert(n <= node->length); - if (n < node->concat()->left->length) { - // Push right to stack, descend left. - rhs_stack.push_back(node->concat()->right); - node = node->concat()->left; - } else { - // Drop left, descend right. 
- n -= node->concat()->left->length; - node = node->concat()->right; - } - } - assert(n <= node->length); - - if (n == 0) { - CordRep::Ref(node); - } else { - size_t start = n; - size_t len = node->length - n; - if (node->IsSubstring()) { - // Consider in-place update of node, similar to in RemoveSuffixFrom(). - start += node->substring()->start; - node = node->substring()->child; - } - node = NewSubstring(CordRep::Ref(node), start, len); - } - while (!rhs_stack.empty()) { - node = Concat(node, CordRep::Ref(rhs_stack.back())); - rhs_stack.pop_back(); - } - return node; -} - -// RemoveSuffixFrom() is very similar to RemovePrefixFrom(), with the -// exception that removing a suffix has an optimization where a node may be -// edited in place iff that node and all its ancestors have a refcount of 1. -static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) { - if (n >= node->length) return nullptr; - if (n == 0) return CordRep::Ref(node); - absl::InlinedVector lhs_stack; - bool inplace_ok = node->refcount.IsOne(); - - while (node->IsConcat()) { - assert(n <= node->length); - if (n < node->concat()->right->length) { - // Push left to stack, descend right. - lhs_stack.push_back(node->concat()->left); - node = node->concat()->right; - } else { - // Drop right, descend left. - n -= node->concat()->right->length; - node = node->concat()->left; - } - inplace_ok = inplace_ok && node->refcount.IsOne(); - } - assert(n <= node->length); - - if (n == 0) { - CordRep::Ref(node); - } else if (inplace_ok && !node->IsExternal()) { - // Consider making a new buffer if the current node capacity is much - // larger than the new length. - CordRep::Ref(node); - node->length -= n; - } else { - size_t start = 0; - size_t len = node->length - n; - if (node->IsSubstring()) { - start = node->substring()->start; - node = node->substring()->child; - } - node = NewSubstring(CordRep::Ref(node), start, len); - } - while (!lhs_stack.empty()) { - node = Concat(CordRep::Ref(lhs_stack.back()), node); - lhs_stack.pop_back(); - } - return node; -} - void Cord::RemovePrefix(size_t n) { ABSL_INTERNAL_CHECK(n <= size(), absl::StrCat("Requested prefix size ", n, " exceeds Cord's size ", size())); + contents_.MaybeRemoveEmptyCrcNode(); CordRep* tree = contents_.tree(); if (tree == nullptr) { contents_.remove_prefix(n); } else { auto constexpr method = CordzUpdateTracker::kRemovePrefix; CordzUpdateScope scope(contents_.cordz_info(), method); - if (tree->IsBtree()) { + tree = cord_internal::RemoveCrcNode(tree); + if (n >= tree->length) { + CordRep::Unref(tree); + tree = nullptr; + } else if (tree->IsBtree()) { CordRep* old = tree; tree = tree->btree()->SubTree(n, tree->length - n); CordRep::Unref(old); + } else if (tree->IsSubstring() && tree->refcount.IsOne()) { + tree->substring()->start += n; + tree->length -= n; } else { - CordRep* newrep = RemovePrefixFrom(tree, n); + CordRep* rep = CordRepSubstring::Substring(tree, n, tree->length - n); CordRep::Unref(tree); - tree = VerifyTree(newrep); + tree = rep; } contents_.SetTreeOrEmpty(tree, scope); } @@ -961,76 +706,31 @@ void Cord::RemoveSuffix(size_t n) { ABSL_INTERNAL_CHECK(n <= size(), absl::StrCat("Requested suffix size ", n, " exceeds Cord's size ", size())); + contents_.MaybeRemoveEmptyCrcNode(); CordRep* tree = contents_.tree(); if (tree == nullptr) { contents_.reduce_size(n); } else { auto constexpr method = CordzUpdateTracker::kRemoveSuffix; CordzUpdateScope scope(contents_.cordz_info(), method); - if (tree->IsBtree()) { - CordRep* old = tree; - tree = tree->btree()->SubTree(0, 
tree->length - n); - CordRep::Unref(old); - } else { - CordRep* newrep = RemoveSuffixFrom(tree, n); + tree = cord_internal::RemoveCrcNode(tree); + if (n >= tree->length) { CordRep::Unref(tree); - tree = VerifyTree(newrep); + tree = nullptr; + } else if (tree->IsBtree()) { + tree = CordRepBtree::RemoveSuffix(tree->btree(), n); + } else if (!tree->IsExternal() && tree->refcount.IsOne()) { + assert(tree->IsFlat() || tree->IsSubstring()); + tree->length -= n; + } else { + CordRep* rep = CordRepSubstring::Substring(tree, 0, tree->length - n); + CordRep::Unref(tree); + tree = rep; } contents_.SetTreeOrEmpty(tree, scope); } } -// Work item for NewSubRange(). -struct SubRange { - SubRange(CordRep* a_node, size_t a_pos, size_t a_n) - : node(a_node), pos(a_pos), n(a_n) {} - CordRep* node; // nullptr means concat last 2 results. - size_t pos; - size_t n; -}; - -static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { - absl::InlinedVector results; - absl::InlinedVector todo; - todo.push_back(SubRange(node, pos, n)); - do { - const SubRange& sr = todo.back(); - node = sr.node; - pos = sr.pos; - n = sr.n; - todo.pop_back(); - - if (node == nullptr) { - assert(results.size() >= 2); - CordRep* right = results.back(); - results.pop_back(); - CordRep* left = results.back(); - results.pop_back(); - results.push_back(Concat(left, right)); - } else if (pos == 0 && n == node->length) { - results.push_back(CordRep::Ref(node)); - } else if (!node->IsConcat()) { - if (node->IsSubstring()) { - pos += node->substring()->start; - node = node->substring()->child; - } - results.push_back(NewSubstring(CordRep::Ref(node), pos, n)); - } else if (pos + n <= node->concat()->left->length) { - todo.push_back(SubRange(node->concat()->left, pos, n)); - } else if (pos >= node->concat()->left->length) { - pos -= node->concat()->left->length; - todo.push_back(SubRange(node->concat()->right, pos, n)); - } else { - size_t left_n = node->concat()->left->length - pos; - todo.push_back(SubRange(nullptr, 0, 0)); // Concat() - todo.push_back(SubRange(node->concat()->right, 0, n - left_n)); - todo.push_back(SubRange(node->concat()->left, pos, left_n)); - } - } while (!todo.empty()); - assert(results.size() == 1); - return results[0]; -} - Cord Cord::Subcord(size_t pos, size_t new_size) const { Cord sub_cord; size_t length = size(); @@ -1040,9 +740,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const { CordRep* tree = contents_.tree(); if (tree == nullptr) { - // sub_cord is newly constructed, no need to re-zero-out the tail of - // contents_ memory. 
- sub_cord.contents_.set_data(contents_.data() + pos, new_size, false); + sub_cord.contents_.set_data(contents_.data() + pos, new_size); return sub_cord; } @@ -1062,156 +760,17 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const { return sub_cord; } + tree = cord_internal::SkipCrcNode(tree); if (tree->IsBtree()) { tree = tree->btree()->SubTree(pos, new_size); } else { - tree = NewSubRange(tree, pos, new_size); + tree = CordRepSubstring::Substring(tree, pos, new_size); } sub_cord.contents_.EmplaceTree(tree, contents_.data_, CordzUpdateTracker::kSubCord); return sub_cord; } -// -------------------------------------------------------------------- -// Balancing - -class CordForest { - public: - explicit CordForest(size_t length) - : root_length_(length), trees_(kMinLengthSize, nullptr) {} - - void Build(CordRep* cord_root) { - std::vector pending = {cord_root}; - - while (!pending.empty()) { - CordRep* node = pending.back(); - pending.pop_back(); - CheckNode(node); - if (ABSL_PREDICT_FALSE(!node->IsConcat())) { - AddNode(node); - continue; - } - - CordRepConcat* concat_node = node->concat(); - if (concat_node->depth() >= kMinLengthSize || - concat_node->length < min_length[concat_node->depth()]) { - pending.push_back(concat_node->right); - pending.push_back(concat_node->left); - - if (concat_node->refcount.IsOne()) { - concat_node->left = concat_freelist_; - concat_freelist_ = concat_node; - } else { - CordRep::Ref(concat_node->right); - CordRep::Ref(concat_node->left); - CordRep::Unref(concat_node); - } - } else { - AddNode(node); - } - } - } - - CordRep* ConcatNodes() { - CordRep* sum = nullptr; - for (auto* node : trees_) { - if (node == nullptr) continue; - - sum = PrependNode(node, sum); - root_length_ -= node->length; - if (root_length_ == 0) break; - } - ABSL_INTERNAL_CHECK(sum != nullptr, "Failed to locate sum node"); - return VerifyTree(sum); - } - - private: - CordRep* AppendNode(CordRep* node, CordRep* sum) { - return (sum == nullptr) ? node : MakeConcat(sum, node); - } - - CordRep* PrependNode(CordRep* node, CordRep* sum) { - return (sum == nullptr) ? node : MakeConcat(node, sum); - } - - void AddNode(CordRep* node) { - CordRep* sum = nullptr; - - // Collect together everything with which we will merge with node - int i = 0; - for (; node->length > min_length[i + 1]; ++i) { - auto& tree_at_i = trees_[i]; - - if (tree_at_i == nullptr) continue; - sum = PrependNode(tree_at_i, sum); - tree_at_i = nullptr; - } - - sum = AppendNode(node, sum); - - // Insert sum into appropriate place in the forest - for (; sum->length >= min_length[i]; ++i) { - auto& tree_at_i = trees_[i]; - if (tree_at_i == nullptr) continue; - - sum = MakeConcat(tree_at_i, sum); - tree_at_i = nullptr; - } - - // min_length[0] == 1, which means sum->length >= min_length[0] - assert(i > 0); - trees_[i - 1] = sum; - } - - // Make concat node trying to resue existing CordRepConcat nodes we - // already collected in the concat_freelist_. 
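// A minimal usage sketch for the subrange operations reworked above, assuming
// only the public absl::Cord API visible in this diff (Subcord, RemovePrefix,
// RemoveSuffix); the sample values are illustrative.
#include <cassert>
#include "absl/strings/cord.h"

inline void SubrangeExample() {
  absl::Cord cord("the quick brown fox");
  absl::Cord word = cord.Subcord(4, 5);  // "quick"
  cord.RemovePrefix(4);                  // "quick brown fox"
  cord.RemoveSuffix(4);                  // "quick brown"
  assert(word == "quick");
  assert(cord == "quick brown");
}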
- CordRep* MakeConcat(CordRep* left, CordRep* right) { - if (concat_freelist_ == nullptr) return RawConcat(left, right); - - CordRepConcat* rep = concat_freelist_; - if (concat_freelist_->left == nullptr) { - concat_freelist_ = nullptr; - } else { - concat_freelist_ = concat_freelist_->left->concat(); - } - SetConcatChildren(rep, left, right); - - return rep; - } - - static void CheckNode(CordRep* node) { - ABSL_INTERNAL_CHECK(node->length != 0u, ""); - if (node->IsConcat()) { - ABSL_INTERNAL_CHECK(node->concat()->left != nullptr, ""); - ABSL_INTERNAL_CHECK(node->concat()->right != nullptr, ""); - ABSL_INTERNAL_CHECK(node->length == (node->concat()->left->length + - node->concat()->right->length), - ""); - } - } - - size_t root_length_; - - // use an inlined vector instead of a flat array to get bounds checking - absl::InlinedVector trees_; - - // List of concat nodes we can re-use for Cord balancing. - CordRepConcat* concat_freelist_ = nullptr; -}; - -static CordRep* Rebalance(CordRep* node) { - VerifyTree(node); - assert(node->IsConcat()); - - if (node->length == 0) { - return nullptr; - } - - CordForest forest(node->length); - forest.Build(node); - return forest.ConcatNodes(); -} - // -------------------------------------------------------------------- // Comparators @@ -1258,7 +817,7 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const { return absl::string_view(data_.as_chars(), data_.inline_size()); } - CordRep* node = tree(); + CordRep* node = cord_internal::SkipCrcNode(tree()); if (node->IsFlat()) { return absl::string_view(node->flat()->Data(), node->length); } @@ -1276,11 +835,6 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const { return tree->Data(tree->begin()); } - // Walk down the left branches until we hit a non-CONCAT node. - while (node->IsConcat()) { - node = node->concat()->left; - } - // Get the child node if we encounter a SUBSTRING. 
size_t offset = 0; size_t length = node->length; @@ -1300,6 +854,30 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const { return absl::string_view(node->external()->base + offset, length); } +void Cord::SetExpectedChecksum(uint32_t crc) { + auto constexpr method = CordzUpdateTracker::kSetExpectedChecksum; + if (empty()) { + contents_.MaybeRemoveEmptyCrcNode(); + CordRep* rep = CordRepCrc::New(nullptr, crc); + contents_.EmplaceTree(rep, method); + } else if (!contents_.is_tree()) { + CordRep* rep = contents_.MakeFlatWithExtraCapacity(0); + rep = CordRepCrc::New(rep, crc); + contents_.EmplaceTree(rep, method); + } else { + const CordzUpdateScope scope(contents_.data_.cordz_info(), method); + CordRep* rep = CordRepCrc::New(contents_.data_.as_tree(), crc); + contents_.SetTree(rep, scope); + } +} + +absl::optional Cord::ExpectedChecksum() const { + if (!contents_.is_tree() || !contents_.tree()->IsCrc()) { + return absl::nullopt; + } + return contents_.tree()->crc()->crc; +} + inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size, size_t size_to_compare) const { auto advance = [](Cord::ChunkIterator* it, absl::string_view* chunk) { @@ -1365,6 +943,7 @@ inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size, } inline absl::string_view Cord::GetFirstChunk(const Cord& c) { + if (c.empty()) return {}; return c.contents_.FindFlatStartPiece(); } inline absl::string_view Cord::GetFirstChunk(absl::string_view sv) { @@ -1475,42 +1054,6 @@ void Cord::CopyToArraySlowPath(char* dst) const { } } -Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() { - auto& stack_of_right_children = stack_of_right_children_; - if (stack_of_right_children.empty()) { - assert(!current_chunk_.empty()); // Called on invalid iterator. - // We have reached the end of the Cord. - return *this; - } - - // Process the next node on the stack. - CordRep* node = stack_of_right_children.back(); - stack_of_right_children.pop_back(); - - // Walk down the left branches until we hit a non-CONCAT node. Save the - // right children to the stack for subsequent traversal. - while (node->IsConcat()) { - stack_of_right_children.push_back(node->concat()->right); - node = node->concat()->left; - } - - // Get the child node if we encounter a SUBSTRING. - size_t offset = 0; - size_t length = node->length; - if (node->IsSubstring()) { - offset = node->substring()->start; - node = node->substring()->child; - } - - assert(node->IsExternal() || node->IsFlat()); - assert(length != 0); - const char* data = - node->IsExternal() ? node->external()->base : node->flat()->Data(); - current_chunk_ = absl::string_view(data + offset, length); - current_leaf_ = node; - return *this; -} - Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) { ABSL_HARDENING_ASSERT(bytes_remaining_ >= n && "Attempted to iterate past `end()`"); @@ -1553,166 +1096,33 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) { return subcord; } - auto& stack_of_right_children = stack_of_right_children_; - if (n < current_chunk_.size()) { - // Range to read is a proper subrange of the current chunk. - assert(current_leaf_ != nullptr); - CordRep* subnode = CordRep::Ref(current_leaf_); - const char* data = subnode->IsExternal() ? 
subnode->external()->base - : subnode->flat()->Data(); - subnode = NewSubstring(subnode, current_chunk_.data() - data, n); - subcord.contents_.EmplaceTree(VerifyTree(subnode), method); - RemoveChunkPrefix(n); - return subcord; - } - - // Range to read begins with a proper subrange of the current chunk. - assert(!current_chunk_.empty()); + // Short circuit if reading the entire data edge. assert(current_leaf_ != nullptr); - CordRep* subnode = CordRep::Ref(current_leaf_); - if (current_chunk_.size() < subnode->length) { - const char* data = subnode->IsExternal() ? subnode->external()->base - : subnode->flat()->Data(); - subnode = NewSubstring(subnode, current_chunk_.data() - data, - current_chunk_.size()); - } - n -= current_chunk_.size(); - bytes_remaining_ -= current_chunk_.size(); - - // Process the next node(s) on the stack, reading whole subtrees depending on - // their length and how many bytes we are advancing. - CordRep* node = nullptr; - while (!stack_of_right_children.empty()) { - node = stack_of_right_children.back(); - stack_of_right_children.pop_back(); - if (node->length > n) break; - // TODO(qrczak): This might unnecessarily recreate existing concat nodes. - // Avoiding that would need pretty complicated logic (instead of - // current_leaf, keep current_subtree_ which points to the highest node - // such that the current leaf can be found on the path of left children - // starting from current_subtree_; delay creating subnode while node is - // below current_subtree_; find the proper node along the path of left - // children starting from current_subtree_ if this loop exits while staying - // below current_subtree_; etc.; alternatively, push parents instead of - // right children on the stack). - subnode = Concat(subnode, CordRep::Ref(node)); - n -= node->length; - bytes_remaining_ -= node->length; - node = nullptr; - } - - if (node == nullptr) { - // We have reached the end of the Cord. - assert(bytes_remaining_ == 0); - subcord.contents_.EmplaceTree(VerifyTree(subnode), method); + if (n == current_leaf_->length) { + bytes_remaining_ = 0; + current_chunk_ = {}; + CordRep* tree = CordRep::Ref(current_leaf_); + subcord.contents_.EmplaceTree(VerifyTree(tree), method); return subcord; } - // Walk down the appropriate branches until we hit a non-CONCAT node. Save the - // right children to the stack for subsequent traversal. - while (node->IsConcat()) { - if (node->concat()->left->length > n) { - // Push right, descend left. - stack_of_right_children.push_back(node->concat()->right); - node = node->concat()->left; - } else { - // Read left, descend right. - subnode = Concat(subnode, CordRep::Ref(node->concat()->left)); - n -= node->concat()->left->length; - bytes_remaining_ -= node->concat()->left->length; - node = node->concat()->right; - } - } + // From this point on, we need a partial substring node. + // Get pointer to the underlying flat or external data payload and + // compute data pointer and offset into current flat or external. + CordRep* payload = current_leaf_->IsSubstring() + ? current_leaf_->substring()->child + : current_leaf_; + const char* data = payload->IsExternal() ? payload->external()->base + : payload->flat()->Data(); + const size_t offset = static_cast(current_chunk_.data() - data); - // Get the child node if we encounter a SUBSTRING. 
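// A minimal sketch of the public counterpart to the iterator logic above,
// assuming only CharIterator and the static Cord::AdvanceAndRead() helper
// declared in cord.h. `n` must not exceed cord.size(); the wrapper name is
// hypothetical.
#include <cstddef>
#include "absl/strings/cord.h"

inline absl::Cord TakeFirstBytes(const absl::Cord& cord, size_t n) {
  absl::Cord::CharIterator it = cord.char_begin();
  // Returns the first `n` bytes as a new cord and leaves `it` just past them.
  return absl::Cord::AdvanceAndRead(&it, n);
}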
- size_t offset = 0; - size_t length = node->length; - if (node->IsSubstring()) { - offset = node->substring()->start; - node = node->substring()->child; - } - - // Range to read ends with a proper (possibly empty) subrange of the current - // chunk. - assert(node->IsExternal() || node->IsFlat()); - assert(length > n); - if (n > 0) { - subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n)); - } - const char* data = - node->IsExternal() ? node->external()->base : node->flat()->Data(); - current_chunk_ = absl::string_view(data + offset + n, length - n); - current_leaf_ = node; + auto* tree = CordRepSubstring::Substring(payload, offset, n); + subcord.contents_.EmplaceTree(VerifyTree(tree), method); bytes_remaining_ -= n; - subcord.contents_.EmplaceTree(VerifyTree(subnode), method); + current_chunk_.remove_prefix(n); return subcord; } -void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) { - assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`"); - assert(n >= current_chunk_.size()); // This should only be called when - // iterating to a new node. - - n -= current_chunk_.size(); - bytes_remaining_ -= current_chunk_.size(); - - if (stack_of_right_children_.empty()) { - // We have reached the end of the Cord. - assert(bytes_remaining_ == 0); - return; - } - - // Process the next node(s) on the stack, skipping whole subtrees depending on - // their length and how many bytes we are advancing. - CordRep* node = nullptr; - auto& stack_of_right_children = stack_of_right_children_; - while (!stack_of_right_children.empty()) { - node = stack_of_right_children.back(); - stack_of_right_children.pop_back(); - if (node->length > n) break; - n -= node->length; - bytes_remaining_ -= node->length; - node = nullptr; - } - - if (node == nullptr) { - // We have reached the end of the Cord. - assert(bytes_remaining_ == 0); - return; - } - - // Walk down the appropriate branches until we hit a non-CONCAT node. Save the - // right children to the stack for subsequent traversal. - while (node->IsConcat()) { - if (node->concat()->left->length > n) { - // Push right, descend left. - stack_of_right_children.push_back(node->concat()->right); - node = node->concat()->left; - } else { - // Skip left, descend right. - n -= node->concat()->left->length; - bytes_remaining_ -= node->concat()->left->length; - node = node->concat()->right; - } - } - - // Get the child node if we encounter a SUBSTRING. - size_t offset = 0; - size_t length = node->length; - if (node->IsSubstring()) { - offset = node->substring()->start; - node = node->substring()->child; - } - - assert(node->IsExternal() || node->IsFlat()); - assert(length > n); - const char* data = - node->IsExternal() ? node->external()->base : node->flat()->Data(); - current_chunk_ = absl::string_view(data + offset + n, length - n); - current_leaf_ = node; - bytes_remaining_ -= n; -} - char Cord::operator[](size_t i) const { ABSL_HARDENING_ASSERT(i < size()); size_t offset = i; @@ -1720,6 +1130,7 @@ char Cord::operator[](size_t i) const { if (rep == nullptr) { return contents_.data()[i]; } + rep = cord_internal::SkipCrcNode(rep); while (true) { assert(rep != nullptr); assert(offset < rep->length); @@ -1731,16 +1142,6 @@ char Cord::operator[](size_t i) const { } else if (rep->IsExternal()) { // Get the "i"th character from the external array. return rep->external()->base[offset]; - } else if (rep->IsConcat()) { - // Recursively branch to the side of the concatenation that the "i"th - // character is on. 
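// A minimal sketch of random access through operator[] as modified above,
// assuming only the public API; each lookup walks the tree, so chunk or
// character iteration is preferable when touching many bytes.
#include <cassert>
#include "absl/strings/cord.h"

inline void CharAccessExample() {
  absl::Cord cord("abseil cord");
  assert(cord[0] == 'a');
  assert(cord[cord.size() - 1] == 'd');
}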
- size_t left_length = rep->concat()->left->length; - if (offset < left_length) { - rep = rep->concat()->left; - } else { - offset -= left_length; - rep = rep->concat()->right; - } } else { // This must be a substring a node, so bypass it to get to the child. assert(rep->IsSubstring()); @@ -1780,6 +1181,11 @@ absl::string_view Cord::FlattenSlowPath() { /* static */ bool Cord::GetFlatAux(CordRep* rep, absl::string_view* fragment) { assert(rep != nullptr); + if (rep->length == 0) { + *fragment = absl::string_view(); + return true; + } + rep = cord_internal::SkipCrcNode(rep); if (rep->IsFlat()) { *fragment = absl::string_view(rep->flat()->Data(), rep->length); return true; @@ -1809,6 +1215,10 @@ absl::string_view Cord::FlattenSlowPath() { /* static */ void Cord::ForEachChunkAux( absl::cord_internal::CordRep* rep, absl::FunctionRef callback) { + assert(rep != nullptr); + if (rep->length == 0) return; + rep = cord_internal::SkipCrcNode(rep); + if (rep->IsBtree()) { ChunkIterator it(rep), end; while (it != end) { @@ -1818,44 +1228,13 @@ absl::string_view Cord::FlattenSlowPath() { return; } - assert(rep != nullptr); - int stack_pos = 0; - constexpr int stack_max = 128; - // Stack of right branches for tree traversal - absl::cord_internal::CordRep* stack[stack_max]; - absl::cord_internal::CordRep* current_node = rep; - while (true) { - if (current_node->IsConcat()) { - if (stack_pos == stack_max) { - // There's no more room on our stack array to add another right branch, - // and the idea is to avoid allocations, so call this function - // recursively to navigate this subtree further. (This is not something - // we expect to happen in practice). - ForEachChunkAux(current_node, callback); - - // Pop the next right branch and iterate. - current_node = stack[--stack_pos]; - continue; - } else { - // Save the right branch for later traversal and continue down the left - // branch. - stack[stack_pos++] = current_node->concat()->right; - current_node = current_node->concat()->left; - continue; - } - } - // This is a leaf node, so invoke our callback. - absl::string_view chunk; - bool success = GetFlatAux(current_node, &chunk); - assert(success); - if (success) { - callback(chunk); - } - if (stack_pos == 0) { - // end of traversal - return; - } - current_node = stack[--stack_pos]; + // This is a leaf node, so invoke our callback. + absl::cord_internal::CordRep* current_node = cord_internal::SkipCrcNode(rep); + absl::string_view chunk; + bool success = GetFlatAux(current_node, &chunk); + assert(success); + if (success) { + callback(chunk); } } @@ -1870,19 +1249,21 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os, *os << " ["; if (include_data) *os << static_cast(rep); *os << "]"; - *os << " " << (IsRootBalanced(rep) ? 
'b' : 'u'); *os << " " << std::setw(indent) << ""; - if (rep->IsConcat()) { - *os << "CONCAT depth=" << Depth(rep) << "\n"; + bool leaf = false; + if (rep == nullptr) { + *os << "NULL\n"; + leaf = true; + } else if (rep->IsCrc()) { + *os << "CRC crc=" << rep->crc()->crc << "\n"; indent += kIndentStep; - indents.push_back(indent); - stack.push_back(rep->concat()->right); - rep = rep->concat()->left; + rep = rep->crc()->child; } else if (rep->IsSubstring()) { *os << "SUBSTRING @ " << rep->substring()->start << "\n"; indent += kIndentStep; rep = rep->substring()->child; } else { // Leaf or ring + leaf = true; if (rep->IsExternal()) { *os << "EXTERNAL ["; if (include_data) @@ -1896,6 +1277,8 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os, } else { CordRepBtree::Dump(rep, /*label=*/ "", include_data, *os); } + } + if (leaf) { if (stack.empty()) break; rep = stack.back(); stack.pop_back(); @@ -1914,7 +1297,7 @@ static std::string ReportError(CordRep* root, CordRep* node) { } static bool VerifyNode(CordRep* root, CordRep* start_node, - bool full_validation) { + bool /* full_validation */) { absl::InlinedVector worklist; worklist.push_back(start_node); do { @@ -1924,21 +1307,10 @@ static bool VerifyNode(CordRep* root, CordRep* start_node, ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node)); if (node != root) { ABSL_INTERNAL_CHECK(node->length != 0, ReportError(root, node)); + ABSL_INTERNAL_CHECK(!node->IsCrc(), ReportError(root, node)); } - if (node->IsConcat()) { - ABSL_INTERNAL_CHECK(node->concat()->left != nullptr, - ReportError(root, node)); - ABSL_INTERNAL_CHECK(node->concat()->right != nullptr, - ReportError(root, node)); - ABSL_INTERNAL_CHECK((node->length == node->concat()->left->length + - node->concat()->right->length), - ReportError(root, node)); - if (full_validation) { - worklist.push_back(node->concat()->right); - worklist.push_back(node->concat()->left); - } - } else if (node->IsFlat()) { + if (node->IsFlat()) { ABSL_INTERNAL_CHECK(node->length <= node->flat()->Capacity(), ReportError(root, node)); } else if (node->IsExternal()) { @@ -1951,78 +1323,23 @@ static bool VerifyNode(CordRep* root, CordRep* start_node, ABSL_INTERNAL_CHECK(node->substring()->start + node->length <= node->substring()->child->length, ReportError(root, node)); + } else if (node->IsCrc()) { + ABSL_INTERNAL_CHECK( + node->crc()->child != nullptr || node->crc()->length == 0, + ReportError(root, node)); + if (node->crc()->child != nullptr) { + ABSL_INTERNAL_CHECK(node->crc()->length == node->crc()->child->length, + ReportError(root, node)); + worklist.push_back(node->crc()->child); + } } } while (!worklist.empty()); return true; } -// Traverses the tree and computes the total memory allocated. -/* static */ size_t Cord::MemoryUsageAux(const CordRep* rep) { - size_t total_mem_usage = 0; - - // Allow a quick exit for the common case that the root is a leaf. - if (RepMemoryUsageLeaf(rep, &total_mem_usage)) { - return total_mem_usage; - } - - // Iterate over the tree. cur_node is never a leaf node and leaf nodes will - // never be appended to tree_stack. This reduces overhead from manipulating - // tree_stack. 
- absl::InlinedVector tree_stack; - const CordRep* cur_node = rep; - while (true) { - const CordRep* next_node = nullptr; - - if (cur_node->IsConcat()) { - total_mem_usage += sizeof(CordRepConcat); - const CordRep* left = cur_node->concat()->left; - if (!RepMemoryUsageLeaf(left, &total_mem_usage)) { - next_node = left; - } - - const CordRep* right = cur_node->concat()->right; - if (!RepMemoryUsageLeaf(right, &total_mem_usage)) { - if (next_node) { - tree_stack.push_back(next_node); - } - next_node = right; - } - } else if (cur_node->IsBtree()) { - total_mem_usage += sizeof(CordRepBtree); - const CordRepBtree* node = cur_node->btree(); - if (node->height() == 0) { - for (const CordRep* edge : node->Edges()) { - RepMemoryUsageDataEdge(edge, &total_mem_usage); - } - } else { - for (const CordRep* edge : node->Edges()) { - tree_stack.push_back(edge); - } - } - } else { - // Since cur_node is not a leaf or a concat node it must be a substring. - assert(cur_node->IsSubstring()); - total_mem_usage += sizeof(CordRepSubstring); - next_node = cur_node->substring()->child; - if (RepMemoryUsageLeaf(next_node, &total_mem_usage)) { - next_node = nullptr; - } - } - - if (!next_node) { - if (tree_stack.empty()) { - return total_mem_usage; - } - next_node = tree_stack.back(); - tree_stack.pop_back(); - } - cur_node = next_node; - } -} - std::ostream& operator<<(std::ostream& out, const Cord& cord) { for (absl::string_view chunk : cord.Chunks()) { - out.write(chunk.data(), chunk.size()); + out.write(chunk.data(), static_cast(chunk.size())); } return out; } @@ -2037,7 +1354,6 @@ uint8_t CordTestAccess::LengthToTag(size_t s) { ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, absl::StrCat("Invalid length ", s)); return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead); } -size_t CordTestAccess::SizeofCordRepConcat() { return sizeof(CordRepConcat); } size_t CordTestAccess::SizeofCordRepExternal() { return sizeof(CordRepExternal); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.h index ac1832f018..6e3da89e62 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord.h @@ -20,8 +20,7 @@ // structure. A Cord is a string-like sequence of characters optimized for // specific use cases. Unlike a `std::string`, which stores an array of // contiguous characters, Cord data is stored in a structure consisting of -// separate, reference-counted "chunks." (Currently, this implementation is a -// tree structure, though that implementation may change.) +// separate, reference-counted "chunks." // // Because a Cord consists of these chunks, data can be added to or removed from // a Cord during its lifetime. Chunks may also be shared between Cords. 
Unlike a @@ -70,6 +69,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/per_thread_tls.h" @@ -78,9 +78,13 @@ #include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/meta/type_traits.h" +#include "absl/strings/cord_analysis.h" +#include "absl/strings/cord_buffer.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" #include "absl/strings/internal/cord_rep_btree_reader.h" +#include "absl/strings/internal/cord_rep_crc.h" #include "absl/strings/internal/cord_rep_ring.h" #include "absl/strings/internal/cordz_functions.h" #include "absl/strings/internal/cordz_info.h" @@ -100,6 +104,20 @@ template Cord MakeCordFromExternal(absl::string_view, Releaser&&); void CopyCordToString(const Cord& src, std::string* dst); +// Cord memory accounting modes +enum class CordMemoryAccounting { + // Counts the *approximate* number of bytes held in full or in part by this + // Cord (which may not remain the same between invocations). Cords that share + // memory could each be "charged" independently for the same shared memory. + kTotal, + + // Counts the *approximate* number of bytes held in full or in part by this + // Cord weighted by the sharing ratio of that data. For example, if some data + // edge is shared by 4 different Cords, then each cord is attributed 1/4th of + // the total memory usage as a 'fair share' of the total memory usage. + kFairShare, +}; + // Cord // // A Cord is a sequence of characters, designed to be more efficient than a @@ -214,7 +232,7 @@ class Cord { // // Releases the Cord data. Any nodes that share data with other Cords, if // applicable, will have their reference counts reduced by 1. - void Clear(); + ABSL_ATTRIBUTE_REINITIALIZES void Clear(); // Cord::Append() // @@ -226,6 +244,58 @@ class Cord { template = 0> void Append(T&& src); + // Appends `buffer` to this cord, unless `buffer` has a zero length in which + // case this method has no effect on this cord instance. + // This method is guaranteed to consume `buffer`. + void Append(CordBuffer buffer); + + // Returns a CordBuffer, re-using potential existing capacity in this cord. + // + // Cord instances may have additional unused capacity in the last (or first) + // nodes of the underlying tree to facilitate amortized growth. This method + // allows applications to explicitly use this spare capacity if available, + // or create a new CordBuffer instance otherwise. + // If this cord has a final non-shared node with at least `min_capacity` + // available, then this method will return that buffer including its data + // contents. I.e.; the returned buffer will have a non-zero length, and + // a capacity of at least `buffer.length + min_capacity`. Otherwise, this + // method will return `CordBuffer::CreateWithDefaultLimit(capacity)`. + // + // Below an example of using GetAppendBuffer. Notice that in this example we + // use `GetAppendBuffer()` only on the first iteration. As we know nothing + // about any initial extra capacity in `cord`, we may be able to use the extra + // capacity. 
But as we add new buffers with fully utilized contents after that + // we avoid calling `GetAppendBuffer()` on subsequent iterations: while this + // works fine, it results in an unnecessary inspection of cord contents: + // + // void AppendRandomDataToCord(absl::Cord &cord, size_t n) { + // bool first = true; + // while (n > 0) { + // CordBuffer buffer = first ? cord.GetAppendBuffer(n) + // : CordBuffer::CreateWithDefaultLimit(n); + // absl::Span data = buffer.available_up_to(n); + // FillRandomValues(data.data(), data.size()); + // buffer.IncreaseLengthBy(data.size()); + // cord.Append(std::move(buffer)); + // n -= data.size(); + // first = false; + // } + // } + CordBuffer GetAppendBuffer(size_t capacity, size_t min_capacity = 16); + + // Returns a CordBuffer, re-using potential existing capacity in this cord. + // + // This function is identical to `GetAppendBuffer`, except that in the case + // where a new `CordBuffer` is allocated, it is allocated using the provided + // custom limit instead of the default limit. `GetAppendBuffer` will default + // to `CordBuffer::CreateWithDefaultLimit(capacity)` whereas this method + // will default to `CordBuffer::CreateWithCustomLimit(block_size, capacity)`. + // This method is equivalent to `GetAppendBuffer` if `block_size` is zero. + // See the documentation for `CreateWithCustomLimit` for more details on the + // restrictions and legal values for `block_size`. + CordBuffer GetCustomAppendBuffer(size_t block_size, size_t capacity, + size_t min_capacity = 16); + // Cord::Prepend() // // Prepends data to the Cord, which may come from another Cord or other string @@ -235,6 +305,11 @@ class Cord { template = 0> void Prepend(T&& src); + // Prepends `buffer` to this cord, unless `buffer` has a zero length in which + // case this method has no effect on this cord instance. + // This method is guaranteed to consume `buffer`. + void Prepend(CordBuffer buffer); + // Cord::RemovePrefix() // // Removes the first `n` bytes of a Cord. @@ -256,9 +331,7 @@ class Cord { // swap() // // Swaps the contents of two Cords. - friend void swap(Cord& x, Cord& y) noexcept { - x.swap(y); - } + friend void swap(Cord& x, Cord& y) noexcept { x.swap(y); } // Cord::size() // @@ -272,11 +345,10 @@ class Cord { // Cord::EstimatedMemoryUsage() // - // Returns the *approximate* number of bytes held in full or in part by this - // Cord (which may not remain the same between invocations). Note that Cords - // that share memory could each be "charged" independently for the same shared - // memory. - size_t EstimatedMemoryUsage() const; + // Returns the *approximate* number of bytes held by this cord. + // See CordMemoryAccounting for more information on the accounting method. + size_t EstimatedMemoryUsage(CordMemoryAccounting accounting_method = + CordMemoryAccounting::kTotal) const; // Cord::Compare() // @@ -326,7 +398,7 @@ class Cord { //---------------------------------------------------------------------------- // // A `Cord::ChunkIterator` allows iteration over the constituent chunks of its - // Cord. Such iteration allows you to perform non-const operatons on the data + // Cord. Such iteration allows you to perform non-const operations on the data // of a Cord without modifying it. // // Generally, you do not instantiate a `Cord::ChunkIterator` directly; @@ -374,12 +446,6 @@ class Cord { using CordRepBtree = absl::cord_internal::CordRepBtree; using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader; - // Stack of right children of concat nodes that we have to visit. 
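// A minimal sketch of the two accounting modes declared above, assuming only
// the EstimatedMemoryUsage(CordMemoryAccounting) overload from this header;
// both figures are approximations and the helper name is hypothetical.
#include <cstddef>
#include "absl/strings/cord.h"

inline void MemoryAccountingExample(const absl::Cord& original) {
  absl::Cord copy = original;  // tree-backed cords share their representation
  // kTotal charges this cord for every byte it references, shared or not.
  const size_t total =
      copy.EstimatedMemoryUsage(absl::CordMemoryAccounting::kTotal);
  // kFairShare divides shared bytes across the cords referencing them.
  const size_t fair =
      copy.EstimatedMemoryUsage(absl::CordMemoryAccounting::kFairShare);
  (void)total;
  (void)fair;
}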
- // Keep this at the end of the structure to avoid cache-thrashing. - // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for - // the inlined vector size (47 exists for backward compatibility). - using Stack = absl::InlinedVector; - // Constructs a `begin()` iterator from `tree`. `tree` must not be null. explicit ChunkIterator(cord_internal::CordRep* tree); @@ -395,17 +461,10 @@ class Cord { Cord AdvanceAndReadBytes(size_t n); void AdvanceBytes(size_t n); - // Stack specific operator++ - ChunkIterator& AdvanceStack(); - // Btree specific operator++ ChunkIterator& AdvanceBtree(); void AdvanceBytesBtree(size_t n); - // Iterates `n` bytes, where `n` is expected to be greater than or equal to - // `current_chunk_.size()`. - void AdvanceBytesSlowPath(size_t n); - // A view into bytes of the current `CordRep`. It may only be a view to a // suffix of bytes if this is being used by `CharIterator`. absl::string_view current_chunk_; @@ -418,12 +477,9 @@ class Cord { // Cord reader for cord btrees. Empty if not traversing a btree. CordRepBtreeReader btree_reader_; - - // See 'Stack' alias definition. - Stack stack_of_right_children_; }; - // Cord::ChunkIterator::chunk_begin() + // Cord::chunk_begin() // // Returns an iterator to the first chunk of the `Cord`. // @@ -439,7 +495,7 @@ class Cord { // } ChunkIterator chunk_begin() const; - // Cord::ChunkItertator::chunk_end() + // Cord::chunk_end() // // Returns an iterator one increment past the last chunk of the `Cord`. // @@ -449,7 +505,7 @@ class Cord { ChunkIterator chunk_end() const; //---------------------------------------------------------------------------- - // Cord::ChunkIterator::ChunkRange + // Cord::ChunkRange //---------------------------------------------------------------------------- // // `ChunkRange` is a helper class for iterating over the chunks of the `Cord`, @@ -462,6 +518,16 @@ class Cord { // `Cord::chunk_begin()` and `Cord::chunk_end()`. class ChunkRange { public: + // Fulfill minimum c++ container requirements [container.requirements] + // These (partial) container type definitions allow ChunkRange to be used + // in various utilities expecting a subset of [container.requirements]. + // For example, the below enables using `::testing::ElementsAre(...)` + using value_type = absl::string_view; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = ChunkIterator; + using const_iterator = ChunkIterator; + explicit ChunkRange(const Cord* cord) : cord_(cord) {} ChunkIterator begin() const; @@ -473,9 +539,9 @@ class Cord { // Cord::Chunks() // - // Returns a `Cord::ChunkIterator::ChunkRange` for iterating over the chunks - // of a `Cord` with a range-based for-loop. For most iteration tasks on a - // Cord, use `Cord::Chunks()` to retrieve this iterator. + // Returns a `Cord::ChunkRange` for iterating over the chunks of a `Cord` with + // a range-based for-loop. For most iteration tasks on a Cord, use + // `Cord::Chunks()` to retrieve this iterator. // // Example: // @@ -541,7 +607,7 @@ class Cord { ChunkIterator chunk_iterator_; }; - // Cord::CharIterator::AdvanceAndRead() + // Cord::AdvanceAndRead() // // Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes // advanced as a separate `Cord`. `n_bytes` must be less than or equal to the @@ -549,21 +615,21 @@ class Cord { // valid to pass `char_end()` and `0`. 
static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes); - // Cord::CharIterator::Advance() + // Cord::Advance() // // Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than // or equal to the number of bytes remaining within the Cord; otherwise, // behavior is undefined. It is valid to pass `char_end()` and `0`. static void Advance(CharIterator* it, size_t n_bytes); - // Cord::CharIterator::ChunkRemaining() + // Cord::ChunkRemaining() // // Returns the longest contiguous view starting at the iterator's position. // // `it` must be dereferenceable. static absl::string_view ChunkRemaining(const CharIterator& it); - // Cord::CharIterator::char_begin() + // Cord::char_begin() // // Returns an iterator to the first character of the `Cord`. // @@ -572,7 +638,7 @@ class Cord { // a `CharIterator` where range-based for-loops may not be available. CharIterator char_begin() const; - // Cord::CharIterator::char_end() + // Cord::char_end() // // Returns an iterator to one past the last character of the `Cord`. // @@ -581,18 +647,28 @@ class Cord { // a `CharIterator` where range-based for-loops are not useful. CharIterator char_end() const; - // Cord::CharIterator::CharRange + // Cord::CharRange // // `CharRange` is a helper class for iterating over the characters of a // producing an iterator which can be used within a range-based for loop. // Construction of a `CharRange` will return an iterator pointing to the first // character of the Cord. Generally, do not construct a `CharRange` directly; - // instead, prefer to use the `Cord::Chars()` method show below. + // instead, prefer to use the `Cord::Chars()` method shown below. // // Implementation note: `CharRange` is simply a convenience wrapper over // `Cord::char_begin()` and `Cord::char_end()`. class CharRange { public: + // Fulfill minimum c++ container requirements [container.requirements] + // Theses (partial) container type definitions allow CharRange to be used + // in various utilities expecting a subset of [container.requirements]. + // For example, the below enables using `::testing::ElementsAre(...)` + using value_type = char; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = CharIterator; + using const_iterator = CharIterator; + explicit CharRange(const Cord* cord) : cord_(cord) {} CharIterator begin() const; @@ -602,11 +678,11 @@ class Cord { const Cord* cord_; }; - // Cord::CharIterator::Chars() + // Cord::Chars() // - // Returns a `Cord::CharIterator` for iterating over the characters of a - // `Cord` with a range-based for-loop. For most character-based iteration - // tasks on a Cord, use `Cord::Chars()` to retrieve this iterator. + // Returns a `Cord::CharRange` for iterating over the characters of a `Cord` + // with a range-based for-loop. For most character-based iteration tasks on a + // Cord, use `Cord::Chars()` to retrieve this iterator. // // Example: // @@ -653,6 +729,29 @@ class Cord { cord->Append(part); } + // Cord::SetExpectedChecksum() + // + // Stores a checksum value with this non-empty cord instance, for later + // retrieval. + // + // The expected checksum is a number stored out-of-band, alongside the data. + // It is preserved across copies and assignments, but any mutations to a cord + // will cause it to lose its expected checksum. + // + // The expected checksum is not part of a Cord's value, and does not affect + // operations such as equality or hashing. 
+ // + // This field is intended to store a CRC32C checksum for later validation, to + // help support end-to-end checksum workflows. However, the Cord API itself + // does no CRC validation, and assigns no meaning to this number. + // + // This call has no effect if this cord is empty. + void SetExpectedChecksum(uint32_t crc); + + // Returns this cord's expected checksum, if it has one. Otherwise, returns + // nullopt. + absl::optional ExpectedChecksum() const; + template friend H AbslHashValue(H hash_state, const absl::Cord& c) { absl::optional maybe_flat = c.TryFlat(); @@ -668,7 +767,8 @@ class Cord { // be used by spelling absl::strings_internal::MakeStringConstant, which is // also an internal API. template - explicit constexpr Cord(strings_internal::StringConstant); + // NOLINTNEXTLINE(google-explicit-constructor) + constexpr Cord(strings_internal::StringConstant); private: using CordRep = absl::cord_internal::CordRep; @@ -720,16 +820,16 @@ class Cord { bool empty() const; size_t size() const; const char* data() const; // Returns nullptr if holding pointer - void set_data(const char* data, size_t n, - bool nullify_tail); // Discards pointer, if any - char* set_data(size_t n); // Write data to the result + void set_data(const char* data, size_t n); // Discards pointer, if any + char* set_data(size_t n); // Write data to the result // Returns nullptr if holding bytes absl::cord_internal::CordRep* tree() const; absl::cord_internal::CordRep* as_tree() const; + const char* as_chars() const; // Returns non-null iff was holding a pointer absl::cord_internal::CordRep* clear(); // Converts to pointer if necessary. - void reduce_size(size_t n); // REQUIRES: holding data + void reduce_size(size_t n); // REQUIRES: holding data void remove_prefix(size_t n); // REQUIRES: holding data void AppendArray(absl::string_view src, MethodIdentifier method); absl::string_view FindFlatStartPiece() const; @@ -773,33 +873,15 @@ class Cord { void PrependTreeToTree(CordRep* tree, MethodIdentifier method); void PrependTree(CordRep* tree, MethodIdentifier method); - template - void GetAppendRegion(char** region, size_t* size, size_t length); - bool IsSame(const InlineRep& other) const { return memcmp(&data_, &other.data_, sizeof(data_)) == 0; } - int BitwiseCompare(const InlineRep& other) const { - uint64_t x, y; - // Use memcpy to avoid aliasing issues. - memcpy(&x, &data_, sizeof(x)); - memcpy(&y, &other.data_, sizeof(y)); - if (x == y) { - memcpy(&x, reinterpret_cast(&data_) + 8, sizeof(x)); - memcpy(&y, reinterpret_cast(&other.data_) + 8, sizeof(y)); - if (x == y) return 0; - } - return absl::big_endian::FromHost64(x) < absl::big_endian::FromHost64(y) - ? -1 - : 1; - } void CopyTo(std::string* dst) const { // memcpy is much faster when operating on a known size. On most supported // platforms, the small string optimization is large enough that resizing // to 15 bytes does not cause a memory allocation. - absl::strings_internal::STLStringResizeUninitialized(dst, - sizeof(data_) - 1); - memcpy(&(*dst)[0], &data_, sizeof(data_) - 1); + absl::strings_internal::STLStringResizeUninitialized(dst, kMaxInline); + memcpy(&(*dst)[0], data_.as_chars(), kMaxInline); // erase is faster than resize because the logic for memory allocation is // not needed. dst->erase(inline_size()); @@ -813,6 +895,11 @@ class Cord { // Returns true if the Cord is being profiled by cordz. bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); } + // Returns the available inlined capacity, or 0 if is_tree() == true. 
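// A minimal usage sketch for the expected-checksum API documented above,
// assuming only SetExpectedChecksum()/ExpectedChecksum() as declared here; the
// crc value is illustrative and the Cord performs no validation of it.
#include <cassert>
#include "absl/strings/cord.h"

inline void ChecksumExample() {
  absl::Cord cord("payload");
  cord.SetExpectedChecksum(0x1234);
  assert(cord.ExpectedChecksum().has_value());
  assert(*cord.ExpectedChecksum() == 0x1234);
  cord.Append(" more");  // any mutation drops the stored checksum
  assert(!cord.ExpectedChecksum().has_value());
}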
+ size_t remaining_inline_capacity() const { + return data_.is_tree() ? 0 : kMaxInline - data_.inline_size(); + } + // Returns the profiled CordzInfo, or nullptr if not sampled. absl::cord_internal::CordzInfo* cordz_info() const { return data_.cordz_info(); @@ -839,13 +926,17 @@ class Cord { void set_inline_size(size_t size) { data_.set_inline_size(size); } size_t inline_size() const { return data_.inline_size(); } + // Empty cords that carry a checksum have a CordRepCrc node with a null + // child node. The code can avoid lots of special cases where it would + // otherwise transition from tree to inline storage if we just remove the + // CordRepCrc node before mutations. Must never be called inside a + // CordzUpdateScope since it untracks the cordz info. + void MaybeRemoveEmptyCrcNode(); + cord_internal::InlineData data_; }; InlineRep contents_; - // Helper for MemoryUsage(). - static size_t MemoryUsageAux(const absl::cord_internal::CordRep* rep); - // Helper for GetFlat() and TryFlat(). static bool GetFlatAux(absl::cord_internal::CordRep* rep, absl::string_view* fragment); @@ -883,6 +974,20 @@ class Cord { template void AppendImpl(C&& src); + // Appends / Prepends `src` to this instance, using precise sizing. + // This method does explicitly not attempt to use any spare capacity + // in any pending last added private owned flat. + // Requires `src` to be <= kMaxFlatLength. + void AppendPrecise(absl::string_view src, MethodIdentifier method); + void PrependPrecise(absl::string_view src, MethodIdentifier method); + + CordBuffer GetAppendBufferSlowPath(size_t block_size, size_t capacity, + size_t min_capacity); + + // Prepends the provided data to this instance. `method` contains the public + // API method for this action which is tracked for Cordz sampling purposes. + void PrependArray(absl::string_view src, MethodIdentifier method); + // Assigns the value in 'src' to this instance, 'stealing' its contents. // Requires src.length() > kMaxBytesToCopy. Cord& AssignLargeString(std::string&& src); @@ -915,17 +1020,17 @@ namespace cord_internal { // Fast implementation of memmove for up to 15 bytes. This implementation is // safe for overlapping regions. If nullify_tail is true, the destination is -// padded with '\0' up to 16 bytes. -inline void SmallMemmove(char* dst, const char* src, size_t n, - bool nullify_tail = false) { +// padded with '\0' up to 15 bytes. +template +inline void SmallMemmove(char* dst, const char* src, size_t n) { if (n >= 8) { - assert(n <= 16); + assert(n <= 15); uint64_t buf1; uint64_t buf2; memcpy(&buf1, src, 8); memcpy(&buf2, src + n - 8, 8); if (nullify_tail) { - memset(dst + 8, 0, 8); + memset(dst + 7, 0, 8); } memcpy(dst, &buf1, 8); memcpy(dst + n - 8, &buf2, 8); @@ -936,7 +1041,7 @@ inline void SmallMemmove(char* dst, const char* src, size_t n, memcpy(&buf2, src + n - 4, 4); if (nullify_tail) { memset(dst + 4, 0, 4); - memset(dst + 8, 0, 8); + memset(dst + 7, 0, 8); } memcpy(dst, &buf1, 4); memcpy(dst + n - 4, &buf2, 4); @@ -947,29 +1052,23 @@ inline void SmallMemmove(char* dst, const char* src, size_t n, dst[n - 1] = src[n - 1]; } if (nullify_tail) { - memset(dst + 8, 0, 8); + memset(dst + 7, 0, 8); memset(dst + n, 0, 8); } } } // Does non-template-specific `CordRepExternal` initialization. -// Expects `data` to be non-empty. +// Requires `data` to be non-empty. 
void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep); // Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer -// to it, or `nullptr` if `data` was empty. +// to it. Requires `data` to be non-empty. template // NOLINTNEXTLINE - suppress clang-tidy raw pointer return. CordRep* NewExternalRep(absl::string_view data, Releaser&& releaser) { + assert(!data.empty()); using ReleaserType = absl::decay_t; - if (data.empty()) { - // Never create empty external nodes. - InvokeReleaser(Rank0{}, ReleaserType(std::forward(releaser)), - data); - return nullptr; - } - CordRepExternal* rep = new CordRepExternalImpl( std::forward(releaser), 0); InitializeCordRepExternal(data, rep); @@ -989,10 +1088,15 @@ inline CordRep* NewExternalRep(absl::string_view data, template Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) { Cord cord; - if (auto* rep = ::absl::cord_internal::NewExternalRep( - data, std::forward(releaser))) { - cord.contents_.EmplaceTree(rep, + if (ABSL_PREDICT_TRUE(!data.empty())) { + cord.contents_.EmplaceTree(::absl::cord_internal::NewExternalRep( + data, std::forward(releaser)), Cord::MethodIdentifier::kMakeCordFromExternal); + } else { + using ReleaserType = absl::decay_t; + cord_internal::InvokeReleaser( + cord_internal::Rank0{}, ReleaserType(std::forward(releaser)), + data); } return cord; } @@ -1047,6 +1151,11 @@ inline const char* Cord::InlineRep::data() const { return is_tree() ? nullptr : data_.as_chars(); } +inline const char* Cord::InlineRep::as_chars() const { + assert(!data_.is_tree()); + return data_.as_chars(); +} + inline absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const { assert(data_.is_tree()); return data_.as_tree(); @@ -1072,7 +1181,7 @@ inline cord_internal::CordRepFlat* Cord::InlineRep::MakeFlatWithExtraCapacity( size_t len = data_.inline_size(); auto* result = CordRepFlat::New(len + extra); result->length = len; - memcpy(result->Data(), data_.as_chars(), sizeof(data_)); + memcpy(result->Data(), data_.as_chars(), InlineRep::kMaxInline); return result; } @@ -1134,6 +1243,18 @@ inline void Cord::InlineRep::CopyToArray(char* dst) const { cord_internal::SmallMemmove(dst, data_.as_chars(), n); } +inline void Cord::InlineRep::MaybeRemoveEmptyCrcNode() { + CordRep* rep = tree(); + if (rep == nullptr || ABSL_PREDICT_TRUE(rep->length > 0)) { + return; + } + assert(rep->IsCrc()); + assert(rep->crc()->child == nullptr); + CordzInfo::MaybeUntrackCord(cordz_info()); + CordRep::Unref(rep); + ResetToEmpty(); +} + constexpr inline Cord::Cord() noexcept {} inline Cord::Cord(absl::string_view src) @@ -1183,12 +1304,17 @@ inline size_t Cord::size() const { return contents_.size(); } -inline bool Cord::empty() const { return contents_.empty(); } +inline bool Cord::empty() const { return size() == 0; } -inline size_t Cord::EstimatedMemoryUsage() const { +inline size_t Cord::EstimatedMemoryUsage( + CordMemoryAccounting accounting_method) const { size_t result = sizeof(Cord); if (const absl::cord_internal::CordRep* rep = contents_.tree()) { - result += MemoryUsageAux(rep); + if (accounting_method == CordMemoryAccounting::kFairShare) { + result += cord_internal::GetEstimatedFairShareMemoryUsage(rep); + } else { + result += cord_internal::GetEstimatedMemoryUsage(rep); + } } return result; } @@ -1222,12 +1348,51 @@ inline void Cord::Append(absl::string_view src) { contents_.AppendArray(src, CordzUpdateTracker::kAppendString); } +inline void Cord::Prepend(absl::string_view src) { + PrependArray(src, 
CordzUpdateTracker::kPrependString); +} + +inline void Cord::Append(CordBuffer buffer) { + if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return; + absl::string_view short_value; + if (CordRep* rep = buffer.ConsumeValue(short_value)) { + contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer); + } else { + AppendPrecise(short_value, CordzUpdateTracker::kAppendCordBuffer); + } +} + +inline void Cord::Prepend(CordBuffer buffer) { + if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return; + absl::string_view short_value; + if (CordRep* rep = buffer.ConsumeValue(short_value)) { + contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer); + } else { + PrependPrecise(short_value, CordzUpdateTracker::kPrependCordBuffer); + } +} + +inline CordBuffer Cord::GetAppendBuffer(size_t capacity, size_t min_capacity) { + if (empty()) return CordBuffer::CreateWithDefaultLimit(capacity); + return GetAppendBufferSlowPath(0, capacity, min_capacity); +} + +inline CordBuffer Cord::GetCustomAppendBuffer(size_t block_size, + size_t capacity, + size_t min_capacity) { + if (empty()) { + return block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity) + : CordBuffer::CreateWithDefaultLimit(capacity); + } + return GetAppendBufferSlowPath(block_size, capacity, min_capacity); +} + extern template void Cord::Append(std::string&& src); extern template void Cord::Prepend(std::string&& src); inline int Cord::Compare(const Cord& rhs) const { if (!contents_.is_tree() && !rhs.contents_.is_tree()) { - return contents_.BitwiseCompare(rhs.contents_); + return contents_.data_.Compare(rhs.contents_.data_); } return CompareImpl(rhs); @@ -1248,27 +1413,31 @@ inline bool Cord::StartsWith(absl::string_view rhs) const { } inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) { + tree = cord_internal::SkipCrcNode(tree); if (tree->tag == cord_internal::BTREE) { current_chunk_ = btree_reader_.Init(tree->btree()); - return; + } else { + current_leaf_ = tree; + current_chunk_ = cord_internal::EdgeData(tree); } - - stack_of_right_children_.push_back(tree); - operator++(); } -inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree) - : bytes_remaining_(tree->length) { +inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree) { + bytes_remaining_ = tree->length; InitTree(tree); } -inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) - : bytes_remaining_(cord->size()) { - if (cord->contents_.is_tree()) { - InitTree(cord->contents_.as_tree()); +inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) { + if (CordRep* tree = cord->contents_.tree()) { + bytes_remaining_ = tree->length; + if (ABSL_PREDICT_TRUE(bytes_remaining_ != 0)) { + InitTree(tree); + } else { + current_chunk_ = {}; + } } else { - current_chunk_ = - absl::string_view(cord->contents_.data(), bytes_remaining_); + bytes_remaining_ = cord->contents_.inline_size(); + current_chunk_ = {cord->contents_.data(), bytes_remaining_}; } } @@ -1298,8 +1467,11 @@ inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() { assert(bytes_remaining_ >= current_chunk_.size()); bytes_remaining_ -= current_chunk_.size(); if (bytes_remaining_ > 0) { - return btree_reader_ ? AdvanceBtree() : AdvanceStack(); - } else { + if (btree_reader_) { + return AdvanceBtree(); + } else { + assert(!current_chunk_.empty()); // Called on invalid iterator. 
+ } current_chunk_ = {}; } return *this; @@ -1340,7 +1512,11 @@ inline void Cord::ChunkIterator::AdvanceBytes(size_t n) { if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) { RemoveChunkPrefix(n); } else if (n != 0) { - btree_reader_ ? AdvanceBytesBtree(n) : AdvanceBytesSlowPath(n); + if (btree_reader_) { + AdvanceBytesBtree(n); + } else { + bytes_remaining_ = 0; + } } } @@ -1427,11 +1603,11 @@ inline void Cord::ForEachChunk( if (rep == nullptr) { callback(absl::string_view(contents_.data(), contents_.size())); } else { - return ForEachChunkAux(rep, callback); + ForEachChunkAux(rep, callback); } } -// Nonmember Cord-to-Cord relational operarators. +// Nonmember Cord-to-Cord relational operators. inline bool operator==(const Cord& lhs, const Cord& rhs) { if (lhs.contents_.IsSame(rhs.contents_)) return true; size_t rhs_size = rhs.size(); @@ -1440,12 +1616,8 @@ inline bool operator==(const Cord& lhs, const Cord& rhs) { } inline bool operator!=(const Cord& x, const Cord& y) { return !(x == y); } -inline bool operator<(const Cord& x, const Cord& y) { - return x.Compare(y) < 0; -} -inline bool operator>(const Cord& x, const Cord& y) { - return x.Compare(y) > 0; -} +inline bool operator<(const Cord& x, const Cord& y) { return x.Compare(y) < 0; } +inline bool operator>(const Cord& x, const Cord& y) { return x.Compare(y) > 0; } inline bool operator<=(const Cord& x, const Cord& y) { return x.Compare(y) <= 0; } @@ -1486,7 +1658,6 @@ class CordTestAccess { public: static size_t FlatOverhead(); static size_t MaxFlatLength(); - static size_t SizeofCordRepConcat(); static size_t SizeofCordRepExternal(); static size_t SizeofCordRepSubstring(); static size_t FlatTagToLength(uint8_t tag); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_analysis.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_analysis.cc new file mode 100644 index 0000000000..73d3c4e6ff --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_analysis.cc @@ -0,0 +1,188 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/cord_analysis.h" + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/container/inlined_vector.h" +#include "absl/strings/internal/cord_data_edge.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_crc.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/strings/internal/cord_rep_ring.h" +// +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/functional/function_ref.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace cord_internal { +namespace { + +// Accounting mode for analyzing memory usage. 
+enum class Mode { kTotal, kFairShare };
+
+// CordRepRef holds a `const CordRep*` reference in rep, and depending on mode,
+// holds a 'fraction' representing a cumulative inverse refcount weight.
+template <Mode mode>
+struct CordRepRef {
+  // Instantiates a CordRepRef instance.
+  explicit CordRepRef(const CordRep* r) : rep(r) {}
+
+  // Creates a child reference holding the provided child.
+  // Overloaded to add cumulative reference count for kFairShare.
+  CordRepRef Child(const CordRep* child) const { return CordRepRef(child); }
+
+  const CordRep* rep;
+};
+
+// RawUsage holds the computed total number of bytes.
+template <Mode mode>
+struct RawUsage {
+  size_t total = 0;
+
+  // Add 'size' to total, ignoring the CordRepRef argument.
+  void Add(size_t size, CordRepRef<mode>) { total += size; }
+};
+
+// Returns n / refcount avoiding a div for the common refcount == 1.
+template <typename refcount_t>
+double MaybeDiv(double d, refcount_t refcount) {
+  return refcount == 1 ? d : d / refcount;
+}
+
+// Overloaded 'kFairShare' specialization for CordRepRef. This class holds a
+// `fraction` value which represents a cumulative inverse refcount weight.
+// For example, a top node with a reference count of 2 will have a fraction
+// value of 1/2 = 0.5, representing the 'fair share' of memory it references.
+// A node below such a node with a reference count of 5 then has a fraction of
+// 0.5 / 5 = 0.1 representing the fair share of memory below that node, etc.
+template <>
+struct CordRepRef<Mode::kFairShare> {
+  // Creates a CordRepRef with the provided rep and top (parent) fraction.
+  explicit CordRepRef(const CordRep* r, double frac = 1.0)
+      : rep(r), fraction(MaybeDiv(frac, r->refcount.Get())) {}
+
+  // Returns a CordRepRef with a fraction of `this->fraction / child.refcount`
+  CordRepRef Child(const CordRep* child) const {
+    return CordRepRef(child, fraction);
+  }
+
+  const CordRep* rep;
+  double fraction;
+};
+
+// Overloaded 'kFairShare' specialization for RawUsage
+template <>
+struct RawUsage<Mode::kFairShare> {
+  double total = 0;
+
+  // Adds `size` multiplied by `rep.fraction` to the total size.
+  void Add(size_t size, CordRepRef<Mode::kFairShare> rep) {
+    total += static_cast<double>(size) * rep.fraction;
+  }
+};
+
+// Computes the estimated memory size of the provided data edge.
+// External reps are assumed 'heap allocated at their exact size'.
+template <Mode mode>
+void AnalyzeDataEdge(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+  assert(IsDataEdge(rep.rep));
+
+  // Consume all substrings
+  if (rep.rep->tag == SUBSTRING) {
+    raw_usage.Add(sizeof(CordRepSubstring), rep);
+    rep = rep.Child(rep.rep->substring()->child);
+  }
+
+  // Consume FLAT / EXTERNAL
+  const size_t size =
+      rep.rep->tag >= FLAT
+          ? rep.rep->flat()->AllocatedSize()
+          : rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
+  raw_usage.Add(size, rep);
+}
+
+// Computes the memory size of the provided Ring tree.
+template <Mode mode>
+void AnalyzeRing(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+  const CordRepRing* ring = rep.rep->ring();
+  raw_usage.Add(CordRepRing::AllocSize(ring->capacity()), rep);
+  ring->ForEach([&](CordRepRing::index_type pos) {
+    AnalyzeDataEdge(rep.Child(ring->entry_child(pos)), raw_usage);
+  });
+}
+
+// Computes the memory size of the provided Btree tree.
+template <Mode mode>
+void AnalyzeBtree(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+  raw_usage.Add(sizeof(CordRepBtree), rep);
+  const CordRepBtree* tree = rep.rep->btree();
+  if (tree->height() > 0) {
+    for (CordRep* edge : tree->Edges()) {
+      AnalyzeBtree(rep.Child(edge), raw_usage);
+    }
+  } else {
+    for (CordRep* edge : tree->Edges()) {
+      AnalyzeDataEdge(rep.Child(edge), raw_usage);
+    }
+  }
+}
+
+template <Mode mode>
+size_t GetEstimatedUsage(const CordRep* rep) {
+  // Zero initialized memory usage totals.
+  RawUsage<mode> raw_usage;
+
+  // Capture top level node and refcount into a CordRepRef.
+  CordRepRef<mode> repref(rep);
+
+  // Consume the top level CRC node if present.
+  if (repref.rep->tag == CRC) {
+    raw_usage.Add(sizeof(CordRepCrc), repref);
+    repref = repref.Child(repref.rep->crc()->child);
+  }
+
+  if (IsDataEdge(repref.rep)) {
+    AnalyzeDataEdge(repref, raw_usage);
+  } else if (repref.rep->tag == BTREE) {
+    AnalyzeBtree(repref, raw_usage);
+  } else if (repref.rep->tag == RING) {
+    AnalyzeRing(repref, raw_usage);
+  } else {
+    assert(false);
+  }
+
+  return static_cast<size_t>(raw_usage.total);
+}
+
+}  // namespace
+
+size_t GetEstimatedMemoryUsage(const CordRep* rep) {
+  return GetEstimatedUsage<Mode::kTotal>(rep);
+}
+
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep) {
+  return GetEstimatedUsage<Mode::kFairShare>(rep);
+}
+
+}  // namespace cord_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_analysis.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_analysis.h
new file mode 100644
index 0000000000..7041ad1aa5
--- /dev/null
+++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_analysis.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORD_ANALYSIS_H_
+#define ABSL_STRINGS_CORD_ANALYSIS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// Cord (which may not remain the same between invocations). Cords that share
+// memory could each be "charged" independently for the same shared memory.
+size_t GetEstimatedMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// CordRep weighted by the sharing ratio of that data. For example, if some
+// data edge is shared by 4 different Cords, then each cord is attributed
+// 1/4th of the total memory usage as its 'fair share'.
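Editorial aside: a hedged illustration of that attribution through the public API; the `absl::Cord::EstimatedMemoryUsage()` overload in the cord.h hunk above forwards to these helpers, and exact byte counts depend on the build.

  #include <cstddef>
  #include <cstdio>
  #include <string>
  #include "absl/strings/cord.h"

  void CompareAccountingModes() {
    absl::Cord original(std::string(1 << 16, 'x'));
    absl::Cord copy1 = original;  // all three cords share one tree (refcount 3)
    absl::Cord copy2 = original;
    // Total accounting charges every holder for the full tree...
    size_t total =
        original.EstimatedMemoryUsage(absl::CordMemoryAccounting::kTotal);
    // ...while fair-share accounting charges each holder roughly a third of it.
    size_t fair =
        original.EstimatedMemoryUsage(absl::CordMemoryAccounting::kFairShare);
    std::printf("total=%zu fair-share=%zu\n", total, fair);
  }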
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep); + +} // namespace cord_internal +ABSL_NAMESPACE_END +} // namespace absl + + +#endif // ABSL_STRINGS_CORD_ANALYSIS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer.cc new file mode 100644 index 0000000000..fad6269cb9 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer.cc @@ -0,0 +1,30 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/cord_buffer.h" + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr size_t CordBuffer::kDefaultLimit; +constexpr size_t CordBuffer::kCustomLimit; +#endif + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer.h new file mode 100644 index 0000000000..15494b31e0 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer.h @@ -0,0 +1,575 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: cord_buffer.h +// ----------------------------------------------------------------------------- +// +// This file defines an `absl::CordBuffer` data structure to hold data for +// eventual inclusion within an existing `Cord` data structure. Cord buffers are +// useful for building large Cords that may require custom allocation of its +// associated memory. +// +#ifndef ABSL_STRINGS_CORD_BUFFER_H_ +#define ABSL_STRINGS_CORD_BUFFER_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/numeric/bits.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +class Cord; +class CordBufferTestPeer; + +// CordBuffer +// +// CordBuffer manages memory buffers for purposes such as zero-copy APIs as well +// as applications building cords with large data requiring granular control +// over the allocation and size of cord data. 
For example, a function creating +// a cord of random data could use a CordBuffer as follows: +// +// absl::Cord CreateRandomCord(size_t length) { +// absl::Cord cord; +// while (length > 0) { +// CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(length); +// absl::Span data = buffer.available_up_to(length); +// FillRandomValues(data.data(), data.size()); +// buffer.IncreaseLengthBy(data.size()); +// cord.Append(std::move(buffer)); +// length -= data.size(); +// } +// return cord; +// } +// +// CordBuffer instances are by default limited to a capacity of `kDefaultLimit` +// bytes. `kDefaultLimit` is currently just under 4KiB, but this default may +// change in the future and/or for specific architectures. The default limit is +// aimed to provide a good trade-off between performance and memory overhead. +// Smaller buffers typically incur more compute cost while larger buffers are +// more CPU efficient but create significant memory overhead because of such +// allocations being less granular. Using larger buffers may also increase the +// risk of memory fragmentation. +// +// Applications create a buffer using one of the `CreateWithDefaultLimit()` or +// `CreateWithCustomLimit()` methods. The returned instance will have a non-zero +// capacity and a zero length. Applications use the `data()` method to set the +// contents of the managed memory, and once done filling the buffer, use the +// `IncreaseLengthBy()` or 'SetLength()' method to specify the length of the +// initialized data before adding the buffer to a Cord. +// +// The `CreateWithCustomLimit()` method is intended for applications needing +// larger buffers than the default memory limit, allowing the allocation of up +// to a capacity of `kCustomLimit` bytes minus some minimum internal overhead. +// The usage of `CreateWithCustomLimit()` should be limited to only those use +// cases where the distribution of the input is relatively well known, and/or +// where the trade-off between the efficiency gains outweigh the risk of memory +// fragmentation. See the documentation for `CreateWithCustomLimit()` for more +// information on using larger custom limits. +// +// The capacity of a `CordBuffer` returned by one of the `Create` methods may +// be larger than the requested capacity due to rounding, alignment and +// granularity of the memory allocator. Applications should use the `capacity` +// method to obtain the effective capacity of the returned instance as +// demonstrated in the provided example above. +// +// CordBuffer is a move-only class. All references into the managed memory are +// invalidated when an instance is moved into either another CordBuffer instance +// or a Cord. Writing to a location obtained by a previous call to `data()` +// after an instance was moved will lead to undefined behavior. +// +// A `moved from` CordBuffer instance will have a valid, but empty state. +// CordBuffer is thread compatible. +class CordBuffer { + public: + // kDefaultLimit + // + // Default capacity limits of allocated CordBuffers. + // See the class comments for more information on allocation limits. + static constexpr size_t kDefaultLimit = cord_internal::kMaxFlatLength; + + // kCustomLimit + // + // Maximum size for CreateWithCustomLimit() allocated buffers. + // Note that the effective capacity may be slightly less + // because of internal overhead of internal cord buffers. + static constexpr size_t kCustomLimit = 64U << 10; + + // Constructors, Destructors and Assignment Operators + + // Creates an empty CordBuffer. 
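Editorial aside: as a counterpart to the `CreateRandomCord()` example above, a hedged sketch of the `Cord::GetAppendBuffer()` path declared earlier in this diff, which may hand back spare capacity the cord already owns. The reuse policy is an implementation detail, so `min_capacity` is passed explicitly here rather than relying on any default.

  #include <cstring>
  #include <utility>
  #include "absl/strings/cord.h"
  #include "absl/strings/cord_buffer.h"
  #include "absl/strings/string_view.h"
  #include "absl/types/span.h"

  // Appends `src` to `cord` by writing into buffers obtained from the cord.
  void AppendThroughBuffer(absl::Cord& cord, absl::string_view src) {
    while (!src.empty()) {
      absl::CordBuffer buffer =
          cord.GetAppendBuffer(/*capacity=*/src.size(), /*min_capacity=*/1);
      absl::Span<char> space = buffer.available_up_to(src.size());
      memcpy(space.data(), src.data(), space.size());
      buffer.IncreaseLengthBy(space.size());
      src.remove_prefix(space.size());
      // Re-attaches the buffer, including any bytes it already held.
      cord.Append(std::move(buffer));
    }
  }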
+ CordBuffer() = default; + + // Destroys this CordBuffer instance and, if not empty, releases any memory + // managed by this instance, invalidating previously returned references. + ~CordBuffer(); + + // CordBuffer is move-only + CordBuffer(CordBuffer&& rhs) noexcept; + CordBuffer& operator=(CordBuffer&&) noexcept; + CordBuffer(const CordBuffer&) = delete; + CordBuffer& operator=(const CordBuffer&) = delete; + + // CordBuffer::MaximumPayload() + // + // Returns the guaranteed maximum payload for a CordBuffer returned by the + // `CreateWithDefaultLimit()` method. While small, each internal buffer inside + // a Cord incurs an overhead to manage the length, type and reference count + // for the buffer managed inside the cord tree. Applications can use this + // method to get approximate number of buffers required for a given byte + // size, etc. + // + // For example: + // const size_t payload = absl::CordBuffer::MaximumPayload(); + // const size_t buffer_count = (total_size + payload - 1) / payload; + // buffers.reserve(buffer_count); + static constexpr size_t MaximumPayload(); + + // Overload to the above `MaximumPayload()` except that it returns the + // maximum payload for a CordBuffer returned by the `CreateWithCustomLimit()` + // method given the provided `block_size`. + static constexpr size_t MaximumPayload(size_t block_size); + + // CordBuffer::CreateWithDefaultLimit() + // + // Creates a CordBuffer instance of the desired `capacity`, capped at the + // default limit `kDefaultLimit`. The returned buffer has a guaranteed + // capacity of at least `min(kDefaultLimit, capacity)`. See the class comments + // for more information on buffer capacities and intended usage. + static CordBuffer CreateWithDefaultLimit(size_t capacity); + + + // CordBuffer::CreateWithCustomLimit() + // + // Creates a CordBuffer instance of the desired `capacity` rounded to an + // appropriate power of 2 size less than, or equal to `block_size`. + // Requires `block_size` to be a power of 2. + // + // If `capacity` is less than or equal to `kDefaultLimit`, then this method + // behaves identical to `CreateWithDefaultLimit`, which means that the caller + // is guaranteed to get a buffer of at least the requested capacity. + // + // If `capacity` is greater than or equal to `block_size`, then this method + // returns a buffer with an `allocated size` of `block_size` bytes. Otherwise, + // this methods returns a buffer with a suitable smaller power of 2 block size + // to satisfy the request. The actual size depends on a number of factors, and + // is typically (but not necessarily) the highest or second highest power of 2 + // value less than or equal to `capacity`. + // + // The 'allocated size' includes a small amount of overhead required for + // internal state, which is currently 13 bytes on 64-bit platforms. For + // example: a buffer created with `block_size` and `capacity' set to 8KiB + // will have an allocated size of 8KiB, and an effective internal `capacity` + // of 8KiB - 13 = 8179 bytes. 
+ // + // To demonstrate this in practice, let's assume we want to read data from + // somewhat larger files using approximately 64KiB buffers: + // + // absl::Cord ReadFromFile(int fd, size_t n) { + // absl::Cord cord; + // while (n > 0) { + // CordBuffer buffer = CordBuffer::CreateWithCustomLimit(64 << 10, n); + // absl::Span data = buffer.available_up_to(n); + // ReadFileDataOrDie(fd, data.data(), data.size()); + // buffer.IncreaseLengthBy(data.size()); + // cord.Append(std::move(buffer)); + // n -= data.size(); + // } + // return cord; + // } + // + // If we'd use this function to read a file of 659KiB, we may get the + // following pattern of allocated cord buffer sizes: + // + // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523) + // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523) + // ... + // CreateWithCustomLimit(64KiB, 19586) --> ~16KiB (16371) + // CreateWithCustomLimit(64KiB, 3215) --> 3215 (at least 3215) + // + // The reason the method returns a 16K buffer instead of a roughly 19K buffer + // is to reduce memory overhead and fragmentation risks. Using carefully + // chosen power of 2 values reduces the entropy of allocated memory sizes. + // + // Additionally, let's assume we'd use the above function on files that are + // generally smaller than 64K. If we'd use 'precise' sized buffers for such + // files, than we'd get a very wide distribution of allocated memory sizes + // rounded to 4K page sizes, and we'd end up with a lot of unused capacity. + // + // In general, application should only use custom sizes if the data they are + // consuming or storing is expected to be many times the chosen block size, + // and be based on objective data and performance metrics. For example, a + // compress function may work faster and consume less CPU when using larger + // buffers. Such an application should pick a size offering a reasonable + // trade-off between expected data size, compute savings with larger buffers, + // and the cost or fragmentation effect of larger buffers. + // Applications must pick a reasonable spot on that curve, and make sure their + // data meets their expectations in size distributions such as "mostly large". + static CordBuffer CreateWithCustomLimit(size_t block_size, size_t capacity); + + // CordBuffer::available() + // + // Returns the span delineating the available capacity in this buffer + // which is defined as `{ data() + length(), capacity() - length() }`. + absl::Span available(); + + // CordBuffer::available_up_to() + // + // Returns the span delineating the available capacity in this buffer limited + // to `size` bytes. This is equivalent to `available().subspan(0, size)`. + absl::Span available_up_to(size_t size); + + // CordBuffer::data() + // + // Returns a non-null reference to the data managed by this instance. + // Applications are allowed to write up to `capacity` bytes of instance data. + // CordBuffer data is uninitialized by default. Reading data from an instance + // that has not yet been initialized will lead to undefined behavior. + char* data(); + const char* data() const; + + // CordBuffer::length() + // + // Returns the length of this instance. The default length of a CordBuffer is + // 0, indicating an 'empty' CordBuffer. Applications must specify the length + // of the data in a CordBuffer before adding it to a Cord. + size_t length() const; + + // CordBuffer::capacity() + // + // Returns the capacity of this instance. 
All instances have a non-zero + // capacity: default and `moved from` instances have a small internal buffer. + size_t capacity() const; + + // CordBuffer::IncreaseLengthBy() + // + // Increases the length of this buffer by the specified 'n' bytes. + // Applications must make sure all data in this buffer up to the new length + // has been initialized before adding a CordBuffer to a Cord: failure to do so + // will lead to undefined behavior. Requires `length() + n <= capacity()`. + // Typically, applications will use 'available_up_to()` to get a span of the + // desired capacity, and use `span.size()` to increase the length as in: + // absl::Span span = buffer.available_up_to(desired); + // buffer.IncreaseLengthBy(span.size()); + // memcpy(span.data(), src, span.size()); + // etc... + void IncreaseLengthBy(size_t n); + + // CordBuffer::SetLength() + // + // Sets the data length of this instance. Applications must make sure all data + // of the specified length has been initialized before adding a CordBuffer to + // a Cord: failure to do so will lead to undefined behavior. + // Setting the length to a small value or zero does not release any memory + // held by this CordBuffer instance. Requires `length <= capacity()`. + // Applications should preferably use the `IncreaseLengthBy()` method above + // in combination with the 'available()` or `available_up_to()` methods. + void SetLength(size_t length); + + private: + // Make sure we don't accidentally over promise. + static_assert(kCustomLimit <= cord_internal::kMaxLargeFlatSize, ""); + + // Assume the cost of an 'uprounded' allocation to CeilPow2(size) versus + // the cost of allocating at least 1 extra flat <= 4KB: + // - Flat overhead = 13 bytes + // - Btree amortized cost / node =~ 13 bytes + // - 64 byte granularity of tcmalloc at 4K =~ 32 byte average + // CPU cost and efficiency requires we should at least 'save' something by + // splitting, as a poor man's measure, we say the slop needs to be + // at least double the cost offset to make it worth splitting: ~128 bytes. + static constexpr size_t kMaxPageSlop = 128; + + // Overhead for allocation a flat. + static constexpr size_t kOverhead = cord_internal::kFlatOverhead; + + using CordRepFlat = cord_internal::CordRepFlat; + + // `Rep` is the internal data representation of a CordBuffer. The internal + // representation has an internal small size optimization similar to + // std::string (SSO). + struct Rep { + // Inline SSO size of a CordBuffer + static constexpr size_t kInlineCapacity = sizeof(intptr_t) * 2 - 1; + + // Creates a default instance with kInlineCapacity. + Rep() : short_rep{} {} + + // Creates an instance managing an allocated non zero CordRep. + explicit Rep(cord_internal::CordRepFlat* rep) : long_rep{rep} { + assert(rep != nullptr); + } + + // Returns true if this instance manages the SSO internal buffer. + bool is_short() const { + constexpr size_t offset = offsetof(Short, raw_size); + return (reinterpret_cast(this)[offset] & 1) != 0; + } + + // Returns the available area of the internal SSO data + absl::Span short_available() { + const size_t length = short_length(); + return absl::Span(short_rep.data + length, + kInlineCapacity - length); + } + + // Returns the available area of the internal SSO data + absl::Span long_available() { + assert(!is_short()); + const size_t length = long_rep.rep->length; + return absl::Span(long_rep.rep->Data() + length, + long_rep.rep->Capacity() - length); + } + + // Returns the length of the internal SSO data. 
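Editorial aside on the `raw_size` encoding used below: because the heap `CordRepFlat*` stored in the overlapping union member is always even-aligned, the low bit of the byte it shares with `raw_size` can double as the 'is short' flag (on little-endian; the big-endian layout flips which byte carries it, as the `Short`/`Long` structs further down show), and the SSO length lives in the remaining bits. A standalone sketch of the same trick:

  #include <cstddef>
  #include <cstdint>

  constexpr uint8_t EncodeShortLength(size_t length) {
    return static_cast<uint8_t>((length << 1) + 1);  // low bit set => short form
  }
  constexpr size_t DecodeShortLength(uint8_t raw_size) {
    return static_cast<size_t>(raw_size >> 1);       // drop the flag bit
  }
  static_assert(EncodeShortLength(3) == 0x07, "length 3 encodes as 0b111");
  static_assert(DecodeShortLength(EncodeShortLength(3)) == 3, "round trip");
  static_assert((EncodeShortLength(0) & 1) == 1, "even low byte means 'pointer'");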
+ size_t short_length() const { + assert(is_short()); + return static_cast(short_rep.raw_size >> 1); + } + + // Sets the length of the internal SSO data. + // Disregards any previously set CordRep instance. + void set_short_length(size_t length) { + short_rep.raw_size = static_cast((length << 1) + 1); + } + + // Adds `n` to the current short length. + void add_short_length(size_t n) { + assert(is_short()); + short_rep.raw_size += static_cast(n << 1); + } + + // Returns reference to the internal SSO data buffer. + char* data() { + assert(is_short()); + return short_rep.data; + } + const char* data() const { + assert(is_short()); + return short_rep.data; + } + + // Returns a pointer the external CordRep managed by this instance. + cord_internal::CordRepFlat* rep() const { + assert(!is_short()); + return long_rep.rep; + } + + // The internal representation takes advantage of the fact that allocated + // memory is always on an even address, and uses the least significant bit + // of the first or last byte (depending on endianness) as the inline size + // indicator overlapping with the least significant byte of the CordRep*. +#if defined(ABSL_IS_BIG_ENDIAN) + struct Long { + explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {} + void* padding; + cord_internal::CordRepFlat* rep; + }; + struct Short { + char data[sizeof(Long) - 1]; + char raw_size = 1; + }; +#else + struct Long { + explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {} + cord_internal::CordRepFlat* rep; + void* padding; + }; + struct Short { + char raw_size = 1; + char data[sizeof(Long) - 1]; + }; +#endif + + union { + Long long_rep; + Short short_rep; + }; + }; + + // Power2 functions + static bool IsPow2(size_t size) { return absl::has_single_bit(size); } + static size_t Log2Floor(size_t size) { + return static_cast(absl::bit_width(size) - 1); + } + static size_t Log2Ceil(size_t size) { + return static_cast(absl::bit_width(size - 1)); + } + + // Implementation of `CreateWithCustomLimit()`. + // This implementation allows for future memory allocation hints to + // be passed down into the CordRepFlat allocation function. + template + static CordBuffer CreateWithCustomLimitImpl(size_t block_size, + size_t capacity, + AllocationHints... hints); + + // Consumes the value contained in this instance and resets the instance. + // This method returns a non-null Cordrep* if the current instances manages a + // CordRep*, and resets the instance to an empty SSO instance. If the current + // instance is an SSO instance, then this method returns nullptr and sets + // `short_value` to the inlined data value. In either case, the current + // instance length is reset to zero. + // This method is intended to be used by Cord internal functions only. + cord_internal::CordRep* ConsumeValue(absl::string_view& short_value) { + cord_internal::CordRep* rep = nullptr; + if (rep_.is_short()) { + short_value = absl::string_view(rep_.data(), rep_.short_length()); + } else { + rep = rep_.rep(); + } + rep_.set_short_length(0); + return rep; + } + + // Internal constructor. + explicit CordBuffer(cord_internal::CordRepFlat* rep) : rep_(rep) { + assert(rep != nullptr); + } + + Rep rep_; + + friend class Cord; + friend class CordBufferTestPeer; +}; + +inline constexpr size_t CordBuffer::MaximumPayload() { + return cord_internal::kMaxFlatLength; +} + +inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) { + // TODO(absl-team): Use std::min when C++11 support is dropped. + return (kCustomLimit < block_size ? 
kCustomLimit : block_size) - + cord_internal::kFlatOverhead; +} + +inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) { + if (capacity > Rep::kInlineCapacity) { + auto* rep = cord_internal::CordRepFlat::New(capacity); + rep->length = 0; + return CordBuffer(rep); + } + return CordBuffer(); +} + +template +inline CordBuffer CordBuffer::CreateWithCustomLimitImpl( + size_t block_size, size_t capacity, AllocationHints... hints) { + assert(IsPow2(block_size)); + capacity = (std::min)(capacity, kCustomLimit); + block_size = (std::min)(block_size, kCustomLimit); + if (capacity + kOverhead >= block_size) { + capacity = block_size; + } else if (capacity <= kDefaultLimit) { + capacity = capacity + kOverhead; + } else if (!IsPow2(capacity)) { + // Check if rounded up to next power 2 is a good enough fit + // with limited waste making it an acceptable direct fit. + const size_t rounded_up = size_t{1} << Log2Ceil(capacity); + const size_t slop = rounded_up - capacity; + if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) { + capacity = rounded_up; + } else { + // Round down to highest power of 2 <= capacity. + // Consider a more aggressive step down if that may reduce the + // risk of fragmentation where 'people are holding it wrong'. + const size_t rounded_down = size_t{1} << Log2Floor(capacity); + capacity = rounded_down; + } + } + const size_t length = capacity - kOverhead; + auto* rep = CordRepFlat::New(CordRepFlat::Large(), length, hints...); + rep->length = 0; + return CordBuffer(rep); +} + +inline CordBuffer CordBuffer::CreateWithCustomLimit(size_t block_size, + size_t capacity) { + return CreateWithCustomLimitImpl(block_size, capacity); +} + +inline CordBuffer::~CordBuffer() { + if (!rep_.is_short()) { + cord_internal::CordRepFlat::Delete(rep_.rep()); + } +} + +inline CordBuffer::CordBuffer(CordBuffer&& rhs) noexcept : rep_(rhs.rep_) { + rhs.rep_.set_short_length(0); +} + +inline CordBuffer& CordBuffer::operator=(CordBuffer&& rhs) noexcept { + if (!rep_.is_short()) cord_internal::CordRepFlat::Delete(rep_.rep()); + rep_ = rhs.rep_; + rhs.rep_.set_short_length(0); + return *this; +} + +inline absl::Span CordBuffer::available() { + return rep_.is_short() ? rep_.short_available() : rep_.long_available(); +} + +inline absl::Span CordBuffer::available_up_to(size_t size) { + return available().subspan(0, size); +} + +inline char* CordBuffer::data() { + return rep_.is_short() ? rep_.data() : rep_.rep()->Data(); +} + +inline const char* CordBuffer::data() const { + return rep_.is_short() ? rep_.data() : rep_.rep()->Data(); +} + +inline size_t CordBuffer::capacity() const { + return rep_.is_short() ? Rep::kInlineCapacity : rep_.rep()->Capacity(); +} + +inline size_t CordBuffer::length() const { + return rep_.is_short() ? 
rep_.short_length() : rep_.rep()->length; +} + +inline void CordBuffer::SetLength(size_t length) { + ABSL_HARDENING_ASSERT(length <= capacity()); + if (rep_.is_short()) { + rep_.set_short_length(length); + } else { + rep_.rep()->length = length; + } +} + +inline void CordBuffer::IncreaseLengthBy(size_t n) { + ABSL_HARDENING_ASSERT(n <= capacity() && length() + n <= capacity()); + if (rep_.is_short()) { + rep_.add_short_length(n); + } else { + rep_.rep()->length += n; + } +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CORD_BUFFER_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer_test.cc new file mode 100644 index 0000000000..5c7437aece --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_buffer_test.cc @@ -0,0 +1,320 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/cord_buffer.h" + + +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/strings/internal/cord_rep_test_util.h" +#include "absl/types/span.h" + +using testing::Eq; +using testing::Ge; +using testing::Le; +using testing::Ne; + +namespace absl { +ABSL_NAMESPACE_BEGIN + +class CordBufferTestPeer { + public: + static cord_internal::CordRep* ConsumeValue(CordBuffer& buffer, + absl::string_view& short_value) { + return buffer.ConsumeValue(short_value); + } +}; + +namespace { + +using ::absl::cordrep_testing::CordToString; + +constexpr size_t kInlinedSize = sizeof(CordBuffer) - 1; +constexpr size_t kDefaultLimit = CordBuffer::kDefaultLimit; +constexpr size_t kCustomLimit = CordBuffer::kCustomLimit; +constexpr size_t kMaxFlatSize = cord_internal::kMaxFlatSize; +constexpr size_t kMaxFlatLength = cord_internal::kMaxFlatLength; +constexpr size_t kFlatOverhead = cord_internal::kFlatOverhead; + +constexpr size_t k8KiB = 8 << 10; +constexpr size_t k16KiB = 16 << 10; +constexpr size_t k64KiB = 64 << 10; +constexpr size_t k1MB = 1 << 20; + +class CordBufferTest : public testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(MediumSize, CordBufferTest, + testing::Values(1, kInlinedSize - 1, kInlinedSize, + kInlinedSize + 1, kDefaultLimit - 1, + kDefaultLimit)); + +TEST_P(CordBufferTest, MaximumPayload) { + EXPECT_THAT(CordBuffer::MaximumPayload(), Eq(kMaxFlatLength)); + EXPECT_THAT(CordBuffer::MaximumPayload(512), Eq(512 - kFlatOverhead)); + EXPECT_THAT(CordBuffer::MaximumPayload(k64KiB), Eq(k64KiB - kFlatOverhead)); + EXPECT_THAT(CordBuffer::MaximumPayload(k1MB), Eq(k64KiB - kFlatOverhead)); +} + +TEST(CordBufferTest, ConstructDefault) { + CordBuffer buffer; + EXPECT_THAT(buffer.capacity(), Eq(sizeof(CordBuffer) - 1)); + EXPECT_THAT(buffer.length(), Eq(0)); + EXPECT_THAT(buffer.data(), Ne(nullptr)); + 
EXPECT_THAT(buffer.available().data(), Eq(buffer.data())); + EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity())); + memset(buffer.data(), 0xCD, buffer.capacity()); +} + +TEST(CordBufferTest, CreateSsoWithDefaultLimit) { + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(3); + EXPECT_THAT(buffer.capacity(), Ge(3)); + EXPECT_THAT(buffer.capacity(), Le(sizeof(CordBuffer))); + EXPECT_THAT(buffer.length(), Eq(0)); + memset(buffer.data(), 0xCD, buffer.capacity()); + + memcpy(buffer.data(), "Abc", 3); + buffer.SetLength(3); + EXPECT_THAT(buffer.length(), Eq(3)); + absl::string_view short_value; + EXPECT_THAT(CordBufferTestPeer::ConsumeValue(buffer, short_value), + Eq(nullptr)); + EXPECT_THAT(absl::string_view(buffer.data(), 3), Eq("Abc")); + EXPECT_THAT(short_value, Eq("Abc")); +} + +TEST_P(CordBufferTest, Available) { + const size_t requested = GetParam(); + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested); + EXPECT_THAT(buffer.available().data(), Eq(buffer.data())); + EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity())); + + buffer.SetLength(2); + EXPECT_THAT(buffer.available().data(), Eq(buffer.data() + 2)); + EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity() - 2)); +} + +TEST_P(CordBufferTest, IncreaseLengthBy) { + const size_t requested = GetParam(); + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested); + buffer.IncreaseLengthBy(2); + EXPECT_THAT(buffer.length(), Eq(2)); + buffer.IncreaseLengthBy(5); + EXPECT_THAT(buffer.length(), Eq(7)); +} + +TEST_P(CordBufferTest, AvailableUpTo) { + const size_t requested = GetParam(); + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested); + size_t expected_up_to = std::min(3, buffer.capacity()); + EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data())); + EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to)); + + buffer.SetLength(2); + expected_up_to = std::min(3, buffer.capacity() - 2); + EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data() + 2)); + EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to)); +} + +// Returns the maximum capacity for a given block_size and requested size. +size_t MaxCapacityFor(size_t block_size, size_t requested) { + requested = (std::min)(requested, cord_internal::kMaxLargeFlatSize); + // Maximum returned size is always capped at block_size - kFlatOverhead. 
+ return block_size - kFlatOverhead; +} + +TEST_P(CordBufferTest, CreateWithDefaultLimit) { + const size_t requested = GetParam(); + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested); + EXPECT_THAT(buffer.capacity(), Ge(requested)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested))); + EXPECT_THAT(buffer.length(), Eq(0)); + + memset(buffer.data(), 0xCD, buffer.capacity()); + + std::string data(requested - 1, 'x'); + memcpy(buffer.data(), data.c_str(), requested); + buffer.SetLength(requested); + + EXPECT_THAT(buffer.length(), Eq(requested)); + EXPECT_THAT(absl::string_view(buffer.data()), Eq(data)); +} + +TEST(CordBufferTest, CreateWithDefaultLimitAskingFor2GB) { + constexpr size_t k2GiB = 1U << 31; + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(k2GiB); + // Expect to never be awarded more than a reasonable memory size, even in + // cases where a (debug) memory allocator may grant us somewhat more memory + // than `kDefaultLimit` which should be no more than `2 * kDefaultLimit` + EXPECT_THAT(buffer.capacity(), Le(2 * CordBuffer::kDefaultLimit)); + EXPECT_THAT(buffer.length(), Eq(0)); + EXPECT_THAT(buffer.data(), Ne(nullptr)); + memset(buffer.data(), 0xCD, buffer.capacity()); +} + +TEST_P(CordBufferTest, MoveConstruct) { + const size_t requested = GetParam(); + CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested); + const size_t capacity = from.capacity(); + memcpy(from.data(), "Abc", 4); + from.SetLength(4); + + CordBuffer to(std::move(from)); + EXPECT_THAT(to.capacity(), Eq(capacity)); + EXPECT_THAT(to.length(), Eq(4)); + EXPECT_THAT(absl::string_view(to.data()), Eq("Abc")); + + EXPECT_THAT(from.length(), Eq(0)); // NOLINT +} + +TEST_P(CordBufferTest, MoveAssign) { + const size_t requested = GetParam(); + CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested); + const size_t capacity = from.capacity(); + memcpy(from.data(), "Abc", 4); + from.SetLength(4); + + CordBuffer to; + to = std::move(from); + EXPECT_THAT(to.capacity(), Eq(capacity)); + EXPECT_THAT(to.length(), Eq(4)); + EXPECT_THAT(absl::string_view(to.data()), Eq("Abc")); + + EXPECT_THAT(from.length(), Eq(0)); // NOLINT +} + +TEST_P(CordBufferTest, ConsumeValue) { + const size_t requested = GetParam(); + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested); + memcpy(buffer.data(), "Abc", 4); + buffer.SetLength(3); + + absl::string_view short_value; + if (cord_internal::CordRep* rep = + CordBufferTestPeer::ConsumeValue(buffer, short_value)) { + EXPECT_THAT(CordToString(rep), Eq("Abc")); + cord_internal::CordRep::Unref(rep); + } else { + EXPECT_THAT(short_value, Eq("Abc")); + } + EXPECT_THAT(buffer.length(), Eq(0)); +} + +TEST_P(CordBufferTest, CreateWithCustomLimitWithinDefaultLimit) { + const size_t requested = GetParam(); + CordBuffer buffer = + CordBuffer::CreateWithCustomLimit(kMaxFlatSize, requested); + EXPECT_THAT(buffer.capacity(), Ge(requested)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested))); + EXPECT_THAT(buffer.length(), Eq(0)); + + memset(buffer.data(), 0xCD, buffer.capacity()); + + std::string data(requested - 1, 'x'); + memcpy(buffer.data(), data.c_str(), requested); + buffer.SetLength(requested); + + EXPECT_THAT(buffer.length(), Eq(requested)); + EXPECT_THAT(absl::string_view(buffer.data()), Eq(data)); +} + +TEST(CordLargeBufferTest, CreateAtOrBelowDefaultLimit) { + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, kDefaultLimit); + EXPECT_THAT(buffer.capacity(), Ge(kDefaultLimit)); + 
EXPECT_THAT(buffer.capacity(), + Le(MaxCapacityFor(kMaxFlatSize, kDefaultLimit))); + + buffer = CordBuffer::CreateWithCustomLimit(k64KiB, 3178); + EXPECT_THAT(buffer.capacity(), Ge(3178)); +} + +TEST(CordLargeBufferTest, CreateWithCustomLimit) { + ASSERT_THAT((kMaxFlatSize & (kMaxFlatSize - 1)) == 0, "Must be power of 2"); + + for (size_t size = kMaxFlatSize; size <= kCustomLimit; size *= 2) { + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(size, size); + size_t expected = size - kFlatOverhead; + ASSERT_THAT(buffer.capacity(), Ge(expected)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(size, expected))); + } +} + +TEST(CordLargeBufferTest, CreateWithTooLargeLimit) { + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, k1MB); + ASSERT_THAT(buffer.capacity(), Ge(k64KiB - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k64KiB, k1MB))); +} + +TEST(CordLargeBufferTest, CreateWithHugeValueForOverFlowHardening) { + for (size_t dist_from_max = 0; dist_from_max <= 32; ++dist_from_max) { + size_t capacity = std::numeric_limits::max() - dist_from_max; + + CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(capacity); + ASSERT_THAT(buffer.capacity(), Ge(kDefaultLimit)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, capacity))); + + for (size_t limit = kMaxFlatSize; limit <= kCustomLimit; limit *= 2) { + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(limit, capacity); + ASSERT_THAT(buffer.capacity(), Ge(limit - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(limit, capacity))); + } + } +} + +TEST(CordLargeBufferTest, CreateWithSmallLimit) { + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(512, 1024); + ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 1024))); + + // Ask for precise block size, should return size - kOverhead + buffer = CordBuffer::CreateWithCustomLimit(512, 512); + ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 512))); + + // Corner case: 511 < block_size, but 511 + kOverhead is above + buffer = CordBuffer::CreateWithCustomLimit(512, 511); + ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 511))); + + // Corner case: 498 + kOverhead < block_size + buffer = CordBuffer::CreateWithCustomLimit(512, 498); + ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 498))); +} + +TEST(CordLargeBufferTest, CreateWasteFull) { + // 15 KiB gets rounded down to next pow2 value. 
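  // (Editorial walk-through of CreateWithCustomLimitImpl() for these values:
  //  requested = 15 << 10 = 15360 with block_size = 16384. Rounding up to the
  //  next power of 2 would leave slop = 16384 - 15360 = 1024 bytes, which is
  //  well above kMaxPageSlop + kOverhead = 128 + 13 = 141 on 64-bit builds, so
  //  the implementation rounds down to 8192 instead and the usable capacity
  //  comes out at roughly 8192 - kFlatOverhead, as the expectations below
  //  assert.)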
+ const size_t requested = (15 << 10); + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested); + ASSERT_THAT(buffer.capacity(), Ge(k8KiB - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k8KiB, requested))); +} + +TEST(CordLargeBufferTest, CreateSmallSlop) { + const size_t requested = k16KiB - 2 * kFlatOverhead; + CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested); + ASSERT_THAT(buffer.capacity(), Ge(k16KiB - kFlatOverhead)); + EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k16KiB, requested))); +} + +} // namespace +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_reader_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_reader_test.cc index d9a9a76d1e..8e7183bff0 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_reader_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_reader_test.cc @@ -126,7 +126,7 @@ TEST(CordRingReaderTest, SeekForward) { reader.Reset(ring); size_t consumed = 0; - size_t remaining = ring->length;; + size_t remaining = ring->length; for (int i = 0; i < flats.size(); ++i) { CordRepRing::index_type index = ring->advance(head, i); size_t offset = consumed; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_test.cc index f131859532..f39a0a4f8d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_ring_test.cc @@ -44,7 +44,6 @@ using ::absl::cord_internal::CordRepFlat; using ::absl::cord_internal::CordRepRing; using ::absl::cord_internal::CordRepSubstring; -using ::absl::cord_internal::CONCAT; using ::absl::cord_internal::EXTERNAL; using ::absl::cord_internal::SUBSTRING; @@ -262,16 +261,6 @@ CordRepSubstring* RemoveSuffix(size_t length, CordRep* rep) { return MakeSubstring(0, rep->length - length, rep); } -CordRepConcat* MakeConcat(CordRep* left, CordRep* right, int depth = 0) { - auto* concat = new CordRepConcat; - concat->tag = CONCAT; - concat->length = left->length + right->length; - concat->left = left; - concat->right = right; - concat->set_depth(depth); - return concat; -} - enum Composition { kMix, kAppend, kPrepend }; Composition RandomComposition() { @@ -296,7 +285,6 @@ constexpr const char* kFox = "The quick brown fox jumps over the lazy dog"; constexpr const char* kFoxFlats[] = {"The ", "quick ", "brown ", "fox ", "jumps ", "over ", "the ", "lazy ", "dog"}; -constexpr const char* kAlphabet = "abcdefghijklmnopqrstuvwxyz"; CordRepRing* FromFlats(Span flats, Composition composition = kAppend) { @@ -594,35 +582,6 @@ TEST_P(CordRingCreateFromTreeTest, CreateFromSubstringOfLargeExternal) { EXPECT_THAT(ToRawFlats(result), ElementsAre(str)); } -TEST_P(CordRingBuildInputTest, CreateFromConcat) { - CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"), - MakeFlat("nopqrstuv"), MakeFlat("wxyz")}; - auto* left = MakeConcat(RefIfInputSharedIndirect(flats[0]), flats[1]); - auto* right = MakeConcat(flats[2], RefIfInputSharedIndirect(flats[3])); - auto* concat = RefIfInputShared(MakeConcat(left, right)); - CordRepRing* result = NeedsUnref(CordRepRing::Create(concat)); - ASSERT_THAT(result, IsValidRingBuffer()); - EXPECT_THAT(result->length, Eq(26)); - 
EXPECT_THAT(ToString(result), Eq(kAlphabet)); -} - -TEST_P(CordRingBuildInputTest, CreateFromSubstringConcat) { - for (size_t off = 0; off < 26; ++off) { - for (size_t len = 1; len < 26 - off; ++len) { - CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"), - MakeFlat("nopqrstuv"), MakeFlat("wxyz")}; - auto* left = MakeConcat(RefIfInputSharedIndirect(flats[0]), flats[1]); - auto* right = MakeConcat(flats[2], RefIfInputSharedIndirect(flats[3])); - auto* concat = MakeConcat(left, right); - auto* child = RefIfInputShared(MakeSubstring(off, len, concat)); - CordRepRing* result = NeedsUnref(CordRepRing::Create(child)); - ASSERT_THAT(result, IsValidRingBuffer()); - ASSERT_THAT(result->length, Eq(len)); - ASSERT_THAT(ToString(result), string_view(kAlphabet).substr(off, len)); - } - } -} - TEST_P(CordRingCreateTest, Properties) { absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz"; CordRepRing* result = NeedsUnref(CordRepRing::Create(MakeFlat(str1), 120)); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_test.cc index 50079b7c43..a4fa8955d3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/cord_test.cc @@ -28,20 +28,37 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -#include "absl/base/casts.h" #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/fixed_array.h" +#include "absl/hash/hash.h" +#include "absl/random/random.h" #include "absl/strings/cord_test_helpers.h" #include "absl/strings/cordz_test_helpers.h" +#include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" +// convenience local constants +static constexpr auto FLAT = absl::cord_internal::FLAT; +static constexpr auto MAX_FLAT_TAG = absl::cord_internal::MAX_FLAT_TAG; + typedef std::mt19937_64 RandomEngine; +using absl::cord_internal::CordRep; +using absl::cord_internal::CordRepBtree; +using absl::cord_internal::CordRepConcat; +using absl::cord_internal::CordRepCrc; +using absl::cord_internal::CordRepExternal; +using absl::cord_internal::CordRepFlat; +using absl::cord_internal::CordRepSubstring; +using absl::cord_internal::CordzUpdateTracker; +using absl::cord_internal::kFlatOverhead; +using absl::cord_internal::kMaxFlatLength; + static std::string RandomLowercaseString(RandomEngine* rng); static std::string RandomLowercaseString(RandomEngine* rng, size_t length); @@ -184,6 +201,7 @@ class CordTestPeer { } static bool IsTree(const Cord& c) { return c.contents_.is_tree(); } + static CordRep* Tree(const Cord& c) { return c.contents_.tree(); } static cord_internal::CordzInfo* GetCordzInfo(const Cord& c) { return c.contents_.cordz_info(); @@ -191,14 +209,12 @@ class CordTestPeer { static Cord MakeSubstring(Cord src, size_t offset, size_t length) { ABSL_RAW_CHECK(src.contents_.is_tree(), "Can not be inlined"); + ABSL_RAW_CHECK(src.ExpectedChecksum() == absl::nullopt, + "Can not be hardened"); Cord cord; - auto* rep = new cord_internal::CordRepSubstring; - rep->tag = cord_internal::SUBSTRING; - rep->child = cord_internal::CordRep::Ref(src.contents_.tree()); - rep->start = offset; - rep->length = length; - cord.contents_.EmplaceTree(rep, - cord_internal::CordzUpdateTracker::kSubCord); + auto* tree = 
cord_internal::SkipCrcNode(src.contents_.tree()); + auto* rep = CordRepSubstring::Create(CordRep::Ref(tree), offset, length); + cord.contents_.EmplaceTree(rep, CordzUpdateTracker::kSubCord); return cord; } }; @@ -206,31 +222,107 @@ class CordTestPeer { ABSL_NAMESPACE_END } // namespace absl -// The CordTest fixture runs all tests with and without Cord Btree enabled. -class CordTest : public testing::TestWithParam { +// The CordTest fixture runs all tests with and without Cord Btree enabled, +// and with our without expected CRCs being set on the subject Cords. +class CordTest : public testing::TestWithParam { public: - CordTest() : was_btree_(absl::cord_internal::cord_btree_enabled.load()) { - absl::cord_internal::cord_btree_enabled.store(UseBtree()); - } - ~CordTest() override { - absl::cord_internal::cord_btree_enabled.store(was_btree_); - } - // Returns true if test is running with btree enabled. - bool UseBtree() const { return GetParam(); } + bool UseCrc() const { return GetParam() == 2 || GetParam() == 3; } + void MaybeHarden(absl::Cord& c) { + if (UseCrc()) { + c.SetExpectedChecksum(1); + } + } + absl::Cord MaybeHardened(absl::Cord c) { + MaybeHarden(c); + return c; + } // Returns human readable string representation of the test parameter. - static std::string ToString(testing::TestParamInfo param) { - return param.param ? "Btree" : "Concat"; + static std::string ToString(testing::TestParamInfo param) { + switch (param.param) { + case 0: + return "Btree"; + case 1: + return "BtreeHardened"; + default: + assert(false); + return "???"; + } } - - private: - const bool was_btree_; }; -INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Bool(), +INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Values(0, 1), CordTest::ToString); +TEST(CordRepFlat, AllFlatCapacities) { + // Explicitly and redundantly assert built-in min/max limits + static_assert(absl::cord_internal::kFlatOverhead < 32, ""); + static_assert(absl::cord_internal::kMinFlatSize == 32, ""); + static_assert(absl::cord_internal::kMaxLargeFlatSize == 256 << 10, ""); + EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(FLAT), 32); + EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(MAX_FLAT_TAG), 256 << 10); + + // Verify all tags to map perfectly back and forth, and + // that sizes are monotonically increasing. 
+ size_t last_size = 0; + for (int tag = FLAT; tag <= MAX_FLAT_TAG; ++tag) { + size_t size = absl::cord_internal::TagToAllocatedSize(tag); + ASSERT_GT(size, last_size); + ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size); + last_size = size; + } + + // All flat size from 32 - 512 are 8 byte granularity + for (size_t size = 32; size <= 512; size += 8) { + ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size); + uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size); + ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size); + } + + // All flat sizes from 512 - 8192 are 64 byte granularity + for (size_t size = 512; size <= 8192; size += 64) { + ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size); + uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size); + ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size); + } + + // All flat sizes from 8KB to 256KB are 4KB granularity + for (size_t size = 8192; size <= 256 * 1024; size += 4 * 1024) { + ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size); + uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size); + ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size); + } +} + +TEST(CordRepFlat, MaxFlatSize) { + CordRepFlat* flat = CordRepFlat::New(kMaxFlatLength); + EXPECT_EQ(flat->Capacity(), kMaxFlatLength); + CordRep::Unref(flat); + + flat = CordRepFlat::New(kMaxFlatLength * 4); + EXPECT_EQ(flat->Capacity(), kMaxFlatLength); + CordRep::Unref(flat); +} + +TEST(CordRepFlat, MaxLargeFlatSize) { + const size_t size = 256 * 1024 - kFlatOverhead; + CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), size); + EXPECT_GE(flat->Capacity(), size); + CordRep::Unref(flat); +} + +TEST(CordRepFlat, AllFlatSizes) { + const size_t kMaxSize = 256 * 1024; + for (size_t size = 32; size <= kMaxSize; size *=2) { + const size_t length = size - kFlatOverhead - 1; + CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), length); + EXPECT_GE(flat->Capacity(), length); + memset(flat->Data(), 0xCD, flat->Capacity()); + CordRep::Unref(flat); + } +} + TEST_P(CordTest, AllFlatSizes) { using absl::strings_internal::CordTestAccess; @@ -242,6 +334,7 @@ TEST_P(CordTest, AllFlatSizes) { } absl::Cord dst(src); + MaybeHarden(dst); EXPECT_EQ(std::string(dst), src) << s; } } @@ -273,6 +366,7 @@ TEST_P(CordTest, GigabyteCordFromExternal) { c.Append(from); c.Append(from); c.Append(from); + MaybeHarden(c); } for (int i = 0; i < 1024; ++i) { @@ -301,6 +395,8 @@ bool my_unique_true_boolean = true; TEST_P(CordTest, Assignment) { absl::Cord x(absl::string_view("hi there")); absl::Cord y(x); + MaybeHarden(y); + ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt); ASSERT_EQ(std::string(x), "hi there"); ASSERT_EQ(std::string(y), "hi there"); ASSERT_TRUE(x == y); @@ -354,6 +450,7 @@ TEST_P(CordTest, Assignment) { TEST_P(CordTest, StartsEndsWith) { absl::Cord x(absl::string_view("abcde")); + MaybeHarden(x); absl::Cord empty(""); ASSERT_TRUE(x.StartsWith(absl::Cord("abcde"))); @@ -391,6 +488,7 @@ TEST_P(CordTest, Subcord) { absl::Cord a; AppendWithFragments(s, &rng, &a); + MaybeHarden(a); ASSERT_EQ(s, std::string(a)); // Check subcords of a, from a variety of interesting points. 
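The parameterized fixture above runs every cord test with and without an expected CRC attached through MaybeHarden(). A minimal standalone sketch of that hardening pattern, using only the public SetExpectedChecksum()/ExpectedChecksum() calls the tests themselves exercise (this sketch is illustrative and not part of the patch):

#include <cassert>
#include "absl/strings/cord.h"
#include "absl/types/optional.h"

// Sketch: attaching an expected checksum does not change the cord's value;
// it only records a CRC that readers can later query back.
void HardeningSketch() {
  absl::Cord c("hello");
  assert(c.ExpectedChecksum() == absl::nullopt);   // no checksum by default
  c.SetExpectedChecksum(1);                        // same value MaybeHarden() uses
  assert(c.ExpectedChecksum().value_or(0) == 1);   // checksum is retrievable
  assert(c == "hello");                            // contents are unchanged
}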
@@ -412,6 +510,9 @@ TEST_P(CordTest, Subcord) { ASSERT_EQ(absl::string_view(s).substr(pos, end_pos - pos), std::string(sa)) << a; + if (pos != 0 || end_pos != a.size()) { + ASSERT_EQ(sa.ExpectedChecksum(), absl::nullopt); + } } } @@ -451,10 +552,19 @@ TEST_P(CordTest, Swap) { absl::string_view b("Mandark"); absl::Cord x(a); absl::Cord y(b); + MaybeHarden(x); swap(x, y); + if (UseCrc()) { + ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt); + ASSERT_EQ(y.ExpectedChecksum(), 1); + } ASSERT_EQ(x, absl::Cord(b)); ASSERT_EQ(y, absl::Cord(a)); x.swap(y); + if (UseCrc()) { + ASSERT_EQ(x.ExpectedChecksum(), 1); + ASSERT_EQ(y.ExpectedChecksum(), absl::nullopt); + } ASSERT_EQ(x, absl::Cord(a)); ASSERT_EQ(y, absl::Cord(b)); } @@ -479,11 +589,320 @@ static void VerifyCopyToString(const absl::Cord& cord) { } TEST_P(CordTest, CopyToString) { - VerifyCopyToString(absl::Cord()); - VerifyCopyToString(absl::Cord("small cord")); - VerifyCopyToString( + VerifyCopyToString(absl::Cord()); // empty cords cannot carry CRCs + VerifyCopyToString(MaybeHardened(absl::Cord("small cord"))); + VerifyCopyToString(MaybeHardened( absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ", - "copying ", "to ", "a ", "string."})); + "copying ", "to ", "a ", "string."}))); +} + +TEST_P(CordTest, AppendEmptyBuffer) { + absl::Cord cord; + cord.Append(absl::CordBuffer()); + cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000)); +} + +TEST_P(CordTest, AppendEmptyBufferToFlat) { + absl::Cord cord(std::string(2000, 'x')); + cord.Append(absl::CordBuffer()); + cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000)); +} + +TEST_P(CordTest, AppendEmptyBufferToTree) { + absl::Cord cord(std::string(2000, 'x')); + cord.Append(std::string(2000, 'y')); + cord.Append(absl::CordBuffer()); + cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000)); +} + +TEST_P(CordTest, AppendSmallBuffer) { + absl::Cord cord; + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3); + ASSERT_THAT(buffer.capacity(), ::testing::Le(15)); + memcpy(buffer.data(), "Abc", 3); + buffer.SetLength(3); + cord.Append(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + buffer = absl::CordBuffer::CreateWithDefaultLimit(3); + memcpy(buffer.data(), "defgh", 5); + buffer.SetLength(5); + cord.Append(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("Abcdefgh")); +} + +TEST_P(CordTest, AppendAndPrependBufferArePrecise) { + // Create a cord large enough to force 40KB flats. 
+ std::string test_data(absl::cord_internal::kMaxFlatLength * 10, 'x'); + absl::Cord cord1(test_data); + absl::Cord cord2(test_data); + const size_t size1 = cord1.EstimatedMemoryUsage(); + const size_t size2 = cord2.EstimatedMemoryUsage(); + + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3); + memcpy(buffer.data(), "Abc", 3); + buffer.SetLength(3); + cord1.Append(std::move(buffer)); + + buffer = absl::CordBuffer::CreateWithDefaultLimit(3); + memcpy(buffer.data(), "Abc", 3); + buffer.SetLength(3); + cord2.Prepend(std::move(buffer)); + +#ifndef NDEBUG + // Allow 32 bytes new CordRepFlat, and 128 bytes for 'glue nodes' + constexpr size_t kMaxDelta = 128 + 32; +#else + // Allow 256 bytes extra for 'allocation debug overhead' + constexpr size_t kMaxDelta = 128 + 32 + 256; +#endif + + EXPECT_LE(cord1.EstimatedMemoryUsage() - size1, kMaxDelta); + EXPECT_LE(cord2.EstimatedMemoryUsage() - size2, kMaxDelta); + + EXPECT_EQ(cord1, absl::StrCat(test_data, "Abc")); + EXPECT_EQ(cord2, absl::StrCat("Abc", test_data)); +} + +TEST_P(CordTest, PrependSmallBuffer) { + absl::Cord cord; + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3); + ASSERT_THAT(buffer.capacity(), ::testing::Le(15)); + memcpy(buffer.data(), "Abc", 3); + buffer.SetLength(3); + cord.Prepend(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + buffer = absl::CordBuffer::CreateWithDefaultLimit(3); + memcpy(buffer.data(), "defgh", 5); + buffer.SetLength(5); + cord.Prepend(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("defghAbc")); +} + +TEST_P(CordTest, AppendLargeBuffer) { + absl::Cord cord; + + std::string s1(700, '1'); + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size()); + memcpy(buffer.data(), s1.data(), s1.size()); + buffer.SetLength(s1.size()); + cord.Append(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + std::string s2(1000, '2'); + buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size()); + memcpy(buffer.data(), s2.data(), s2.size()); + buffer.SetLength(s2.size()); + cord.Append(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre(s1, s2)); +} + +TEST_P(CordTest, PrependLargeBuffer) { + absl::Cord cord; + + std::string s1(700, '1'); + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size()); + memcpy(buffer.data(), s1.data(), s1.size()); + buffer.SetLength(s1.size()); + cord.Prepend(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + std::string s2(1000, '2'); + buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size()); + memcpy(buffer.data(), s2.data(), s2.size()); + buffer.SetLength(s2.size()); + cord.Prepend(std::move(buffer)); + EXPECT_EQ(buffer.length(), 0); // NOLINT + EXPECT_GT(buffer.capacity(), 0); // NOLINT + + EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre(s2, s1)); +} + +class CordAppendBufferTest : public testing::TestWithParam { + public: + size_t is_default() const { return GetParam(); } + + // Returns human readable string representation of the test parameter. + static std::string ToString(testing::TestParamInfo param) { + return param.param ? 
"DefaultLimit" : "CustomLimit"; + } + + size_t limit() const { + return is_default() ? absl::CordBuffer::kDefaultLimit + : absl::CordBuffer::kCustomLimit; + } + + size_t maximum_payload() const { + return is_default() ? absl::CordBuffer::MaximumPayload() + : absl::CordBuffer::MaximumPayload(limit()); + } + + absl::CordBuffer GetAppendBuffer(absl::Cord& cord, size_t capacity, + size_t min_capacity = 16) { + return is_default() + ? cord.GetAppendBuffer(capacity, min_capacity) + : cord.GetCustomAppendBuffer(limit(), capacity, min_capacity); + } +}; + +INSTANTIATE_TEST_SUITE_P(WithParam, CordAppendBufferTest, testing::Bool(), + CordAppendBufferTest::ToString); + +TEST_P(CordAppendBufferTest, GetAppendBufferOnEmptyCord) { + absl::Cord cord; + absl::CordBuffer buffer = GetAppendBuffer(cord, 1000); + EXPECT_GE(buffer.capacity(), 1000); + EXPECT_EQ(buffer.length(), 0); +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnInlinedCord) { + static constexpr int kInlinedSize = sizeof(absl::CordBuffer) - 1; + for (int size : {6, kInlinedSize - 3, kInlinedSize - 2, 1000}) { + absl::Cord cord("Abc"); + absl::CordBuffer buffer = GetAppendBuffer(cord, size, 1); + EXPECT_GE(buffer.capacity(), 3 + size); + EXPECT_EQ(buffer.length(), 3); + EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc"); + EXPECT_TRUE(cord.empty()); + } +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnInlinedCordCapacityCloseToMax) { + // Cover the use case where we have a non empty inlined cord with some size + // 'n', and ask for something like 'uint64_max - k', assuming internal logic + // could overflow on 'uint64_max - k + size', and return a valid, but + // inefficiently smaller buffer if it would provide is the max allowed size. + for (size_t dist_from_max = 0; dist_from_max <= 4; ++dist_from_max) { + absl::Cord cord("Abc"); + size_t size = std::numeric_limits::max() - dist_from_max; + absl::CordBuffer buffer = GetAppendBuffer(cord, size, 1); + EXPECT_GE(buffer.capacity(), maximum_payload()); + EXPECT_EQ(buffer.length(), 3); + EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc"); + EXPECT_TRUE(cord.empty()); + } +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnFlat) { + // Create a cord with a single flat and extra capacity + absl::Cord cord; + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + const size_t expected_capacity = buffer.capacity(); + buffer.SetLength(3); + memcpy(buffer.data(), "Abc", 3); + cord.Append(std::move(buffer)); + + buffer = GetAppendBuffer(cord, 6); + EXPECT_EQ(buffer.capacity(), expected_capacity); + EXPECT_EQ(buffer.length(), 3); + EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc"); + EXPECT_TRUE(cord.empty()); +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnFlatWithoutMinCapacity) { + // Create a cord with a single flat and extra capacity + absl::Cord cord; + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + buffer.SetLength(30); + memset(buffer.data(), 'x', 30); + cord.Append(std::move(buffer)); + + buffer = GetAppendBuffer(cord, 1000, 900); + EXPECT_GE(buffer.capacity(), 1000); + EXPECT_EQ(buffer.length(), 0); + EXPECT_EQ(cord, std::string(30, 'x')); +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnTree) { + RandomEngine rng; + for (int num_flats : {2, 3, 100}) { + // Create a cord with `num_flats` flats and extra capacity + absl::Cord cord; + std::string prefix; + std::string last; + for (int i = 0; i < num_flats - 1; ++i) { + prefix += last; + last = RandomLowercaseString(&rng, 10); + 
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + buffer.SetLength(10); + memcpy(buffer.data(), last.data(), 10); + cord.Append(std::move(buffer)); + } + absl::CordBuffer buffer = GetAppendBuffer(cord, 6); + EXPECT_GE(buffer.capacity(), 500); + EXPECT_EQ(buffer.length(), 10); + EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), last); + EXPECT_EQ(cord, prefix); + } +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnTreeWithoutMinCapacity) { + absl::Cord cord; + for (int i = 0; i < 2; ++i) { + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + buffer.SetLength(3); + memcpy(buffer.data(), i ? "def" : "Abc", 3); + cord.Append(std::move(buffer)); + } + absl::CordBuffer buffer = GetAppendBuffer(cord, 1000, 900); + EXPECT_GE(buffer.capacity(), 1000); + EXPECT_EQ(buffer.length(), 0); + EXPECT_EQ(cord, "Abcdef"); +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnSubstring) { + // Create a large cord with a single flat and some extra capacity + absl::Cord cord; + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + buffer.SetLength(450); + memset(buffer.data(), 'x', 450); + cord.Append(std::move(buffer)); + cord.RemovePrefix(1); + + // Deny on substring + buffer = GetAppendBuffer(cord, 6); + EXPECT_EQ(buffer.length(), 0); + EXPECT_EQ(cord, std::string(449, 'x')); +} + +TEST_P(CordAppendBufferTest, GetAppendBufferOnSharedCord) { + // Create a shared cord with a single flat and extra capacity + absl::Cord cord; + absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + buffer.SetLength(3); + memcpy(buffer.data(), "Abc", 3); + cord.Append(std::move(buffer)); + absl::Cord shared_cord = cord; + + // Deny on flat + buffer = GetAppendBuffer(cord, 6); + EXPECT_EQ(buffer.length(), 0); + EXPECT_EQ(cord, "Abc"); + + buffer = absl::CordBuffer::CreateWithDefaultLimit(500); + buffer.SetLength(3); + memcpy(buffer.data(), "def", 3); + cord.Append(std::move(buffer)); + shared_cord = cord; + + // Deny on tree + buffer = GetAppendBuffer(cord, 6); + EXPECT_EQ(buffer.length(), 0); + EXPECT_EQ(cord, "Abcdef"); } TEST_P(CordTest, TryFlatEmpty) { @@ -493,45 +912,43 @@ TEST_P(CordTest, TryFlatEmpty) { TEST_P(CordTest, TryFlatFlat) { absl::Cord c("hello"); + MaybeHarden(c); EXPECT_EQ(c.TryFlat(), "hello"); } TEST_P(CordTest, TryFlatSubstrInlined) { absl::Cord c("hello"); c.RemovePrefix(1); + MaybeHarden(c); EXPECT_EQ(c.TryFlat(), "ello"); } TEST_P(CordTest, TryFlatSubstrFlat) { absl::Cord c("longer than 15 bytes"); absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1); + MaybeHarden(sub); EXPECT_EQ(sub.TryFlat(), "onger than 15 bytes"); } TEST_P(CordTest, TryFlatConcat) { absl::Cord c = absl::MakeFragmentedCord({"hel", "lo"}); + MaybeHarden(c); EXPECT_EQ(c.TryFlat(), absl::nullopt); } TEST_P(CordTest, TryFlatExternal) { absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {}); + MaybeHarden(c); EXPECT_EQ(c.TryFlat(), "hell"); } TEST_P(CordTest, TryFlatSubstrExternal) { absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {}); absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1); + MaybeHarden(sub); EXPECT_EQ(sub.TryFlat(), "ell"); } -TEST_P(CordTest, TryFlatSubstrConcat) { - absl::Cord c = absl::MakeFragmentedCord({"hello", " world"}); - absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1); - EXPECT_EQ(sub.TryFlat(), absl::nullopt); - c.RemovePrefix(1); - EXPECT_EQ(c.TryFlat(), absl::nullopt); -} - TEST_P(CordTest, 
TryFlatCommonlyAssumedInvariants) { // The behavior tested below is not part of the API contract of Cord, but it's // something we intend to be true in our current implementation. This test @@ -546,6 +963,7 @@ TEST_P(CordTest, TryFlatCommonlyAssumedInvariants) { "returned by the ", "iterator"}; absl::Cord c = absl::MakeFragmentedCord(fragments); + MaybeHarden(c); int fragment = 0; int offset = 0; absl::Cord::CharIterator itc = c.char_begin(); @@ -590,13 +1008,15 @@ static void VerifyFlatten(absl::Cord c) { TEST_P(CordTest, Flatten) { VerifyFlatten(absl::Cord()); - VerifyFlatten(absl::Cord("small cord")); - VerifyFlatten(absl::Cord("larger than small buffer optimization")); - VerifyFlatten(absl::MakeFragmentedCord({"small ", "fragmented ", "cord"})); + VerifyFlatten(MaybeHardened(absl::Cord("small cord"))); + VerifyFlatten( + MaybeHardened(absl::Cord("larger than small buffer optimization"))); + VerifyFlatten(MaybeHardened( + absl::MakeFragmentedCord({"small ", "fragmented ", "cord"}))); // Test with a cord that is longer than the largest flat buffer RandomEngine rng(GTEST_FLAG_GET(random_seed)); - VerifyFlatten(absl::Cord(RandomLowercaseString(&rng, 8192))); + VerifyFlatten(MaybeHardened(absl::Cord(RandomLowercaseString(&rng, 8192)))); } // Test data @@ -650,22 +1070,26 @@ TEST_P(CordTest, MultipleLengths) { { // Construct from Cord absl::Cord tmp(a); absl::Cord x(tmp); + MaybeHarden(x); EXPECT_EQ(a, std::string(x)) << "'" << a << "'"; } { // Construct from absl::string_view absl::Cord x(a); + MaybeHarden(x); EXPECT_EQ(a, std::string(x)) << "'" << a << "'"; } { // Append cord to self absl::Cord self(a); + MaybeHarden(self); self.Append(self); EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'"; } { // Prepend cord to self absl::Cord self(a); + MaybeHarden(self); self.Prepend(self); EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'"; } @@ -677,12 +1101,14 @@ TEST_P(CordTest, MultipleLengths) { { // CopyFrom Cord absl::Cord x(a); absl::Cord y(b); + MaybeHarden(x); x = y; EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'"; } { // CopyFrom absl::string_view absl::Cord x(a); + MaybeHarden(x); x = b; EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'"; } @@ -690,12 +1116,14 @@ TEST_P(CordTest, MultipleLengths) { { // Cord::Append(Cord) absl::Cord x(a); absl::Cord y(b); + MaybeHarden(x); x.Append(y); EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'"; } { // Cord::Append(absl::string_view) absl::Cord x(a); + MaybeHarden(x); x.Append(b); EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'"; } @@ -703,12 +1131,14 @@ TEST_P(CordTest, MultipleLengths) { { // Cord::Prepend(Cord) absl::Cord x(a); absl::Cord y(b); + MaybeHarden(x); x.Prepend(y); EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'"; } { // Cord::Prepend(absl::string_view) absl::Cord x(a); + MaybeHarden(x); x.Prepend(b); EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'"; } @@ -721,13 +1151,16 @@ namespace { TEST_P(CordTest, RemoveSuffixWithExternalOrSubstring) { absl::Cord cord = absl::MakeCordFromExternal( "foo bar baz", [](absl::string_view s) { DoNothing(s, nullptr); }); - EXPECT_EQ("foo bar baz", std::string(cord)); + MaybeHarden(cord); + // This RemoveSuffix() will wrap the EXTERNAL node in a SUBSTRING node. cord.RemoveSuffix(4); EXPECT_EQ("foo bar", std::string(cord)); + MaybeHarden(cord); + // This RemoveSuffix() will adjust the SUBSTRING node in-place. 
cord.RemoveSuffix(4); EXPECT_EQ("foo", std::string(cord)); @@ -737,6 +1170,7 @@ TEST_P(CordTest, RemoveSuffixMakesZeroLengthNode) { absl::Cord c; c.Append(absl::Cord(std::string(100, 'x'))); absl::Cord other_ref = c; // Prevent inplace appends + MaybeHarden(c); c.Append(absl::Cord(std::string(200, 'y'))); c.RemoveSuffix(200); EXPECT_EQ(std::string(100, 'x'), std::string(c)); @@ -762,6 +1196,7 @@ absl::Cord CordWithZedBlock(size_t size) { // Establish that ZedBlock does what we think it does. TEST_P(CordTest, CordSpliceTestZedBlock) { absl::Cord blob = CordWithZedBlock(10); + MaybeHarden(blob); EXPECT_EQ(10, blob.size()); std::string s; absl::CopyCordToString(blob, &s); @@ -770,6 +1205,7 @@ TEST_P(CordTest, CordSpliceTestZedBlock) { TEST_P(CordTest, CordSpliceTestZedBlock0) { absl::Cord blob = CordWithZedBlock(0); + MaybeHarden(blob); EXPECT_EQ(0, blob.size()); std::string s; absl::CopyCordToString(blob, &s); @@ -778,6 +1214,7 @@ TEST_P(CordTest, CordSpliceTestZedBlock0) { TEST_P(CordTest, CordSpliceTestZedBlockSuffix1) { absl::Cord blob = CordWithZedBlock(10); + MaybeHarden(blob); EXPECT_EQ(10, blob.size()); absl::Cord suffix(blob); suffix.RemovePrefix(9); @@ -790,6 +1227,7 @@ TEST_P(CordTest, CordSpliceTestZedBlockSuffix1) { // Remove all of a prefix block TEST_P(CordTest, CordSpliceTestZedBlockSuffix0) { absl::Cord blob = CordWithZedBlock(10); + MaybeHarden(blob); EXPECT_EQ(10, blob.size()); absl::Cord suffix(blob); suffix.RemovePrefix(10); @@ -822,6 +1260,7 @@ absl::Cord SpliceCord(const absl::Cord& blob, int64_t offset, // Taking an empty suffix of a block breaks appending. TEST_P(CordTest, CordSpliceTestRemoveEntireBlock1) { absl::Cord zero = CordWithZedBlock(10); + MaybeHarden(zero); absl::Cord suffix(zero); suffix.RemovePrefix(10); absl::Cord result; @@ -830,6 +1269,7 @@ TEST_P(CordTest, CordSpliceTestRemoveEntireBlock1) { TEST_P(CordTest, CordSpliceTestRemoveEntireBlock2) { absl::Cord zero = CordWithZedBlock(10); + MaybeHarden(zero); absl::Cord prefix(zero); prefix.RemoveSuffix(10); absl::Cord suffix(zero); @@ -841,13 +1281,19 @@ TEST_P(CordTest, CordSpliceTestRemoveEntireBlock2) { TEST_P(CordTest, CordSpliceTestRemoveEntireBlock3) { absl::Cord blob = CordWithZedBlock(10); absl::Cord block = BigCord(10, 'b'); + MaybeHarden(blob); + MaybeHarden(block); blob = SpliceCord(blob, 0, block); } struct CordCompareTestCase { template - CordCompareTestCase(const LHS& lhs, const RHS& rhs) - : lhs_cord(lhs), rhs_cord(rhs) {} + CordCompareTestCase(const LHS& lhs, const RHS& rhs, bool use_crc) + : lhs_cord(lhs), rhs_cord(rhs) { + if (use_crc) { + lhs_cord.SetExpectedChecksum(1); + } + } absl::Cord lhs_cord; absl::Cord rhs_cord; @@ -884,47 +1330,54 @@ TEST_P(CordTest, Compare) { concat2.Append("cccccccccccDDDDDDDDDDDDDD"); concat2.Append("DD"); + const bool use_crc = UseCrc(); + std::vector test_cases = {{ // Inline cords - {"abcdef", "abcdef"}, - {"abcdef", "abcdee"}, - {"abcdef", "abcdeg"}, - {"bbcdef", "abcdef"}, - {"bbcdef", "abcdeg"}, - {"abcdefa", "abcdef"}, - {"abcdef", "abcdefa"}, + {"abcdef", "abcdef", use_crc}, + {"abcdef", "abcdee", use_crc}, + {"abcdef", "abcdeg", use_crc}, + {"bbcdef", "abcdef", use_crc}, + {"bbcdef", "abcdeg", use_crc}, + {"abcdefa", "abcdef", use_crc}, + {"abcdef", "abcdefa", use_crc}, // Small flat cords - {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD"}, - {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD"}, - {"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD"}, - {"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX"}, - {"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD"}, - 
{"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa"}, + {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc}, + {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD", use_crc}, + {"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc}, + {"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX", use_crc}, + {"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD", use_crc}, + {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa", use_crc}, // Subcords - {subcord, subcord}, - {subcord, "aaBBBBBccc"}, - {subcord, "aaBBBBBccd"}, - {subcord, "aaBBBBBccb"}, - {subcord, "aaBBBBBxcb"}, - {subcord, "aaBBBBBccca"}, - {subcord, "aaBBBBBcc"}, + {subcord, subcord, use_crc}, + {subcord, "aaBBBBBccc", use_crc}, + {subcord, "aaBBBBBccd", use_crc}, + {subcord, "aaBBBBBccb", use_crc}, + {subcord, "aaBBBBBxcb", use_crc}, + {subcord, "aaBBBBBccca", use_crc}, + {subcord, "aaBBBBBcc", use_crc}, // Concats - {concat, concat}, + {concat, concat, use_crc}, {concat, - "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD"}, + "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD", + use_crc}, {concat, - "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD"}, + "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD", + use_crc}, {concat, - "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD"}, + "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD", + use_crc}, {concat, - "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD"}, + "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD", + use_crc}, {concat, - "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe"}, + "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe", + use_crc}, - {concat, concat2}, + {concat, concat2, use_crc}, }}; for (const auto& tc : test_cases) { @@ -935,6 +1388,7 @@ TEST_P(CordTest, Compare) { TEST_P(CordTest, CompareAfterAssign) { absl::Cord a("aaaaaa1111111"); absl::Cord b("aaaaaa2222222"); + MaybeHarden(a); a = "cccccc"; b = "cccccc"; EXPECT_EQ(a, b); @@ -993,6 +1447,8 @@ TEST_P(CordTest, CompareRandomComparisons) { d.Append(a[GetUniformRandomUpTo(&rng, ABSL_ARRAYSIZE(a))]); } std::bernoulli_distribution coin_flip(0.5); + MaybeHarden(c); + MaybeHarden(d); TestCompare(coin_flip(rng) ? c : absl::Cord(std::string(c)), coin_flip(rng) ? 
d : absl::Cord(std::string(d)), &rng); } @@ -1118,6 +1574,7 @@ TEST_P(CordTest, ConstructFromExternalCompareContents) { EXPECT_EQ(external->size(), sv.size()); delete external; }); + MaybeHarden(cord); EXPECT_EQ(data, cord); } } @@ -1133,7 +1590,7 @@ TEST_P(CordTest, ConstructFromExternalLargeReleaser) { EXPECT_EQ(data, absl::string_view(data_array.data(), data_array.size())); invoked = true; }; - (void)absl::MakeCordFromExternal(data, releaser); + (void)MaybeHardened(absl::MakeCordFromExternal(data, releaser)); EXPECT_TRUE(invoked); } @@ -1146,11 +1603,11 @@ TEST_P(CordTest, ConstructFromExternalFunctionPointerReleaser) { invoked = true; }); invoked = false; - (void)absl::MakeCordFromExternal(data, releaser); + (void)MaybeHardened(absl::MakeCordFromExternal(data, releaser)); EXPECT_TRUE(invoked); invoked = false; - (void)absl::MakeCordFromExternal(data, *releaser); + (void)MaybeHardened(absl::MakeCordFromExternal(data, *releaser)); EXPECT_TRUE(invoked); } @@ -1164,20 +1621,21 @@ TEST_P(CordTest, ConstructFromExternalMoveOnlyReleaser) { }; bool invoked = false; - (void)absl::MakeCordFromExternal("dummy", Releaser(&invoked)); + (void)MaybeHardened(absl::MakeCordFromExternal("dummy", Releaser(&invoked))); EXPECT_TRUE(invoked); } TEST_P(CordTest, ConstructFromExternalNoArgLambda) { bool invoked = false; - (void)absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; }); + (void)MaybeHardened( + absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; })); EXPECT_TRUE(invoked); } TEST_P(CordTest, ConstructFromExternalStringViewArgLambda) { bool invoked = false; - (void)absl::MakeCordFromExternal( - "dummy", [&invoked](absl::string_view) { invoked = true; }); + (void)MaybeHardened(absl::MakeCordFromExternal( + "dummy", [&invoked](absl::string_view) { invoked = true; })); EXPECT_TRUE(invoked); } @@ -1192,43 +1650,78 @@ TEST_P(CordTest, ConstructFromExternalNonTrivialReleaserDestructor) { bool destroyed = false; Releaser releaser(&destroyed); - (void)absl::MakeCordFromExternal("dummy", releaser); + (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser)); EXPECT_TRUE(destroyed); } TEST_P(CordTest, ConstructFromExternalReferenceQualifierOverloads) { - struct Releaser { - void operator()(absl::string_view) & { *lvalue_invoked = true; } - void operator()(absl::string_view) && { *rvalue_invoked = true; } + enum InvokedAs { kMissing, kLValue, kRValue }; + enum CopiedAs { kNone, kMove, kCopy }; + struct Tracker { + CopiedAs copied_as = kNone; + InvokedAs invoked_as = kMissing; - bool* lvalue_invoked; - bool* rvalue_invoked; + void Record(InvokedAs rhs) { + ASSERT_EQ(invoked_as, kMissing); + invoked_as = rhs; + } + + void Record(CopiedAs rhs) { + if (copied_as == kNone || rhs == kCopy) copied_as = rhs; + } + } tracker; + + class Releaser { + public: + explicit Releaser(Tracker* tracker) : tr_(tracker) { *tracker = Tracker(); } + Releaser(Releaser&& rhs) : tr_(rhs.tr_) { tr_->Record(kMove); } + Releaser(const Releaser& rhs) : tr_(rhs.tr_) { tr_->Record(kCopy); } + + void operator()(absl::string_view) & { tr_->Record(kLValue); } + void operator()(absl::string_view) && { tr_->Record(kRValue); } + + private: + Tracker* tr_; }; - bool lvalue_invoked = false; - bool rvalue_invoked = false; - Releaser releaser = {&lvalue_invoked, &rvalue_invoked}; - (void)absl::MakeCordFromExternal("", releaser); - EXPECT_FALSE(lvalue_invoked); - EXPECT_TRUE(rvalue_invoked); - rvalue_invoked = false; + const Releaser releaser1(&tracker); + (void)MaybeHardened(absl::MakeCordFromExternal("", 
releaser1)); + EXPECT_EQ(tracker.copied_as, kCopy); + EXPECT_EQ(tracker.invoked_as, kRValue); - (void)absl::MakeCordFromExternal("dummy", releaser); - EXPECT_FALSE(lvalue_invoked); - EXPECT_TRUE(rvalue_invoked); - rvalue_invoked = false; + const Releaser releaser2(&tracker); + (void)MaybeHardened(absl::MakeCordFromExternal("", releaser2)); + EXPECT_EQ(tracker.copied_as, kCopy); + EXPECT_EQ(tracker.invoked_as, kRValue); - // NOLINTNEXTLINE: suppress clang-tidy std::move on trivially copyable type. - (void)absl::MakeCordFromExternal("dummy", std::move(releaser)); - EXPECT_FALSE(lvalue_invoked); - EXPECT_TRUE(rvalue_invoked); + Releaser releaser3(&tracker); + (void)MaybeHardened(absl::MakeCordFromExternal("", std::move(releaser3))); + EXPECT_EQ(tracker.copied_as, kMove); + EXPECT_EQ(tracker.invoked_as, kRValue); + + Releaser releaser4(&tracker); + (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser4)); + EXPECT_EQ(tracker.copied_as, kCopy); + EXPECT_EQ(tracker.invoked_as, kRValue); + + const Releaser releaser5(&tracker); + (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser5)); + EXPECT_EQ(tracker.copied_as, kCopy); + EXPECT_EQ(tracker.invoked_as, kRValue); + + Releaser releaser6(&tracker); + (void)MaybeHardened(absl::MakeCordFromExternal("foo", std::move(releaser6))); + EXPECT_EQ(tracker.copied_as, kMove); + EXPECT_EQ(tracker.invoked_as, kRValue); } TEST_P(CordTest, ExternalMemoryBasicUsage) { static const char* strings[] = {"", "hello", "there"}; for (const char* str : strings) { absl::Cord dst("(prefix)"); + MaybeHarden(dst); AddExternalMemory(str, &dst); + MaybeHarden(dst); dst.Append("(suffix)"); EXPECT_EQ((std::string("(prefix)") + str + std::string("(suffix)")), std::string(dst)); @@ -1242,7 +1735,9 @@ TEST_P(CordTest, ExternalMemoryRemovePrefixSuffix) { for (int offset = 0; offset <= s.size(); offset++) { for (int length = 0; length <= s.size() - offset; length++) { absl::Cord result(cord); + MaybeHarden(result); result.RemovePrefix(offset); + MaybeHarden(result); result.RemoveSuffix(result.size() - length); EXPECT_EQ(s.substr(offset, length), std::string(result)) << offset << " " << length; @@ -1253,8 +1748,10 @@ TEST_P(CordTest, ExternalMemoryRemovePrefixSuffix) { TEST_P(CordTest, ExternalMemoryGet) { absl::Cord cord("hello"); AddExternalMemory(" world!", &cord); + MaybeHarden(cord); AddExternalMemory(" how are ", &cord); cord.Append(" you?"); + MaybeHarden(cord); std::string s = std::string(cord); for (int i = 0; i < s.size(); i++) { EXPECT_EQ(s[i], cord[i]); @@ -1262,76 +1759,133 @@ TEST_P(CordTest, ExternalMemoryGet) { } // CordMemoryUsage tests verify the correctness of the EstimatedMemoryUsage() -// These tests take into account that the reported memory usage is approximate -// and non-deterministic. For all tests, We verify that the reported memory -// usage is larger than `size()`, and less than `size() * 1.5` as a cord should -// never reserve more 'extra' capacity than half of its size as it grows. -// Additionally we have some whiteboxed expectations based on our knowledge of -// the layout and size of empty and inlined cords, and flat nodes. +// We use whiteboxed expectations based on our knowledge of the layout and size +// of empty and inlined cords, and flat nodes. -TEST_P(CordTest, CordMemoryUsageEmpty) { - EXPECT_EQ(sizeof(absl::Cord), absl::Cord().EstimatedMemoryUsage()); +constexpr auto kFairShare = absl::CordMemoryAccounting::kFairShare; + +// Creates a cord of `n` `c` values, making sure no string stealing occurs. 
+absl::Cord MakeCord(size_t n, char c) { + const std::string s(n, c); + return absl::Cord(s); } -TEST_P(CordTest, CordMemoryUsageEmbedded) { +TEST(CordTest, CordMemoryUsageEmpty) { + absl::Cord cord; + EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage()); + EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kFairShare)); +} + +TEST(CordTest, CordMemoryUsageInlined) { absl::Cord a("hello"); EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord)); + EXPECT_EQ(a.EstimatedMemoryUsage(kFairShare), sizeof(absl::Cord)); } -TEST_P(CordTest, CordMemoryUsageEmbeddedAppend) { - absl::Cord a("a"); - absl::Cord b("bcd"); - EXPECT_EQ(b.EstimatedMemoryUsage(), sizeof(absl::Cord)); - a.Append(b); - EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord)); -} - -TEST_P(CordTest, CordMemoryUsageExternalMemory) { - static const int kLength = 1000; +TEST(CordTest, CordMemoryUsageExternalMemory) { absl::Cord cord; - AddExternalMemory(std::string(kLength, 'x'), &cord); - EXPECT_GT(cord.EstimatedMemoryUsage(), kLength); - EXPECT_LE(cord.EstimatedMemoryUsage(), kLength * 1.5); + AddExternalMemory(std::string(1000, 'x'), &cord); + const size_t expected = + sizeof(absl::Cord) + 1000 + sizeof(CordRepExternal) + sizeof(intptr_t); + EXPECT_EQ(cord.EstimatedMemoryUsage(), expected); + EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), expected); } -TEST_P(CordTest, CordMemoryUsageFlat) { - static const int kLength = 125; - absl::Cord a(std::string(kLength, 'a')); - EXPECT_GT(a.EstimatedMemoryUsage(), kLength); - EXPECT_LE(a.EstimatedMemoryUsage(), kLength * 1.5); +TEST(CordTest, CordMemoryUsageFlat) { + absl::Cord cord = MakeCord(1000, 'a'); + const size_t flat_size = + absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize(); + EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size); + EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + flat_size); } -TEST_P(CordTest, CordMemoryUsageAppendFlat) { - using absl::strings_internal::CordTestAccess; - absl::Cord a(std::string(CordTestAccess::MaxFlatLength(), 'a')); - size_t length = a.EstimatedMemoryUsage(); - a.Append(std::string(CordTestAccess::MaxFlatLength(), 'b')); - size_t delta = a.EstimatedMemoryUsage() - length; - EXPECT_GT(delta, CordTestAccess::MaxFlatLength()); - EXPECT_LE(delta, CordTestAccess::MaxFlatLength() * 1.5); +TEST(CordTest, CordMemoryUsageSubStringSharedFlat) { + absl::Cord flat = MakeCord(2000, 'a'); + const size_t flat_size = + absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize(); + absl::Cord cord = flat.Subcord(500, 1000); + EXPECT_EQ(cord.EstimatedMemoryUsage(), + sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size); + EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size / 2); } -TEST_P(CordTest, CordMemoryUsageAppendExternal) { - static const int kLength = 1000; - using absl::strings_internal::CordTestAccess; - absl::Cord a(std::string(CordTestAccess::MaxFlatLength(), 'a')); - size_t length = a.EstimatedMemoryUsage(); - AddExternalMemory(std::string(kLength, 'b'), &a); - size_t delta = a.EstimatedMemoryUsage() - length; - EXPECT_GT(delta, kLength); - EXPECT_LE(delta, kLength * 1.5); +TEST(CordTest, CordMemoryUsageFlatShared) { + absl::Cord shared = MakeCord(1000, 'a'); + absl::Cord cord(shared); + const size_t flat_size = + absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize(); + EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size); + EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + flat_size / 2); 
} -TEST_P(CordTest, CordMemoryUsageSubString) { - static const int kLength = 2000; - using absl::strings_internal::CordTestAccess; - absl::Cord a(std::string(kLength, 'a')); - size_t length = a.EstimatedMemoryUsage(); - AddExternalMemory(std::string(kLength, 'b'), &a); - absl::Cord b = a.Subcord(0, kLength + kLength / 2); - size_t delta = b.EstimatedMemoryUsage() - length; - EXPECT_GT(delta, kLength); - EXPECT_LE(delta, kLength * 1.5); +TEST(CordTest, CordMemoryUsageFlatHardenedAndShared) { + absl::Cord shared = MakeCord(1000, 'a'); + absl::Cord cord(shared); + const size_t flat_size = + absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize(); + cord.SetExpectedChecksum(1); + EXPECT_EQ(cord.EstimatedMemoryUsage(), + sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size); + EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size / 2); + + absl::Cord cord2(cord); + EXPECT_EQ(cord2.EstimatedMemoryUsage(), + sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size); + EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + (sizeof(CordRepCrc) + flat_size / 2) / 2); +} + +TEST(CordTest, CordMemoryUsageBTree) { + absl::Cord cord1; + size_t flats1_size = 0; + absl::Cord flats1[4] = {MakeCord(1000, 'a'), MakeCord(1100, 'a'), + MakeCord(1200, 'a'), MakeCord(1300, 'a')}; + for (absl::Cord flat : flats1) { + flats1_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize(); + cord1.Append(std::move(flat)); + } + + // Make sure the created cord is a BTREE tree. Under some builds such as + // windows DLL, we may have ODR like effects on the flag, meaning the DLL + // code will run with the picked up default. + if (!absl::CordTestPeer::Tree(cord1)->IsBtree()) { + ABSL_RAW_LOG(WARNING, "Cord library code not respecting btree flag"); + return; + } + + size_t rep1_size = sizeof(CordRepBtree) + flats1_size; + size_t rep1_shared_size = sizeof(CordRepBtree) + flats1_size / 2; + + EXPECT_EQ(cord1.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep1_size); + EXPECT_EQ(cord1.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + rep1_shared_size); + + absl::Cord cord2; + size_t flats2_size = 0; + absl::Cord flats2[4] = {MakeCord(600, 'a'), MakeCord(700, 'a'), + MakeCord(800, 'a'), MakeCord(900, 'a')}; + for (absl::Cord& flat : flats2) { + flats2_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize(); + cord2.Append(std::move(flat)); + } + size_t rep2_size = sizeof(CordRepBtree) + flats2_size; + + EXPECT_EQ(cord2.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep2_size); + EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + rep2_size); + + absl::Cord cord(cord1); + cord.Append(std::move(cord2)); + + EXPECT_EQ(cord.EstimatedMemoryUsage(), + sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size); + EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), + sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_shared_size / 2 + + rep2_size); } // Regtest for a change that had to be rolled back because it expanded out @@ -1353,11 +1907,13 @@ TEST_P(CordTest, CordMemoryUsageInlineRep) { TEST_P(CordTest, Concat_Append) { // Create a rep of type CONCAT absl::Cord s1("foobarbarbarbarbar"); + MaybeHarden(s1); s1.Append("abcdefgabcdefgabcdefgabcdefgabcdefgabcdefgabcdefg"); size_t size = s1.size(); // Create a copy of s1 and append to it. absl::Cord s2 = s1; + MaybeHarden(s2); s2.Append("x"); // 7465150 modifies s1 when it shouldn't. 
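The memory-accounting tests above distinguish total usage from fair-share usage. Below is a small sketch of that difference on a shared cord, assuming only the public EstimatedMemoryUsage() overloads those tests use; the helper name and sizes are illustrative, not part of the patch:

#include <cstddef>
#include <string>
#include "absl/strings/cord.h"

// Sketch: two cords sharing one underlying flat node. Default accounting
// charges each cord for the whole allocation, while kFairShare splits the
// cost of the shared node between the cords that reference it.
size_t FairShareSketch() {
  const std::string data(1000, 'a');
  absl::Cord original(data);
  absl::Cord copy = original;  // shares the same tree as `original`
  size_t total = copy.EstimatedMemoryUsage();  // counts the full flat node
  size_t fair = copy.EstimatedMemoryUsage(
      absl::CordMemoryAccounting::kFairShare);  // roughly half of the node
  return total - fair;  // positive: fair-share reports the smaller number
}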
@@ -1377,6 +1933,7 @@ TEST_P(CordTest, DiabolicalGrowth) { for (char c : expected) { absl::Cord shared(cord); cord.Append(absl::string_view(&c, 1)); + MaybeHarden(cord); } std::string value; absl::CopyCordToString(cord, &value); @@ -1421,17 +1978,28 @@ static absl::Cord MakeHuge(absl::string_view prefix) { TEST_P(CordTest, HugeCord) { absl::Cord cord = MakeHuge("huge cord"); + MaybeHarden(cord); + + const size_t acceptable_delta = + 100 + (UseCrc() ? sizeof(absl::cord_internal::CordRepCrc) : 0); EXPECT_LE(cord.size(), cord.EstimatedMemoryUsage()); - EXPECT_GE(cord.size() + 100, cord.EstimatedMemoryUsage()); + EXPECT_GE(cord.size() + acceptable_delta, cord.EstimatedMemoryUsage()); } // Tests that Append() works ok when handed a self reference TEST_P(CordTest, AppendSelf) { + // Test the empty case. + absl::Cord empty; + MaybeHarden(empty); + empty.Append(empty); + ASSERT_EQ(empty, ""); + // We run the test until data is ~16K // This guarantees it covers small, medium and large data. std::string control_data = "Abc"; absl::Cord data(control_data); while (control_data.length() < 0x4000) { + MaybeHarden(data); data.Append(data); control_data.append(control_data); ASSERT_EQ(control_data, data); @@ -1442,6 +2010,8 @@ TEST_P(CordTest, MakeFragmentedCordFromInitializerList) { absl::Cord fragmented = absl::MakeFragmentedCord({"A ", "fragmented ", "Cord"}); + MaybeHarden(fragmented); + EXPECT_EQ("A fragmented Cord", fragmented); auto chunk_it = fragmented.chunk_begin(); @@ -1462,6 +2032,8 @@ TEST_P(CordTest, MakeFragmentedCordFromVector) { std::vector chunks = {"A ", "fragmented ", "Cord"}; absl::Cord fragmented = absl::MakeFragmentedCord(chunks); + MaybeHarden(fragmented); + EXPECT_EQ("A fragmented Cord", fragmented); auto chunk_it = fragmented.chunk_begin(); @@ -1564,22 +2136,26 @@ TEST_P(CordTest, CordChunkIteratorOperations) { VerifyChunkIterator(empty_cord, 0); absl::Cord small_buffer_cord("small cord"); + MaybeHarden(small_buffer_cord); VerifyChunkIterator(small_buffer_cord, 1); absl::Cord flat_node_cord("larger than small buffer optimization"); + MaybeHarden(flat_node_cord); VerifyChunkIterator(flat_node_cord, 1); - VerifyChunkIterator( - absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ", - "testing ", "chunk ", "iterations."}), - 8); + VerifyChunkIterator(MaybeHardened(absl::MakeFragmentedCord( + {"a ", "small ", "fragmented ", "cord ", "for ", + "testing ", "chunk ", "iterations."})), + 8); absl::Cord reused_nodes_cord(std::string(40, 'c')); reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'b'))); + MaybeHarden(reused_nodes_cord); reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'a'))); size_t expected_chunks = 3; for (int i = 0; i < 8; ++i) { reused_nodes_cord.Prepend(reused_nodes_cord); + MaybeHarden(reused_nodes_cord); expected_chunks *= 2; VerifyChunkIterator(reused_nodes_cord, expected_chunks); } @@ -1591,6 +2167,78 @@ TEST_P(CordTest, CordChunkIteratorOperations) { VerifyChunkIterator(subcords, 128); } + +TEST_P(CordTest, AdvanceAndReadOnDataEdge) { + RandomEngine rng(GTEST_FLAG_GET(random_seed)); + const std::string data = RandomLowercaseString(&rng, 2000); + for (bool as_flat : {true, false}) { + SCOPED_TRACE(as_flat ? "Flat" : "External"); + + absl::Cord cord = + as_flat ? 
absl::Cord(data) + : absl::MakeCordFromExternal(data, [](absl::string_view) {}); + auto it = cord.Chars().begin(); +#if !defined(NDEBUG) || ABSL_OPTION_HARDENED + EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*"); +#endif + + it = cord.Chars().begin(); + absl::Cord frag = cord.AdvanceAndRead(&it, 2000); + EXPECT_EQ(frag, data); + EXPECT_TRUE(it == cord.Chars().end()); + + it = cord.Chars().begin(); + frag = cord.AdvanceAndRead(&it, 200); + EXPECT_EQ(frag, data.substr(0, 200)); + EXPECT_FALSE(it == cord.Chars().end()); + + frag = cord.AdvanceAndRead(&it, 1500); + EXPECT_EQ(frag, data.substr(200, 1500)); + EXPECT_FALSE(it == cord.Chars().end()); + + frag = cord.AdvanceAndRead(&it, 300); + EXPECT_EQ(frag, data.substr(1700, 300)); + EXPECT_TRUE(it == cord.Chars().end()); + } +} + +TEST_P(CordTest, AdvanceAndReadOnSubstringDataEdge) { + RandomEngine rng(GTEST_FLAG_GET(random_seed)); + const std::string data = RandomLowercaseString(&rng, 2500); + for (bool as_flat : {true, false}) { + SCOPED_TRACE(as_flat ? "Flat" : "External"); + + absl::Cord cord = + as_flat ? absl::Cord(data) + : absl::MakeCordFromExternal(data, [](absl::string_view) {}); + cord = cord.Subcord(200, 2000); + const std::string substr = data.substr(200, 2000); + + auto it = cord.Chars().begin(); +#if !defined(NDEBUG) || ABSL_OPTION_HARDENED + EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*"); +#endif + + it = cord.Chars().begin(); + absl::Cord frag = cord.AdvanceAndRead(&it, 2000); + EXPECT_EQ(frag, substr); + EXPECT_TRUE(it == cord.Chars().end()); + + it = cord.Chars().begin(); + frag = cord.AdvanceAndRead(&it, 200); + EXPECT_EQ(frag, substr.substr(0, 200)); + EXPECT_FALSE(it == cord.Chars().end()); + + frag = cord.AdvanceAndRead(&it, 1500); + EXPECT_EQ(frag, substr.substr(200, 1500)); + EXPECT_FALSE(it == cord.Chars().end()); + + frag = cord.AdvanceAndRead(&it, 300); + EXPECT_EQ(frag, substr.substr(1700, 300)); + EXPECT_TRUE(it == cord.Chars().end()); + } +} + TEST_P(CordTest, CharIteratorTraits) { static_assert(std::is_copy_constructible::value, ""); @@ -1705,27 +2353,33 @@ TEST_P(CordTest, CharIteratorOperations) { VerifyCharIterator(empty_cord); absl::Cord small_buffer_cord("small cord"); + MaybeHarden(small_buffer_cord); VerifyCharIterator(small_buffer_cord); absl::Cord flat_node_cord("larger than small buffer optimization"); + MaybeHarden(flat_node_cord); VerifyCharIterator(flat_node_cord); - VerifyCharIterator( + VerifyCharIterator(MaybeHardened( absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ", - "testing ", "character ", "iteration."})); + "testing ", "character ", "iteration."}))); absl::Cord reused_nodes_cord("ghi"); reused_nodes_cord.Prepend(absl::Cord("def")); reused_nodes_cord.Prepend(absl::Cord("abc")); for (int i = 0; i < 4; ++i) { reused_nodes_cord.Prepend(reused_nodes_cord); + MaybeHarden(reused_nodes_cord); VerifyCharIterator(reused_nodes_cord); } RandomEngine rng(GTEST_FLAG_GET(random_seed)); absl::Cord flat_cord(RandomLowercaseString(&rng, 256)); absl::Cord subcords; - for (int i = 0; i < 4; ++i) subcords.Prepend(flat_cord.Subcord(16 * i, 128)); + for (int i = 0; i < 4; ++i) { + subcords.Prepend(flat_cord.Subcord(16 * i, 128)); + MaybeHarden(subcords); + } VerifyCharIterator(subcords); } @@ -1750,6 +2404,8 @@ TEST_P(CordTest, CharIteratorAdvanceAndRead) { cord.Append(absl::Cord(block)); } + MaybeHarden(cord); + for (size_t chunk_size : {kChunkSize1, kChunkSize2, kChunkSize3, kChunkSize4}) { absl::Cord::CharIterator it = cord.char_begin(); @@ -1767,6 
+2423,7 @@ TEST_P(CordTest, CharIteratorAdvanceAndRead) { TEST_P(CordTest, StreamingOutput) { absl::Cord c = absl::MakeFragmentedCord({"A ", "small ", "fragmented ", "Cord", "."}); + MaybeHarden(c); std::stringstream output; output << c; EXPECT_EQ("A small fragmented Cord.", output.str()); @@ -1780,6 +2437,7 @@ TEST_P(CordTest, ForEachChunk) { cord_chunks.push_back(absl::StrCat("[", i, "]")); } absl::Cord c = absl::MakeFragmentedCord(cord_chunks); + MaybeHarden(c); std::vector iterated_chunks; absl::CordTestPeer::ForEachChunk(c, @@ -1797,6 +2455,7 @@ TEST_P(CordTest, SmallBufferAssignFromOwnData) { for (size_t pos = 0; pos < contents.size(); ++pos) { for (size_t count = contents.size() - pos; count > 0; --count) { absl::Cord c(contents); + MaybeHarden(c); absl::string_view flat = c.Flatten(); c = flat.substr(pos, count); EXPECT_EQ(c, contents.substr(pos, count)) @@ -1809,12 +2468,16 @@ TEST_P(CordTest, Format) { absl::Cord c; absl::Format(&c, "There were %04d little %s.", 3, "pigs"); EXPECT_EQ(c, "There were 0003 little pigs."); + MaybeHarden(c); absl::Format(&c, "And %-3llx bad wolf!", 1); + MaybeHarden(c); EXPECT_EQ(c, "There were 0003 little pigs.And 1 bad wolf!"); } TEST_P(CordTest, Hardening) { absl::Cord cord("hello"); + MaybeHarden(cord); + // These statement should abort the program in all builds modes. EXPECT_DEATH_IF_SUPPORTED(cord.RemovePrefix(6), ""); EXPECT_DEATH_IF_SUPPORTED(cord.RemoveSuffix(6), ""); @@ -1833,6 +2496,49 @@ TEST_P(CordTest, Hardening) { EXPECT_DEATH_IF_SUPPORTED(++cord.chunk_end(), ""); } +// This test mimics a specific (and rare) application repeatedly splitting a +// cord, inserting (overwriting) a string value, and composing a new cord from +// the three pieces. This is hostile towards a Btree implementation: A split of +// a node at any level is likely to have the right-most edge of the left split, +// and the left-most edge of the right split shared. For example, splitting a +// leaf node with 6 edges will result likely in a 1-6, 2-5, 3-4, etc. split, +// sharing the 'split node'. When recomposing such nodes, we 'injected' an edge +// in that node. As this happens with some probability on each level of the +// tree, this will quickly grow the tree until it reaches maximum height. +TEST_P(CordTest, BtreeHostileSplitInsertJoin) { + absl::BitGen bitgen; + + // Start with about 1GB of data + std::string data(1 << 10, 'x'); + absl::Cord buffer(data); + absl::Cord cord; + for (int i = 0; i < 1000000; ++i) { + cord.Append(buffer); + } + + for (int j = 0; j < 1000; ++j) { + MaybeHarden(cord); + size_t offset = absl::Uniform(bitgen, 0u, cord.size()); + size_t length = absl::Uniform(bitgen, 100u, data.size()); + if (cord.size() == offset) { + cord.Append(absl::string_view(data.data(), length)); + } else { + absl::Cord suffix; + if (offset + length < cord.size()) { + suffix = cord; + suffix.RemovePrefix(offset + length); + } + if (cord.size() > offset) { + cord.RemoveSuffix(cord.size() - offset); + } + cord.Append(absl::string_view(data.data(), length)); + if (!suffix.empty()) { + cord.Append(suffix); + } + } + } +} + class AfterExitCordTester { public: bool Set(absl::Cord* cord, absl::string_view expected) { @@ -1849,12 +2555,34 @@ class AfterExitCordTester { absl::string_view expected_; }; +// Deliberately prevents the destructor for an absl::Cord from running. The cord +// is accessible via the cord member during the lifetime of the CordLeaker. 
+// After the CordLeaker is destroyed, pointers to the cord will remain valid +// until the CordLeaker's memory is deallocated. +struct CordLeaker { + union { + absl::Cord cord; + }; + + template + constexpr explicit CordLeaker(const Str& str) : cord(str) {} + + ~CordLeaker() { + // Don't do anything, including running cord's destructor. (cord's + // destructor won't run automatically because cord is hidden inside a + // union.) + } +}; + template void TestConstinitConstructor(Str) { const auto expected = Str::value; // Defined before `cord` to be destroyed after it. static AfterExitCordTester exit_tester; // NOLINT - ABSL_CONST_INIT static absl::Cord cord(Str{}); // NOLINT + ABSL_CONST_INIT static CordLeaker cord_leaker(Str{}); // NOLINT + // cord_leaker is static, so this reference will remain valid through the end + // of program execution. + static absl::Cord& cord = cord_leaker.cord; static bool init_exit_tester = exit_tester.Set(&cord, expected); (void)init_exit_tester; @@ -1912,3 +2640,420 @@ TEST_P(CordTest, ConstinitConstructor) { TestConstinitConstructor( absl::strings_internal::MakeStringConstant(LongView{})); } + +namespace { + +// Test helper that generates a populated cord for future manipulation. +// +// By test convention, all generated cords begin with the characters "abcde" at +// the start of the first chunk. +class PopulatedCordFactory { + public: + constexpr PopulatedCordFactory(absl::string_view name, + absl::Cord (*generator)()) + : name_(name), generator_(generator) {} + + absl::string_view Name() const { return name_; } + absl::Cord Generate() const { return generator_(); } + + private: + absl::string_view name_; + absl::Cord (*generator_)(); +}; + +// clang-format off +// This array is constant-initialized in conformant compilers. +PopulatedCordFactory cord_factories[] = { + {"sso", [] { return absl::Cord("abcde"); }}, + {"flat", [] { + // Too large to live in SSO space, but small enough to be a simple FLAT. + absl::Cord flat(absl::StrCat("abcde", std::string(1000, 'x'))); + flat.Flatten(); + return flat; + }}, + {"external", [] { + // A cheat: we are using a string literal as the external storage, so a + // no-op releaser is correct here. + return absl::MakeCordFromExternal("abcde External!", []{}); + }}, + {"external substring", [] { + // A cheat: we are using a string literal as the external storage, so a + // no-op releaser is correct here. + absl::Cord ext = absl::MakeCordFromExternal("-abcde External!", []{}); + return absl::CordTestPeer::MakeSubstring(ext, 1, ext.size() - 1); + }}, + {"substring", [] { + absl::Cord flat(absl::StrCat("-abcde", std::string(1000, 'x'))); + flat.Flatten(); + return flat.Subcord(1, 998); + }}, + {"fragmented", [] { + std::string fragment = absl::StrCat("abcde", std::string(195, 'x')); + std::vector fragments(200, fragment); + absl::Cord cord = absl::MakeFragmentedCord(fragments); + assert(cord.size() == 40000); + return cord; + }}, +}; +// clang-format on + +// Test helper that can mutate a cord, and possibly undo the mutation, for +// testing. 
+class CordMutator { + public: + constexpr CordMutator(absl::string_view name, void (*mutate)(absl::Cord&), + void (*undo)(absl::Cord&) = nullptr) + : name_(name), mutate_(mutate), undo_(undo) {} + + absl::string_view Name() const { return name_; } + void Mutate(absl::Cord& cord) const { mutate_(cord); } + bool CanUndo() const { return undo_ != nullptr; } + void Undo(absl::Cord& cord) const { undo_(cord); } + + private: + absl::string_view name_; + void (*mutate_)(absl::Cord&); + void (*undo_)(absl::Cord&); +}; + +// clang-format off +// This array is constant-initialized in conformant compilers. +CordMutator cord_mutators[] = { + {"clear", [](absl::Cord& c) { c.Clear(); }}, + {"overwrite", [](absl::Cord& c) { c = "overwritten"; }}, + { + "append string", + [](absl::Cord& c) { c.Append("0123456789"); }, + [](absl::Cord& c) { c.RemoveSuffix(10); } + }, + { + "append cord", + [](absl::Cord& c) { + c.Append(absl::MakeFragmentedCord({"12345", "67890"})); + }, + [](absl::Cord& c) { c.RemoveSuffix(10); } + }, + { + "append checksummed cord", + [](absl::Cord& c) { + absl::Cord to_append = absl::MakeFragmentedCord({"12345", "67890"}); + to_append.SetExpectedChecksum(999); + c.Append(to_append); + }, + [](absl::Cord& c) { c.RemoveSuffix(10); } + }, + { + "append self", + [](absl::Cord& c) { c.Append(c); }, + [](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); } + }, + { + "append empty string", + [](absl::Cord& c) { c.Append(""); }, + [](absl::Cord& c) { } + }, + { + "append empty cord", + [](absl::Cord& c) { c.Append(absl::Cord()); }, + [](absl::Cord& c) { } + }, + { + "append empty checksummed cord", + [](absl::Cord& c) { + absl::Cord to_append; + to_append.SetExpectedChecksum(999); + c.Append(to_append); + }, + [](absl::Cord& c) { } + }, + { + "prepend string", + [](absl::Cord& c) { c.Prepend("9876543210"); }, + [](absl::Cord& c) { c.RemovePrefix(10); } + }, + { + "prepend cord", + [](absl::Cord& c) { + c.Prepend(absl::MakeFragmentedCord({"98765", "43210"})); + }, + [](absl::Cord& c) { c.RemovePrefix(10); } + }, + { + "prepend checksummed cord", + [](absl::Cord& c) { + absl::Cord to_prepend = absl::MakeFragmentedCord({"98765", "43210"}); + to_prepend.SetExpectedChecksum(999); + c.Prepend(to_prepend); + }, + [](absl::Cord& c) { c.RemovePrefix(10); } + }, + { + "prepend empty string", + [](absl::Cord& c) { c.Prepend(""); }, + [](absl::Cord& c) { } + }, + { + "prepend empty cord", + [](absl::Cord& c) { c.Prepend(absl::Cord()); }, + [](absl::Cord& c) { } + }, + { + "prepend empty checksummed cord", + [](absl::Cord& c) { + absl::Cord to_prepend; + to_prepend.SetExpectedChecksum(999); + c.Prepend(to_prepend); + }, + [](absl::Cord& c) { } + }, + { + "prepend self", + [](absl::Cord& c) { c.Prepend(c); }, + [](absl::Cord& c) { c.RemovePrefix(c.size() / 2); } + }, + {"remove prefix", [](absl::Cord& c) { c.RemovePrefix(c.size() / 2); }}, + {"remove suffix", [](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); }}, + {"remove 0-prefix", [](absl::Cord& c) { c.RemovePrefix(0); }}, + {"remove 0-suffix", [](absl::Cord& c) { c.RemoveSuffix(0); }}, + {"subcord", [](absl::Cord& c) { c = c.Subcord(1, c.size() - 2); }}, + { + "swap inline", + [](absl::Cord& c) { + absl::Cord other("swap"); + c.swap(other); + } + }, + { + "swap tree", + [](absl::Cord& c) { + absl::Cord other(std::string(10000, 'x')); + c.swap(other); + } + }, +}; +// clang-format on +} // namespace + +TEST_P(CordTest, ExpectedChecksum) { + for (const PopulatedCordFactory& factory : cord_factories) { + SCOPED_TRACE(factory.Name()); + for (bool shared : 
{false, true}) { + SCOPED_TRACE(shared); + + absl::Cord shared_cord_source = factory.Generate(); + auto make_instance = [=] { + return shared ? shared_cord_source : factory.Generate(); + }; + + const absl::Cord base_value = factory.Generate(); + const std::string base_value_as_string(factory.Generate().Flatten()); + + absl::Cord c1 = make_instance(); + EXPECT_FALSE(c1.ExpectedChecksum().has_value()); + + // Setting an expected checksum works, and retains the cord's bytes + c1.SetExpectedChecksum(12345); + EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345); + EXPECT_EQ(c1, base_value); + + // Test that setting an expected checksum again doesn't crash or leak + // memory. + c1.SetExpectedChecksum(12345); + EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345); + EXPECT_EQ(c1, base_value); + + // CRC persists through copies, assignments, and moves: + absl::Cord c1_copy_construct = c1; + EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345); + + absl::Cord c1_copy_assign; + c1_copy_assign = c1; + EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345); + + absl::Cord c1_move(std::move(c1_copy_assign)); + EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345); + + EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345); + + // A CRC Cord compares equal to its non-CRC value. + EXPECT_EQ(c1, make_instance()); + + for (const CordMutator& mutator : cord_mutators) { + SCOPED_TRACE(mutator.Name()); + + // Test that mutating a cord removes its stored checksum + absl::Cord c2 = make_instance(); + c2.SetExpectedChecksum(24680); + + mutator.Mutate(c2); + + if (c1 == c2) { + // Not a mutation (for example, appending the empty string). + // Whether the checksum is removed is not defined. + continue; + } + + EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt); + + if (mutator.CanUndo()) { + // Undoing an operation should not restore the checksum + mutator.Undo(c2); + EXPECT_EQ(c2, base_value); + EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt); + } + } + + absl::Cord c3 = make_instance(); + c3.SetExpectedChecksum(999); + const absl::Cord& cc3 = c3; + + // Test that all cord reading operations function in the face of an + // expected checksum. 
+ + // Test data precondition + ASSERT_TRUE(cc3.StartsWith("abcde")); + + EXPECT_EQ(cc3.size(), base_value_as_string.size()); + EXPECT_FALSE(cc3.empty()); + EXPECT_EQ(cc3.Compare(base_value), 0); + EXPECT_EQ(cc3.Compare(base_value_as_string), 0); + EXPECT_EQ(cc3.Compare("wxyz"), -1); + EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1); + EXPECT_EQ(cc3.Compare("aaaa"), 1); + EXPECT_EQ(cc3.Compare(absl::Cord("aaaa")), 1); + EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1); + EXPECT_EQ(absl::Cord("aaaa").Compare(cc3), -1); + EXPECT_TRUE(cc3.StartsWith("abcd")); + EXPECT_EQ(std::string(cc3), base_value_as_string); + + std::string dest; + absl::CopyCordToString(cc3, &dest); + EXPECT_EQ(dest, base_value_as_string); + + bool first_pass = true; + for (absl::string_view chunk : cc3.Chunks()) { + if (first_pass) { + EXPECT_TRUE(absl::StartsWith(chunk, "abcde")); + } + first_pass = false; + } + first_pass = true; + for (char ch : cc3.Chars()) { + if (first_pass) { + EXPECT_EQ(ch, 'a'); + } + first_pass = false; + } + EXPECT_TRUE(absl::StartsWith(*cc3.chunk_begin(), "abcde")); + EXPECT_EQ(*cc3.char_begin(), 'a'); + + auto char_it = cc3.char_begin(); + absl::Cord::Advance(&char_it, 2); + EXPECT_EQ(absl::Cord::AdvanceAndRead(&char_it, 2), "cd"); + EXPECT_EQ(*char_it, 'e'); + char_it = cc3.char_begin(); + absl::Cord::Advance(&char_it, 2); + EXPECT_TRUE(absl::StartsWith(absl::Cord::ChunkRemaining(char_it), "cde")); + + EXPECT_EQ(cc3[0], 'a'); + EXPECT_EQ(cc3[4], 'e'); + EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value)); + EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value_as_string)); + } + } +} + +// Test the special cases encountered with an empty checksummed cord. +TEST_P(CordTest, ChecksummedEmptyCord) { + absl::Cord c1; + EXPECT_FALSE(c1.ExpectedChecksum().has_value()); + + // Setting an expected checksum works. + c1.SetExpectedChecksum(12345); + EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345); + EXPECT_EQ(c1, ""); + EXPECT_TRUE(c1.empty()); + + // Test that setting an expected checksum again doesn't crash or leak memory. + c1.SetExpectedChecksum(12345); + EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345); + EXPECT_EQ(c1, ""); + EXPECT_TRUE(c1.empty()); + + // CRC persists through copies, assignments, and moves: + absl::Cord c1_copy_construct = c1; + EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345); + + absl::Cord c1_copy_assign; + c1_copy_assign = c1; + EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345); + + absl::Cord c1_move(std::move(c1_copy_assign)); + EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345); + + EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345); + + // A CRC Cord compares equal to its non-CRC value. + EXPECT_EQ(c1, absl::Cord()); + + for (const CordMutator& mutator : cord_mutators) { + SCOPED_TRACE(mutator.Name()); + + // Exercise mutating an empty checksummed cord to catch crashes and exercise + // memory sanitizers. + absl::Cord c2; + c2.SetExpectedChecksum(24680); + mutator.Mutate(c2); + + if (c2.empty()) { + // Not a mutation + continue; + } + EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt); + + if (mutator.CanUndo()) { + mutator.Undo(c2); + } + } + + absl::Cord c3; + c3.SetExpectedChecksum(999); + const absl::Cord& cc3 = c3; + + // Test that all cord reading operations function in the face of an + // expected checksum. 
+ EXPECT_TRUE(cc3.StartsWith("")); + EXPECT_TRUE(cc3.EndsWith("")); + EXPECT_TRUE(cc3.empty()); + EXPECT_EQ(cc3, ""); + EXPECT_EQ(cc3, absl::Cord()); + EXPECT_EQ(cc3.size(), 0); + EXPECT_EQ(cc3.Compare(absl::Cord()), 0); + EXPECT_EQ(cc3.Compare(c1), 0); + EXPECT_EQ(cc3.Compare(cc3), 0); + EXPECT_EQ(cc3.Compare(""), 0); + EXPECT_EQ(cc3.Compare("wxyz"), -1); + EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1); + EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1); + EXPECT_EQ(std::string(cc3), ""); + + std::string dest; + absl::CopyCordToString(cc3, &dest); + EXPECT_EQ(dest, ""); + + for (absl::string_view chunk : cc3.Chunks()) { // NOLINT(unreachable loop) + static_cast(chunk); + GTEST_FAIL() << "no chunks expected"; + } + EXPECT_TRUE(cc3.chunk_begin() == cc3.chunk_end()); + + for (char ch : cc3.Chars()) { // NOLINT(unreachable loop) + static_cast(ch); + GTEST_FAIL() << "no chars expected"; + } + EXPECT_TRUE(cc3.char_begin() == cc3.char_end()); + + EXPECT_EQ(cc3.TryFlat(), ""); + EXPECT_EQ(absl::HashOf(c3), absl::HashOf(absl::Cord())); + EXPECT_EQ(absl::HashOf(c3), absl::HashOf(absl::string_view())); +} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.cc index 18b20b83fd..7d97944eba 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.cc @@ -42,11 +42,11 @@ constexpr bool kUnescapeNulls = false; inline bool is_octal_digit(char c) { return ('0' <= c) && (c <= '7'); } -inline int hex_digit_to_int(char c) { +inline unsigned int hex_digit_to_int(char c) { static_assert('0' == 0x30 && 'A' == 0x41 && 'a' == 0x61, "Character set must be ASCII."); - assert(absl::ascii_isxdigit(c)); - int x = static_cast(c); + assert(absl::ascii_isxdigit(static_cast(c))); + unsigned int x = static_cast(c); if (x > '9') { x += 9; } @@ -121,27 +121,29 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped, case '7': { // octal digit: 1 to 3 digits const char* octal_start = p; - unsigned int ch = *p - '0'; - if (p < last_byte && is_octal_digit(p[1])) ch = ch * 8 + *++p - '0'; + unsigned int ch = static_cast(*p - '0'); // digit 1 if (p < last_byte && is_octal_digit(p[1])) - ch = ch * 8 + *++p - '0'; // now points at last digit + ch = ch * 8 + static_cast(*++p - '0'); // digit 2 + if (p < last_byte && is_octal_digit(p[1])) + ch = ch * 8 + static_cast(*++p - '0'); // digit 3 if (ch > 0xff) { if (error) { *error = "Value of \\" + - std::string(octal_start, p + 1 - octal_start) + + std::string(octal_start, + static_cast(p + 1 - octal_start)) + " exceeds 0xff"; } return false; } if ((ch == 0) && leave_nulls_escaped) { // Copy the escape sequence for the null character - const ptrdiff_t octal_size = p + 1 - octal_start; + const size_t octal_size = static_cast(p + 1 - octal_start); *d++ = '\\'; memmove(d, octal_start, octal_size); d += octal_size; break; } - *d++ = ch; + *d++ = static_cast(ch); break; } case 'x': @@ -149,32 +151,34 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped, if (p >= last_byte) { if (error) *error = "String cannot end with \\x"; return false; - } else if (!absl::ascii_isxdigit(p[1])) { + } else if (!absl::ascii_isxdigit(static_cast(p[1]))) { if (error) *error = "\\x cannot be followed by a non-hex digit"; return false; } unsigned int ch = 0; const char* hex_start = p; - while (p < last_byte && absl::ascii_isxdigit(p[1])) + 
while (p < last_byte && + absl::ascii_isxdigit(static_cast(p[1]))) // Arbitrarily many hex digits ch = (ch << 4) + hex_digit_to_int(*++p); if (ch > 0xFF) { if (error) { *error = "Value of \\" + - std::string(hex_start, p + 1 - hex_start) + + std::string(hex_start, + static_cast(p + 1 - hex_start)) + " exceeds 0xff"; } return false; } if ((ch == 0) && leave_nulls_escaped) { // Copy the escape sequence for the null character - const ptrdiff_t hex_size = p + 1 - hex_start; + const size_t hex_size = static_cast(p + 1 - hex_start); *d++ = '\\'; memmove(d, hex_start, hex_size); d += hex_size; break; } - *d++ = ch; + *d++ = static_cast(ch); break; } case 'u': { @@ -184,18 +188,20 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped, if (p + 4 >= end) { if (error) { *error = "\\u must be followed by 4 hex digits: \\" + - std::string(hex_start, p + 1 - hex_start); + std::string(hex_start, + static_cast(p + 1 - hex_start)); } return false; } for (int i = 0; i < 4; ++i) { // Look one char ahead. - if (absl::ascii_isxdigit(p[1])) { + if (absl::ascii_isxdigit(static_cast(p[1]))) { rune = (rune << 4) + hex_digit_to_int(*++p); // Advance p. } else { if (error) { *error = "\\u must be followed by 4 hex digits: \\" + - std::string(hex_start, p + 1 - hex_start); + std::string(hex_start, + static_cast(p + 1 - hex_start)); } return false; } @@ -220,20 +226,22 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped, if (p + 8 >= end) { if (error) { *error = "\\U must be followed by 8 hex digits: \\" + - std::string(hex_start, p + 1 - hex_start); + std::string(hex_start, + static_cast(p + 1 - hex_start)); } return false; } for (int i = 0; i < 8; ++i) { // Look one char ahead. - if (absl::ascii_isxdigit(p[1])) { + if (absl::ascii_isxdigit(static_cast(p[1]))) { // Don't change rune until we're sure this // is within the Unicode limit, but do advance p. uint32_t newrune = (rune << 4) + hex_digit_to_int(*++p); if (newrune > 0x10FFFF) { if (error) { *error = "Value of \\" + - std::string(hex_start, p + 1 - hex_start) + + std::string(hex_start, + static_cast(p + 1 - hex_start)) + " exceeds Unicode limit (0x10FFFF)"; } return false; @@ -243,7 +251,8 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped, } else { if (error) { *error = "\\U must be followed by 8 hex digits: \\" + - std::string(hex_start, p + 1 - hex_start); + std::string(hex_start, + static_cast(p + 1 - hex_start)); } return false; } @@ -291,7 +300,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped, error)) { return false; } - dest->erase(dest_size); + dest->erase(static_cast(dest_size)); return true; } @@ -311,7 +320,7 @@ std::string CEscapeInternal(absl::string_view src, bool use_hex, std::string dest; bool last_hex_escape = false; // true if last output char was \xNN. - for (unsigned char c : src) { + for (char c : src) { bool is_hex_escape = false; switch (c) { case '\n': dest.append("\\" "n"); break; @@ -320,28 +329,30 @@ std::string CEscapeInternal(absl::string_view src, bool use_hex, case '\"': dest.append("\\" "\""); break; case '\'': dest.append("\\" "'"); break; case '\\': dest.append("\\" "\\"); break; - default: + default: { // Note that if we emit \xNN and the src character after that is a hex // digit then that digit must be escaped too to prevent it being // interpreted as part of the character code by C. 
- if ((!utf8_safe || c < 0x80) && - (!absl::ascii_isprint(c) || - (last_hex_escape && absl::ascii_isxdigit(c)))) { + const unsigned char uc = static_cast(c); + if ((!utf8_safe || uc < 0x80) && + (!absl::ascii_isprint(uc) || + (last_hex_escape && absl::ascii_isxdigit(uc)))) { if (use_hex) { dest.append("\\" "x"); - dest.push_back(numbers_internal::kHexChar[c / 16]); - dest.push_back(numbers_internal::kHexChar[c % 16]); + dest.push_back(numbers_internal::kHexChar[uc / 16]); + dest.push_back(numbers_internal::kHexChar[uc % 16]); is_hex_escape = true; } else { dest.append("\\"); - dest.push_back(numbers_internal::kHexChar[c / 64]); - dest.push_back(numbers_internal::kHexChar[(c % 64) / 8]); - dest.push_back(numbers_internal::kHexChar[c % 8]); + dest.push_back(numbers_internal::kHexChar[uc / 64]); + dest.push_back(numbers_internal::kHexChar[(uc % 64) / 8]); + dest.push_back(numbers_internal::kHexChar[uc % 8]); } } else { dest.push_back(c); break; } + } } last_hex_escape = is_hex_escape; } @@ -350,7 +361,7 @@ std::string CEscapeInternal(absl::string_view src, bool use_hex, } /* clang-format off */ -constexpr char c_escaped_len[256] = { +constexpr unsigned char c_escaped_len[256] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 2, 4, 4, // \t, \n, \r 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // ", ' @@ -375,7 +386,8 @@ constexpr char c_escaped_len[256] = { // that UTF-8 bytes are not handled specially. inline size_t CEscapedLength(absl::string_view src) { size_t escaped_len = 0; - for (unsigned char c : src) escaped_len += c_escaped_len[c]; + for (char c : src) + escaped_len += c_escaped_len[static_cast(c)]; return escaped_len; } @@ -391,8 +403,8 @@ void CEscapeAndAppendInternal(absl::string_view src, std::string* dest) { cur_dest_len + escaped_len); char* append_ptr = &(*dest)[cur_dest_len]; - for (unsigned char c : src) { - int char_len = c_escaped_len[c]; + for (char c : src) { + size_t char_len = c_escaped_len[static_cast(c)]; if (char_len == 1) { *append_ptr++ = c; } else if (char_len == 2) { @@ -424,9 +436,9 @@ void CEscapeAndAppendInternal(absl::string_view src, std::string* dest) { } } else { *append_ptr++ = '\\'; - *append_ptr++ = '0' + c / 64; - *append_ptr++ = '0' + (c % 64) / 8; - *append_ptr++ = '0' + c % 8; + *append_ptr++ = '0' + static_cast(c) / 64; + *append_ptr++ = '0' + (static_cast(c) % 64) / 8; + *append_ptr++ = '0' + static_cast(c) % 8; } } } @@ -440,7 +452,7 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest, size_t destidx = 0; int decode = 0; int state = 0; - unsigned int ch = 0; + unsigned char ch = 0; unsigned int temp = 0; // If "char" is signed by default, using *src as an array index results in @@ -500,13 +512,13 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest, // how to handle those cases. GET_INPUT(first, 4); - temp = decode; + temp = static_cast(decode); GET_INPUT(second, 3); - temp = (temp << 6) | decode; + temp = (temp << 6) | static_cast(decode); GET_INPUT(third, 2); - temp = (temp << 6) | decode; + temp = (temp << 6) | static_cast(decode); GET_INPUT(fourth, 1); - temp = (temp << 6) | decode; + temp = (temp << 6) | static_cast(decode); } else { // We really did have four good data bytes, so advance four // characters in the string. @@ -518,11 +530,11 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest, // temp has 24 bits of input, so write that out as three bytes. 
if (destidx + 3 > szdest) return false; - dest[destidx + 2] = temp; + dest[destidx + 2] = static_cast(temp); temp >>= 8; - dest[destidx + 1] = temp; + dest[destidx + 1] = static_cast(temp); temp >>= 8; - dest[destidx] = temp; + dest[destidx] = static_cast(temp); destidx += 3; } } else { @@ -583,18 +595,18 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest, } // Each input character gives us six bits of output. - temp = (temp << 6) | decode; + temp = (temp << 6) | static_cast(decode); ++state; if (state == 4) { // If we've accumulated 24 bits of output, write that out as // three bytes. if (dest) { if (destidx + 3 > szdest) return false; - dest[destidx + 2] = temp; + dest[destidx + 2] = static_cast(temp); temp >>= 8; - dest[destidx + 1] = temp; + dest[destidx + 1] = static_cast(temp); temp >>= 8; - dest[destidx] = temp; + dest[destidx] = static_cast(temp); } destidx += 3; state = 0; @@ -619,7 +631,7 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest, if (dest) { if (destidx + 1 > szdest) return false; temp >>= 4; - dest[destidx] = temp; + dest[destidx] = static_cast(temp); } ++destidx; expected_equals = 2; @@ -630,9 +642,9 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest, if (dest) { if (destidx + 2 > szdest) return false; temp >>= 2; - dest[destidx + 1] = temp; + dest[destidx + 1] = static_cast(temp); temp >>= 8; - dest[destidx] = temp; + dest[destidx] = static_cast(temp); } destidx += 2; expected_equals = 1; @@ -773,7 +785,8 @@ bool Base64UnescapeInternal(const char* src, size_t slen, String* dest, const signed char* unbase64) { // Determine the size of the output string. Base64 encodes every 3 bytes into // 4 characters. any leftover chars are added directly for good measure. - // This is documented in the base64 RFC: http://tools.ietf.org/html/rfc3548 + // This is documented in the base64 RFC: + // https://datatracker.ietf.org/doc/html/rfc3548 const size_t dest_len = 3 * (slen / 4) + (slen % 4); strings_internal::STLStringResizeUninitialized(dest, dest_len); @@ -821,9 +834,9 @@ constexpr char kHexValueLenient[256] = { // or a string. This works because we use the [] operator to access // individual characters at a time. template -void HexStringToBytesInternal(const char* from, T to, ptrdiff_t num) { - for (int i = 0; i < num; i++) { - to[i] = (kHexValueLenient[from[i * 2] & 0xFF] << 4) + +void HexStringToBytesInternal(const char* from, T to, size_t num) { + for (size_t i = 0; i < num; i++) { + to[i] = static_cast(kHexValueLenient[from[i * 2] & 0xFF] << 4) + (kHexValueLenient[from[i * 2 + 1] & 0xFF]); } } @@ -831,7 +844,7 @@ void HexStringToBytesInternal(const char* from, T to, ptrdiff_t num) { // This is a templated function so that T can be either a char* or a // std::string. template -void BytesToHexStringInternal(const unsigned char* src, T dest, ptrdiff_t num) { +void BytesToHexStringInternal(const unsigned char* src, T dest, size_t num) { auto dest_ptr = &dest[0]; for (auto src_ptr = src; src_ptr != (src + num); ++src_ptr, dest_ptr += 2) { const char* hex_p = &numbers_internal::kHexTable[*src_ptr * 2]; @@ -876,8 +889,8 @@ std::string Utf8SafeCHexEscape(absl::string_view src) { // WebSafeBase64Escape() - Google's variation of base64 encoder // // Check out -// http://tools.ietf.org/html/rfc2045 for formal description, but what we -// care about is that... +// https://datatracker.ietf.org/doc/html/rfc2045 for formal description, but +// what we care about is that... 
// Take the encoded stuff in groups of 4 characters and turn each // character into a code 0 to 63 thus: // A-Z map to 0 to 25 diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.h index f5ca26c5da..aa6d17508c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping.h @@ -122,6 +122,8 @@ std::string Utf8SafeCHexEscape(absl::string_view src); // Converts a `src` string encoded in Base64 to its binary equivalent, writing // it to a `dest` buffer, returning `true` on success. If `src` contains invalid // characters, `dest` is cleared and returns `false`. +// Padding is optional. If padding is included, it must be correct. In the +// padding, '=' and '.' are treated identically. bool Base64Unescape(absl::string_view src, std::string* dest); // WebSafeBase64Unescape() @@ -129,6 +131,8 @@ bool Base64Unescape(absl::string_view src, std::string* dest); // Converts a `src` string encoded in Base64 to its binary equivalent, writing // it to a `dest` buffer, but using '-' instead of '+', and '_' instead of '/'. // If `src` contains invalid characters, `dest` is cleared and returns `false`. +// Padding is optional. If padding is included, it must be correct. In the +// padding, '=' and '.' are treated identically. bool WebSafeBase64Unescape(absl::string_view src, std::string* dest); // Base64Escape() diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping_test.cc index 45671a0ed5..44ffcba7e3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/escaping_test.cc @@ -617,6 +617,48 @@ TEST(Base64, EscapeAndUnescape) { TestEscapeAndUnescape(); } +TEST(Base64, Padding) { + // Padding is optional. + // '.' is an acceptable padding character, just like '='. 
+ std::initializer_list good_padding = { + "YQ", + "YQ==", + "YQ=.", + "YQ.=", + "YQ..", + }; + for (absl::string_view b64 : good_padding) { + std::string decoded; + EXPECT_TRUE(absl::Base64Unescape(b64, &decoded)); + EXPECT_EQ(decoded, "a"); + std::string websafe_decoded; + EXPECT_TRUE(absl::WebSafeBase64Unescape(b64, &websafe_decoded)); + EXPECT_EQ(websafe_decoded, "a"); + } + std::initializer_list bad_padding = { + "YQ=", + "YQ.", + "YQ===", + "YQ==.", + "YQ=.=", + "YQ=..", + "YQ.==", + "YQ.=.", + "YQ..=", + "YQ...", + "YQ====", + "YQ....", + "YQ=====", + "YQ.....", + }; + for (absl::string_view b64 : bad_padding) { + std::string decoded; + EXPECT_FALSE(absl::Base64Unescape(b64, &decoded)); + std::string websafe_decoded; + EXPECT_FALSE(absl::WebSafeBase64Unescape(b64, &websafe_decoded)); + } +} + TEST(Base64, DISABLED_HugeData) { const size_t kSize = size_t(3) * 1000 * 1000 * 1000; static_assert(kSize % 3 == 0, "kSize must be divisible by 3"); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/char_map.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/char_map.h index 61484de0b7..5aabc1fc64 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/char_map.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/char_map.h @@ -103,10 +103,9 @@ class Charmap { constexpr Charmap(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3) : m_{b0, b1, b2, b3} {} - static constexpr uint64_t RangeForWord(unsigned char lo, unsigned char hi, - uint64_t word) { - return OpenRangeFromZeroForWord(hi + 1, word) & - ~OpenRangeFromZeroForWord(lo, word); + static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) { + return OpenRangeFromZeroForWord(static_cast(hi) + 1, word) & + ~OpenRangeFromZeroForWord(static_cast(lo), word); } // All the chars in the specified word of the range [0, upper). @@ -119,13 +118,16 @@ class Charmap { : (~static_cast(0) >> (64 - upper % 64)); } - static constexpr uint64_t CharMaskForWord(unsigned char x, uint64_t word) { - return (x / 64 == word) ? (static_cast(1) << (x % 64)) : 0; + static constexpr uint64_t CharMaskForWord(char x, uint64_t word) { + const auto unsigned_x = static_cast(x); + return (unsigned_x / 64 == word) + ? (static_cast(1) << (unsigned_x % 64)) + : 0; } - private: - void SetChar(unsigned char c) { - m_[c / 64] |= static_cast(1) << (c % 64); + void SetChar(char c) { + const auto unsigned_c = static_cast(c); + m_[unsigned_c / 64] |= static_cast(1) << (unsigned_c % 64); } uint64_t m_[4]; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc index ebf8c0791a..282b639eb2 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc @@ -242,7 +242,7 @@ int BigUnsigned::ReadDigits(const char* begin, const char* end, // decimal exponent to compensate. --exponent_adjust; } - int digit = (*begin - '0'); + char digit = (*begin - '0'); --significant_digits; if (significant_digits == 0 && std::next(begin) != end && (digit == 0 || digit == 5)) { @@ -255,7 +255,7 @@ int BigUnsigned::ReadDigits(const char* begin, const char* end, // 500000...000000000001 to correctly round up, rather than to nearest. 
++digit; } - queued = 10 * queued + digit; + queued = 10 * queued + static_cast(digit); ++digits_queued; if (digits_queued == kMaxSmallPowerOfTen) { MultiplyBy(kTenToNth[kMaxSmallPowerOfTen]); @@ -341,8 +341,8 @@ std::string BigUnsigned::ToString() const { std::string result; // Build result in reverse order while (copy.size() > 0) { - int next_digit = copy.DivMod<10>(); - result.push_back('0' + next_digit); + uint32_t next_digit = copy.DivMod<10>(); + result.push_back('0' + static_cast(next_digit)); } if (result.empty()) { result.push_back('0'); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc index d29acaf462..98823def83 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc @@ -190,11 +190,11 @@ bool IsDigit<16>(char ch) { template <> unsigned ToDigit<10>(char ch) { - return ch - '0'; + return static_cast(ch - '0'); } template <> unsigned ToDigit<16>(char ch) { - return kAsciiToInt[static_cast(ch)]; + return static_cast(kAsciiToInt[static_cast(ch)]); } template <> diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_data_edge.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_data_edge.h new file mode 100644 index 0000000000..e18b33e1b0 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_data_edge.h @@ -0,0 +1,63 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_ +#define ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace cord_internal { + +// Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node +// holding a FLAT or EXTERNAL child rep. Requires `rep != nullptr`. +inline bool IsDataEdge(const CordRep* edge) { + assert(edge != nullptr); + + // The fast path is that `edge` is an EXTERNAL or FLAT node, making the below + // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL + // check in the slow path of the SUBSTRING check to optimize for the hot path. + if (edge->tag == EXTERNAL || edge->tag >= FLAT) return true; + if (edge->tag == SUBSTRING) edge = edge->substring()->child; + return edge->tag == EXTERNAL || edge->tag >= FLAT; +} + +// Returns the `absl::string_view` data reference for the provided data edge. +// Requires 'IsDataEdge(edge) == true`. 
+inline absl::string_view EdgeData(const CordRep* edge) { + assert(IsDataEdge(edge)); + + size_t offset = 0; + const size_t length = edge->length; + if (edge->IsSubstring()) { + offset = edge->substring()->start; + edge = edge->substring()->child; + } + return edge->tag >= FLAT + ? absl::string_view{edge->flat()->Data() + offset, length} + : absl::string_view{edge->external()->base + offset, length}; +} + +} // namespace cord_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc new file mode 100644 index 0000000000..8fce3bc6a9 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc @@ -0,0 +1,130 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/internal/cord_data_edge.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_test_util.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace cord_internal { +namespace { + +using ::absl::cordrep_testing::MakeExternal; +using ::absl::cordrep_testing::MakeFlat; +using ::absl::cordrep_testing::MakeSubstring; + +TEST(CordDataEdgeTest, IsDataEdgeOnFlat) { + CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ..."); + EXPECT_TRUE(IsDataEdge(rep)); + CordRep::Unref(rep); +} + +TEST(CordDataEdgeTest, IsDataEdgeOnExternal) { + CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ..."); + EXPECT_TRUE(IsDataEdge(rep)); + CordRep::Unref(rep); +} + +TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfFlat) { + CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ..."); + CordRep* substr = MakeSubstring(1, 20, rep); + EXPECT_TRUE(IsDataEdge(substr)); + CordRep::Unref(substr); +} + +TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfExternal) { + CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ..."); + CordRep* substr = MakeSubstring(1, 20, rep); + EXPECT_TRUE(IsDataEdge(substr)); + CordRep::Unref(substr); +} + +TEST(CordDataEdgeTest, IsDataEdgeOnBtree) { + CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ..."); + CordRepBtree* tree = CordRepBtree::New(rep); + EXPECT_FALSE(IsDataEdge(tree)); + CordRep::Unref(tree); +} + +TEST(CordDataEdgeTest, IsDataEdgeOnBadSubstr) { + CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ..."); + CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep)); + EXPECT_FALSE(IsDataEdge(substr)); + CordRep::Unref(substr); +} + +TEST(CordDataEdgeTest, EdgeDataOnFlat) { + absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ..."; + CordRep* rep = MakeFlat(value); + EXPECT_EQ(EdgeData(rep), value); + CordRep::Unref(rep); +} + 
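[Editorial aside, not part of the patch] The sketch below illustrates the invariant behind the new data-edge helpers: a SUBSTRING wrapping a FLAT or EXTERNAL node is still a data edge, and EdgeData() folds in the substring offset. It assumes the internal cord_internal API plus the cordrep_testing MakeFlat/MakeSubstring helpers used by the tests in this file, with MakeSubstring(start, len, rep) adopting the passed reference; those assumptions come from the test usage above, not from a public API.

#include <cassert>

#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_test_util.h"

// Sketch only: exercises IsDataEdge()/EdgeData() on a FLAT rep and on a
// SUBSTRING-of-FLAT rep, mirroring the tests in this file.
void DataEdgeSketch() {
  using absl::cord_internal::CordRep;
  using absl::cord_internal::EdgeData;
  using absl::cord_internal::IsDataEdge;
  using absl::cordrep_testing::MakeFlat;
  using absl::cordrep_testing::MakeSubstring;

  CordRep* flat = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
  assert(IsDataEdge(flat));
  assert(EdgeData(flat) == "Lorem ipsum dolor sit amet, consectetur ...");

  // A substring rep wrapping the flat is still a data edge; EdgeData()
  // returns the offset/length-adjusted view ("ipsum").
  CordRep* substr = MakeSubstring(6, 5, flat);  // Assumed to adopt `flat`.
  assert(IsDataEdge(substr));
  assert(EdgeData(substr) == "ipsum");

  CordRep::Unref(substr);
}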
+TEST(CordDataEdgeTest, EdgeDataOnExternal) { + absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ..."; + CordRep* rep = MakeExternal(value); + EXPECT_EQ(EdgeData(rep), value); + CordRep::Unref(rep); +} + +TEST(CordDataEdgeTest, EdgeDataOnSubstringOfFlat) { + absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ..."; + CordRep* rep = MakeFlat(value); + CordRep* substr = MakeSubstring(1, 20, rep); + EXPECT_EQ(EdgeData(substr), value.substr(1, 20)); + CordRep::Unref(substr); +} + +TEST(CordDataEdgeTest, EdgeDataOnSubstringOfExternal) { + absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ..."; + CordRep* rep = MakeExternal(value); + CordRep* substr = MakeSubstring(1, 20, rep); + EXPECT_EQ(EdgeData(substr), value.substr(1, 20)); + CordRep::Unref(substr); +} + +#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG) + +TEST(CordDataEdgeTest, IsDataEdgeOnNullPtr) { + EXPECT_DEATH(IsDataEdge(nullptr), ".*"); +} + +TEST(CordDataEdgeTest, EdgeDataOnNullPtr) { + EXPECT_DEATH(EdgeData(nullptr), ".*"); +} + +TEST(CordDataEdgeTest, EdgeDataOnBtree) { + CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ..."); + CordRepBtree* tree = CordRepBtree::New(rep); + EXPECT_DEATH(EdgeData(tree), ".*"); + CordRep::Unref(tree); +} + +TEST(CordDataEdgeTest, EdgeDataOnBadSubstr) { + CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ..."); + CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep)); + EXPECT_DEATH(EdgeData(substr), ".*"); + CordRep::Unref(substr); +} + +#endif // GTEST_HAS_DEATH_TEST && !NDEBUG + +} // namespace +} // namespace cord_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc index 1767e6fcc5..b6b06cfa2a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc @@ -17,69 +17,57 @@ #include #include +#include "absl/base/internal/raw_logging.h" #include "absl/container/inlined_vector.h" #include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_crc.h" #include "absl/strings/internal/cord_rep_flat.h" #include "absl/strings/internal/cord_rep_ring.h" +#include "absl/strings/str_cat.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { -ABSL_CONST_INIT std::atomic cord_btree_enabled(kCordEnableBtreeDefault); ABSL_CONST_INIT std::atomic cord_ring_buffer_enabled( kCordEnableRingBufferDefault); ABSL_CONST_INIT std::atomic shallow_subcords_enabled( kCordShallowSubcordsDefault); ABSL_CONST_INIT std::atomic cord_btree_exhaustive_validation(false); +void LogFatalNodeType(CordRep* rep) { + ABSL_INTERNAL_LOG(FATAL, absl::StrCat("Unexpected node type: ", + static_cast(rep->tag))); +} + void CordRep::Destroy(CordRep* rep) { assert(rep != nullptr); - absl::InlinedVector pending; while (true) { assert(!rep->refcount.IsImmortal()); - if (rep->tag == CONCAT) { - CordRepConcat* rep_concat = rep->concat(); - CordRep* right = rep_concat->right; - if (!right->refcount.Decrement()) { - pending.push_back(right); - } - CordRep* left = rep_concat->left; - delete rep_concat; - rep = nullptr; - if (!left->refcount.Decrement()) { - rep = left; - continue; - } - } else if (rep->tag == BTREE) { + if (rep->tag == BTREE) { 
CordRepBtree::Destroy(rep->btree()); - rep = nullptr; + return; } else if (rep->tag == RING) { CordRepRing::Destroy(rep->ring()); - rep = nullptr; + return; } else if (rep->tag == EXTERNAL) { CordRepExternal::Delete(rep); - rep = nullptr; + return; } else if (rep->tag == SUBSTRING) { CordRepSubstring* rep_substring = rep->substring(); - CordRep* child = rep_substring->child; + rep = rep_substring->child; delete rep_substring; - rep = nullptr; - if (!child->refcount.Decrement()) { - rep = child; - continue; + if (rep->refcount.Decrement()) { + return; } + } else if (rep->tag == CRC) { + CordRepCrc::Destroy(rep->crc()); + return; } else { + assert(rep->IsFlat()); CordRepFlat::Delete(rep); - rep = nullptr; - } - - if (!pending.empty()) { - rep = pending.back(); - pending.pop_back(); - } else { - break; + return; } } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.h index 7172b147ee..fcca3a28cd 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_internal.h @@ -21,6 +21,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/invoke.h" @@ -33,16 +34,27 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { +// The overhead of a vtable is too much for Cord, so we roll our own subclasses +// using only a single byte to differentiate classes from each other - the "tag" +// byte. Define the subclasses first so we can provide downcasting helper +// functions in the base class. +struct CordRep; +struct CordRepConcat; +struct CordRepExternal; +struct CordRepFlat; +struct CordRepSubstring; +struct CordRepCrc; +class CordRepRing; +class CordRepBtree; + class CordzInfo; // Default feature enable states for cord ring buffers enum CordFeatureDefaults { - kCordEnableBtreeDefault = false, kCordEnableRingBufferDefault = false, kCordShallowSubcordsDefault = false }; -extern std::atomic cord_btree_enabled; extern std::atomic cord_ring_buffer_enabled; extern std::atomic shallow_subcords_enabled; @@ -52,10 +64,6 @@ extern std::atomic shallow_subcords_enabled; // O(n^2) complexity as recursive / full tree validation is O(n). extern std::atomic cord_btree_exhaustive_validation; -inline void enable_cord_btree(bool enable) { - cord_btree_enabled.store(enable, std::memory_order_relaxed); -} - inline void enable_cord_ring_buffer(bool enable) { cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed); } @@ -80,6 +88,9 @@ enum Constants { kMaxBytesToCopy = 511 }; +// Emits a fatal error "Unexpected node type: xyz" and aborts the program. +ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep); + // Compact class for tracking the reference count and state flags for CordRep // instances. Data is stored in an atomic int32_t for compactness and speed. class RefcountAndFlags { @@ -118,8 +129,9 @@ class RefcountAndFlags { } // Returns the current reference count using acquire semantics. - inline int32_t Get() const { - return count_.load(std::memory_order_acquire) >> kNumFlags; + inline size_t Get() const { + return static_cast(count_.load(std::memory_order_acquire) >> + kNumFlags); } // Returns whether the atomic integer is 1. 
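[Editorial aside, not part of the patch] The CRC branch added to CordRep::Destroy() above corresponds to the expected-checksum feature exercised by the cord_test.cc changes earlier in this patch. A minimal sketch of that feature as the tests use it; whether SetExpectedChecksum() attaches a CordRepCrc node for a small inline cord is an assumption about the implementation, and the checksum value is arbitrary (nothing validates the data against it).

#include <iostream>

#include "absl/strings/cord.h"

int main() {
  absl::Cord cord("abcde");
  cord.SetExpectedChecksum(12345);  // Assumed to attach a CRC node internally.
  if (cord.ExpectedChecksum().has_value()) {
    std::cout << "crc: " << *cord.ExpectedChecksum() << "\n";  // crc: 12345
  }

  cord.Append("fgh");  // Per the tests, a real mutation drops the checksum.
  std::cout << cord.ExpectedChecksum().has_value() << "\n";  // 0
  return 0;
}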
@@ -145,7 +157,7 @@ class RefcountAndFlags { // used for the StringConstant constructor to avoid collecting immutable // constant cords. // kReservedFlag is reserved for future use. - enum { + enum Flags { kNumFlags = 2, kImmortalFlag = 0x1, @@ -162,34 +174,26 @@ class RefcountAndFlags { std::atomic count_; }; -// The overhead of a vtable is too much for Cord, so we roll our own subclasses -// using only a single byte to differentiate classes from each other - the "tag" -// byte. Define the subclasses first so we can provide downcasting helper -// functions in the base class. - -struct CordRepConcat; -struct CordRepExternal; -struct CordRepFlat; -struct CordRepSubstring; -class CordRepRing; -class CordRepBtree; - // Various representations that we allow enum CordRepKind { - CONCAT = 0, + UNUSED_0 = 0, SUBSTRING = 1, - BTREE = 2, - RING = 3, - EXTERNAL = 4, + CRC = 2, + BTREE = 3, + RING = 4, + EXTERNAL = 5, // We have different tags for different sized flat arrays, - // starting with FLAT, and limited to MAX_FLAT_TAG. The 225 value is based on - // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed - // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well - // as the Tag <---> Size logic so that FLAT stil represents the minimum flat - // allocation size. (32 bytes as of now). - FLAT = 5, - MAX_FLAT_TAG = 225 + // starting with FLAT, and limited to MAX_FLAT_TAG. The below values map to an + // allocated range of 32 bytes to 256 KB. The current granularity is: + // - 8 byte granularity for flat sizes in [32 - 512] + // - 64 byte granularity for flat sizes in (512 - 8KiB] + // - 4KiB byte granularity for flat sizes in (8KiB, 256 KiB] + // If a new tag is needed in the future, then 'FLAT' and 'MAX_FLAT_TAG' should + // be adjusted as well as the Tag <---> Size mapping logic so that FLAT still + // represents the minimum flat allocation size. (32 bytes as of now). + FLAT = 6, + MAX_FLAT_TAG = 248 }; // There are various locations where we want to check if some rep is a 'plain' @@ -204,6 +208,18 @@ static_assert(EXTERNAL == RING + 1, "BTREE and EXTERNAL not consecutive"); static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive"); struct CordRep { + // Result from an `extract edge` operation. Contains the (possibly changed) + // tree node as well as the extracted edge, or {tree, nullptr} if no edge + // could be extracted. + // On success, the returned `tree` value is null if `extracted` was the only + // data edge inside the tree, a data edge if there were only two data edges in + // the tree, or the (possibly new / smaller) remaining tree with the extracted + // data edge removed. + struct ExtractResult { + CordRep* tree; + CordRep* extracted; + }; + CordRep() = default; constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l) : length(l), refcount(immortal), tag(EXTERNAL), storage{} {} @@ -228,18 +244,18 @@ struct CordRep { // Returns true if this instance's tag matches the requested type. 
constexpr bool IsRing() const { return tag == RING; } - constexpr bool IsConcat() const { return tag == CONCAT; } constexpr bool IsSubstring() const { return tag == SUBSTRING; } + constexpr bool IsCrc() const { return tag == CRC; } constexpr bool IsExternal() const { return tag == EXTERNAL; } constexpr bool IsFlat() const { return tag >= FLAT; } constexpr bool IsBtree() const { return tag == BTREE; } inline CordRepRing* ring(); inline const CordRepRing* ring() const; - inline CordRepConcat* concat(); - inline const CordRepConcat* concat() const; inline CordRepSubstring* substring(); inline const CordRepSubstring* substring() const; + inline CordRepCrc* crc(); + inline const CordRepCrc* crc() const; inline CordRepExternal* external(); inline const CordRepExternal* external() const; inline CordRepFlat* flat(); @@ -262,17 +278,23 @@ struct CordRep { static inline void Unref(CordRep* rep); }; -struct CordRepConcat : public CordRep { - CordRep* left; - CordRep* right; - - uint8_t depth() const { return storage[0]; } - void set_depth(uint8_t depth) { storage[0] = depth; } -}; - struct CordRepSubstring : public CordRep { size_t start; // Starting offset of substring in child CordRep* child; + + // Creates a substring on `child`, adopting a reference on `child`. + // Requires `child` to be either a flat or external node, and `pos` and `n` to + // form a non-empty partial sub range of `'child`, i.e.: + // `n > 0 && n < length && n + pos <= length` + static inline CordRepSubstring* Create(CordRep* child, size_t pos, size_t n); + + // Creates a substring of `rep`. Does not adopt a reference on `rep`. + // Requires `IsDataEdge(rep) && n > 0 && pos + n <= rep->length`. + // If `n == rep->length` then this method returns `CordRep::Ref(rep)` + // If `rep` is a substring of a flat or external node, then this method will + // return a new substring of that flat or external node with `pos` adjusted + // with the original `start` position. + static inline CordRep* Substring(CordRep* rep, size_t pos, size_t n); }; // Type for function pointer that will invoke the releaser function and also @@ -336,6 +358,47 @@ struct CordRepExternalImpl } }; +inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos, + size_t n) { + assert(child != nullptr); + assert(n > 0); + assert(n < child->length); + assert(pos < child->length); + assert(n <= child->length - pos); + + // TODO(b/217376272): Harden internal logic. + // Move to strategical places inside the Cord logic and make this an assert. 
+ if (ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) { + LogFatalNodeType(child); + } + + CordRepSubstring* rep = new CordRepSubstring(); + rep->length = n; + rep->tag = SUBSTRING; + rep->start = pos; + rep->child = child; + return rep; +} + +inline CordRep* CordRepSubstring::Substring(CordRep* rep, size_t pos, + size_t n) { + assert(rep != nullptr); + assert(n != 0); + assert(pos < rep->length); + assert(n <= rep->length - pos); + if (n == rep->length) return CordRep::Ref(rep); + if (rep->IsSubstring()) { + pos += rep->substring()->start; + rep = rep->substring()->child; + } + CordRepSubstring* substr = new CordRepSubstring(); + substr->length = n; + substr->tag = SUBSTRING; + substr->start = pos; + substr->child = CordRep::Ref(rep); + return substr; +} + inline void CordRepExternal::Delete(CordRep* rep) { assert(rep != nullptr && rep->IsExternal()); auto* rep_external = static_cast(rep); @@ -349,7 +412,8 @@ struct ConstInitExternalStorage { }; template -CordRepExternal ConstInitExternalStorage::value(Str::value); +ABSL_CONST_INIT CordRepExternal + ConstInitExternalStorage::value(Str::value); enum { kMaxInline = 15, @@ -359,8 +423,8 @@ constexpr char GetOrNull(absl::string_view data, size_t pos) { return pos < data.size() ? data[pos] : '\0'; } -// We store cordz_info as 64 bit pointer value in big endian format. This -// guarantees that the least significant byte of cordz_info matches the last +// We store cordz_info as 64 bit pointer value in little endian format. This +// guarantees that the least significant byte of cordz_info matches the first // byte of the inline data representation in as_chars_, which holds the inlined // size or the 'is_tree' bit. using cordz_info_t = int64_t; @@ -370,14 +434,14 @@ using cordz_info_t = int64_t; static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, ""); static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), ""); -// BigEndianByte() creates a big endian representation of 'value', i.e.: a big -// endian value where the last byte in the host's representation holds 'value`, -// with all other bytes being 0. -static constexpr cordz_info_t BigEndianByte(unsigned char value) { +// LittleEndianByte() creates a little endian representation of 'value', i.e.: +// a little endian value where the first byte in the host's representation +// holds 'value`, with all other bytes being 0. +static constexpr cordz_info_t LittleEndianByte(unsigned char value) { #if defined(ABSL_IS_BIG_ENDIAN) - return value; -#else return static_cast(value) << ((sizeof(cordz_info_t) - 1) * 8); +#else + return value; #endif } @@ -386,25 +450,37 @@ class InlineData { // DefaultInitType forces the use of the default initialization constructor. enum DefaultInitType { kDefaultInit }; - // kNullCordzInfo holds the big endian representation of intptr_t(1) + // kNullCordzInfo holds the little endian representation of intptr_t(1) // This is the 'null' / initial value of 'cordz_info'. The null value // is specifically big endian 1 as with 64-bit pointers, the last // byte of cordz_info overlaps with the last byte holding the tag. - static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1); + static constexpr cordz_info_t kNullCordzInfo = LittleEndianByte(1); + + // kTagOffset contains the offset of the control byte / tag. This constant is + // intended mostly for debugging purposes: do not remove this constant as it + // is actively inspected and used by gdb pretty printing code. 
+ static constexpr size_t kTagOffset = 0; constexpr InlineData() : as_chars_{0} {} explicit InlineData(DefaultInitType) {} explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {} explicit constexpr InlineData(absl::string_view chars) - : as_chars_{ - GetOrNull(chars, 0), GetOrNull(chars, 1), - GetOrNull(chars, 2), GetOrNull(chars, 3), - GetOrNull(chars, 4), GetOrNull(chars, 5), - GetOrNull(chars, 6), GetOrNull(chars, 7), - GetOrNull(chars, 8), GetOrNull(chars, 9), - GetOrNull(chars, 10), GetOrNull(chars, 11), - GetOrNull(chars, 12), GetOrNull(chars, 13), - GetOrNull(chars, 14), static_cast((chars.size() << 1))} {} + : as_chars_{static_cast((chars.size() << 1)), + GetOrNull(chars, 0), + GetOrNull(chars, 1), + GetOrNull(chars, 2), + GetOrNull(chars, 3), + GetOrNull(chars, 4), + GetOrNull(chars, 5), + GetOrNull(chars, 6), + GetOrNull(chars, 7), + GetOrNull(chars, 8), + GetOrNull(chars, 9), + GetOrNull(chars, 10), + GetOrNull(chars, 11), + GetOrNull(chars, 12), + GetOrNull(chars, 13), + GetOrNull(chars, 14)} {} // Returns true if the current instance is empty. // The 'empty value' is an inlined data value of zero length. @@ -435,8 +511,8 @@ class InlineData { // Requires the current instance to hold a tree value. CordzInfo* cordz_info() const { assert(is_tree()); - intptr_t info = - static_cast(absl::big_endian::ToHost64(as_tree_.cordz_info)); + intptr_t info = static_cast(absl::little_endian::ToHost64( + static_cast(as_tree_.cordz_info))); assert(info & 1); return reinterpret_cast(info - 1); } @@ -446,8 +522,9 @@ class InlineData { // Requires the current instance to hold a tree value. void set_cordz_info(CordzInfo* cordz_info) { assert(is_tree()); - intptr_t info = reinterpret_cast(cordz_info) | 1; - as_tree_.cordz_info = absl::big_endian::FromHost64(info); + uintptr_t info = reinterpret_cast(cordz_info) | 1; + as_tree_.cordz_info = + static_cast(absl::little_endian::FromHost64(info)); } // Resets the current cordz_info to null / empty. @@ -460,7 +537,7 @@ class InlineData { // Requires the current instance to hold inline data. const char* as_chars() const { assert(!is_tree()); - return as_chars_; + return &as_chars_[1]; } // Returns a mutable pointer to the character data inside this instance. @@ -478,7 +555,7 @@ class InlineData { // // It's an error to read from the returned pointer without a preceding write // if the current instance does not hold inline data, i.e.: is_tree() == true. - char* as_chars() { return as_chars_; } + char* as_chars() { return &as_chars_[1]; } // Returns the tree value of this value. // Requires the current instance to hold a tree value. @@ -506,7 +583,7 @@ class InlineData { // Requires the current instance to hold inline data. size_t inline_size() const { assert(!is_tree()); - return tag() >> 1; + return static_cast(tag()) >> 1; } // Sets the size of the inlined character data inside this instance. @@ -514,26 +591,42 @@ class InlineData { // See the documentation on 'as_chars()' for more information and examples. void set_inline_size(size_t size) { ABSL_ASSERT(size <= kMaxInline); - tag() = static_cast(size << 1); + tag() = static_cast(size << 1); + } + + // Compares 'this' inlined data with rhs. The comparison is a straightforward + // lexicographic comparison. 
`Compare()` returns values as follows: + // + // -1 'this' InlineData instance is smaller + // 0 the InlineData instances are equal + // 1 'this' InlineData instance larger + int Compare(const InlineData& rhs) const { + uint64_t x, y; + memcpy(&x, as_chars(), sizeof(x)); + memcpy(&y, rhs.as_chars(), sizeof(y)); + if (x == y) { + memcpy(&x, as_chars() + 7, sizeof(x)); + memcpy(&y, rhs.as_chars() + 7, sizeof(y)); + if (x == y) { + if (inline_size() == rhs.inline_size()) return 0; + return inline_size() < rhs.inline_size() ? -1 : 1; + } + } + x = absl::big_endian::FromHost64(x); + y = absl::big_endian::FromHost64(y); + return x < y ? -1 : 1; } private: // See cordz_info_t for forced alignment and size of `cordz_info` details. struct AsTree { - explicit constexpr AsTree(absl::cord_internal::CordRep* tree) - : rep(tree), cordz_info(kNullCordzInfo) {} - // This union uses up extra space so that whether rep is 32 or 64 bits, - // cordz_info will still start at the eighth byte, and the last - // byte of cordz_info will still be the last byte of InlineData. - union { - absl::cord_internal::CordRep* rep; - cordz_info_t unused_aligner; - }; - cordz_info_t cordz_info; + explicit constexpr AsTree(absl::cord_internal::CordRep* tree) : rep(tree) {} + cordz_info_t cordz_info = kNullCordzInfo; + absl::cord_internal::CordRep* rep; }; - char& tag() { return reinterpret_cast(this)[kMaxInline]; } - char tag() const { return reinterpret_cast(this)[kMaxInline]; } + int8_t& tag() { return reinterpret_cast(this)[0]; } + int8_t tag() const { return reinterpret_cast(this)[0]; } // If the data has length <= kMaxInline, we store it in `as_chars_`, and // store the size in the last char of `as_chars_` shifted left + 1. @@ -547,16 +640,6 @@ class InlineData { static_assert(sizeof(InlineData) == kMaxInline + 1, ""); -inline CordRepConcat* CordRep::concat() { - assert(IsConcat()); - return static_cast(this); -} - -inline const CordRepConcat* CordRep::concat() const { - assert(IsConcat()); - return static_cast(this); -} - inline CordRepSubstring* CordRep::substring() { assert(IsSubstring()); return static_cast(this); @@ -578,7 +661,9 @@ inline const CordRepExternal* CordRep::external() const { } inline CordRep* CordRep::Ref(CordRep* rep) { - assert(rep != nullptr); + // ABSL_ASSUME is a workaround for + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105585 + ABSL_ASSUME(rep != nullptr); rep->refcount.Increment(); return rep; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc index 6d53ab6169..7ce36128eb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc @@ -17,11 +17,13 @@ #include #include #include +#include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_consume.h" #include "absl/strings/internal/cord_rep_flat.h" @@ -32,7 +34,9 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace cord_internal { -constexpr size_t CordRepBtree::kMaxCapacity; // NOLINT: needed for c++ < c++17 +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr size_t CordRepBtree::kMaxCapacity; +#endif namespace { @@ -52,8 +56,10 @@ 
inline bool exhaustive_validation() { // Prints the entire tree structure or 'rep'. External callers should // not specify 'depth' and leave it to its default (0) value. // Rep may be a CordRepBtree tree, or a SUBSTRING / EXTERNAL / FLAT node. -void DumpAll(const CordRep* rep, bool include_contents, std::ostream& stream, - int depth = 0) { +void DumpAll(const CordRep* rep, + bool include_contents, + std::ostream& stream, + size_t depth = 0) { // Allow for full height trees + substring -> flat / external nodes. assert(depth <= CordRepBtree::kMaxDepth + 2); std::string sharing = const_cast(rep)->refcount.IsOne() @@ -69,7 +75,7 @@ void DumpAll(const CordRep* rep, bool include_contents, std::ostream& stream, // indentation and prefix / labels keeps us within roughly 80-100 wide. constexpr size_t kMaxDataLength = 60; stream << ", data = \"" - << CordRepBtree::EdgeData(r).substr(0, kMaxDataLength) + << EdgeData(r).substr(0, kMaxDataLength) << (r->length > kMaxDataLength ? "\"..." : "\""); } stream << '\n'; @@ -119,6 +125,7 @@ CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) { rep = CordRep::Ref(substring->child); CordRep::Unref(substring); } + assert(rep->IsExternal() || rep->IsFlat()); CordRepSubstring* substring = new CordRepSubstring(); substring->length = n; substring->tag = SUBSTRING; @@ -140,6 +147,26 @@ inline CordRep* MakeSubstring(CordRep* rep, size_t offset) { return CreateSubstring(rep, offset, rep->length - offset); } +// Resizes `edge` to the provided `length`. Adopts a reference on `edge`. +// This method directly returns `edge` if `length` equals `edge->length`. +// If `is_mutable` is set to true, this function may return `edge` with +// `edge->length` set to the new length depending on the type and size of +// `edge`. Otherwise, this function returns a new CordRepSubstring value. +// Requires `length > 0 && length <= edge->length`. +CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) { + assert(length > 0); + assert(length <= edge->length); + assert(IsDataEdge(edge)); + if (length >= edge->length) return edge; + + if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) { + edge->length = length; + return edge; + } + + return CreateSubstring(edge, 0, length); +} + template inline absl::string_view Consume(absl::string_view s, size_t n) { return edge_type == kBack ? s.substr(n) : s.substr(0, s.size() - n); @@ -170,24 +197,29 @@ inline void FastUnref(R* r, Fn&& fn) { } } -// Deletes a leaf node data edge. Requires `rep` to be an EXTERNAL or FLAT -// node, or a SUBSTRING of an EXTERNAL or FLAT node. -void DeleteLeafEdge(CordRep* rep) { - for (;;) { + +void DeleteSubstring(CordRepSubstring* substring) { + CordRep* rep = substring->child; + if (!rep->refcount.Decrement()) { if (rep->tag >= FLAT) { CordRepFlat::Delete(rep->flat()); - return; - } - if (rep->tag == EXTERNAL) { + } else { + assert(rep->tag == EXTERNAL); CordRepExternal::Delete(rep->external()); - return; } - assert(rep->tag == SUBSTRING); - CordRepSubstring* substring = rep->substring(); - rep = substring->child; - assert(rep->tag == EXTERNAL || rep->tag >= FLAT); - delete substring; - if (rep->refcount.Decrement()) return; + } + delete substring; +} + +// Deletes a leaf node data edge. Requires `IsDataEdge(rep)`. 
+void DeleteLeafEdge(CordRep* rep) { + assert(IsDataEdge(rep)); + if (rep->tag >= FLAT) { + CordRepFlat::Delete(rep->flat()); + } else if (rep->tag == EXTERNAL) { + CordRepExternal::Delete(rep->external()); + } else { + DeleteSubstring(rep->substring()); } } @@ -240,11 +272,14 @@ struct StackOperations { static inline CordRepBtree* Finalize(CordRepBtree* tree, OpResult result) { switch (result.action) { case CordRepBtree::kPopped: - if (ABSL_PREDICT_FALSE(tree->height() >= CordRepBtree::kMaxHeight)) { - ABSL_RAW_LOG(FATAL, "Max height exceeded"); - } - return edge_type == kBack ? CordRepBtree::New(tree, result.tree) + tree = edge_type == kBack ? CordRepBtree::New(tree, result.tree) : CordRepBtree::New(result.tree, tree); + if (ABSL_PREDICT_FALSE(tree->height() > CordRepBtree::kMaxHeight)) { + tree = CordRepBtree::Rebuild(tree); + ABSL_RAW_CHECK(tree->height() <= CordRepBtree::kMaxHeight, + "Max height exceeded"); + } + return tree; case CordRepBtree::kCopied: CordRep::Unref(tree); ABSL_FALLTHROUGH_INTENDED; @@ -349,19 +384,37 @@ void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) { Dump(rep, absl::string_view(), false, stream); } -void CordRepBtree::DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end) { - for (CordRep* edge : tree->Edges(begin, end)) { - FastUnref(edge, DeleteLeafEdge); +template +static void DestroyTree(CordRepBtree* tree) { + for (CordRep* node : tree->Edges()) { + if (node->refcount.Decrement()) continue; + for (CordRep* edge : node->btree()->Edges()) { + if (edge->refcount.Decrement()) continue; + if (size == 1) { + DeleteLeafEdge(edge); + } else { + CordRepBtree::Destroy(edge->btree()); + } + } + CordRepBtree::Delete(node->btree()); } - Delete(tree); + CordRepBtree::Delete(tree); } -void CordRepBtree::DestroyNonLeaf(CordRepBtree* tree, size_t begin, - size_t end) { - for (CordRep* edge : tree->Edges(begin, end)) { - FastUnref(edge->btree(), Destroy); +void CordRepBtree::Destroy(CordRepBtree* tree) { + switch (tree->height()) { + case 0: + for (CordRep* edge : tree->Edges()) { + if (!edge->refcount.Decrement()) { + DeleteLeafEdge(edge); + } + } + return CordRepBtree::Delete(tree); + case 1: + return DestroyTree<1>(tree); + default: + return DestroyTree<2>(tree); } - Delete(tree); } bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) { @@ -692,7 +745,7 @@ CopyResult CordRepBtree::CopySuffix(size_t offset) { return result; } -CopyResult CordRepBtree::CopyPrefix(size_t n) { +CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) { assert(n > 0); assert(n <= this->length); @@ -704,10 +757,12 @@ CopyResult CordRepBtree::CopyPrefix(size_t n) { int height = this->height(); CordRepBtree* node = this; CordRep* front = node->Edge(kFront); - while (front->length >= n) { - if (--height < 0) return {MakeSubstring(CordRep::Ref(front), 0, n), -1}; - node = front->btree(); - front = node->Edge(kFront); + if (allow_folding) { + while (front->length >= n) { + if (--height < 0) return {MakeSubstring(CordRep::Ref(front), 0, n), -1}; + node = front->btree(); + front = node->Edge(kFront); + } } if (node->length == n) return {CordRep::Ref(node), height}; @@ -746,6 +801,97 @@ CopyResult CordRepBtree::CopyPrefix(size_t n) { return result; } +CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) { + CordRep* front = tree->Edge(tree->begin()); + if (tree->refcount.IsOne()) { + Unref(tree->Edges(tree->begin() + 1, tree->end())); + CordRepBtree::Delete(tree); + } else { + CordRep::Ref(front); + CordRep::Unref(tree); + } + return front; +} + 
+CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end, + size_t new_length) { + assert(end <= tree->end()); + if (tree->refcount.IsOne()) { + Unref(tree->Edges(end, tree->end())); + tree->set_end(end); + tree->length = new_length; + } else { + CordRepBtree* old = tree; + tree = tree->CopyBeginTo(end, new_length); + CordRep::Unref(old); + } + return tree; +} + +CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) { + // Check input and deal with trivial cases 'Remove all/none' + assert(tree != nullptr); + assert(n <= tree->length); + const size_t len = tree->length; + if (ABSL_PREDICT_FALSE(n == 0)) { + return tree; + } + if (ABSL_PREDICT_FALSE(n >= len)) { + CordRepBtree::Unref(tree); + return nullptr; + } + + size_t length = len - n; + int height = tree->height(); + bool is_mutable = tree->refcount.IsOne(); + + // Extract all top nodes which are reduced to size = 1 + Position pos = tree->IndexOfLength(length); + while (pos.index == tree->begin()) { + CordRep* edge = ExtractFront(tree); + is_mutable &= edge->refcount.IsOne(); + if (height-- == 0) return ResizeEdge(edge, length, is_mutable); + tree = edge->btree(); + pos = tree->IndexOfLength(length); + } + + // Repeat the following sequence traversing down the tree: + // - Crop the top node to the 'last remaining edge' adjusting length. + // - Set the length for down edges to the partial length in that last edge. + // - Repeat this until the last edge is 'included in full' + // - If we hit the data edge level, resize and return the last data edge + CordRepBtree* top = tree = ConsumeBeginTo(tree, pos.index + 1, length); + CordRep* edge = tree->Edge(pos.index); + length = pos.n; + while (length != edge->length) { + // ConsumeBeginTo guarantees `tree` is a clean, privately owned copy. + assert(tree->refcount.IsOne()); + const bool edge_is_mutable = edge->refcount.IsOne(); + + if (height-- == 0) { + tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable); + return AssertValid(top); + } + + if (!edge_is_mutable) { + // We can't 'in place' remove any suffixes down this edge. + // Replace this edge with a prefix copy instead. + tree->edges_[pos.index] = edge->btree()->CopyPrefix(length, false).edge; + CordRep::Unref(edge); + return AssertValid(top); + } + + // Move down one level, rinse repeat. + tree = edge->btree(); + pos = tree->IndexOfLength(length); + tree = ConsumeBeginTo(edge->btree(), pos.index + 1, length); + edge = tree->Edge(pos.index); + length = pos.n; + } + + return AssertValid(top); +} + CordRep* CordRepBtree::SubTree(size_t offset, size_t n) { assert(n <= this->length); assert(offset <= this->length - n); @@ -870,7 +1016,7 @@ Span CordRepBtree::GetAppendBufferSlow(size_t size) { stack[i] = node; } - // Must be a privately owned flat. + // Must be a privately owned, mutable flat. 
CordRep* const edge = node->Edge(kBack); if (!edge->refcount.IsOne() || edge->tag < FLAT) return {}; @@ -950,6 +1096,136 @@ template CordRepBtree* CordRepBtree::AddData(CordRepBtree* tree, absl::string_view data, size_t extra); +void CordRepBtree::Rebuild(CordRepBtree** stack, CordRepBtree* tree, + bool consume) { + bool owned = consume && tree->refcount.IsOne(); + if (tree->height() == 0) { + for (CordRep* edge : tree->Edges()) { + if (!owned) edge = CordRep::Ref(edge); + size_t height = 0; + size_t length = edge->length; + CordRepBtree* node = stack[0]; + OpResult result = node->AddEdge(true, edge, length); + while (result.action == CordRepBtree::kPopped) { + stack[height] = result.tree; + if (stack[++height] == nullptr) { + result.action = CordRepBtree::kSelf; + stack[height] = CordRepBtree::New(node, result.tree); + } else { + node = stack[height]; + result = node->AddEdge(true, result.tree, length); + } + } + while (stack[++height] != nullptr) { + stack[height]->length += length; + } + } + } else { + for (CordRep* rep : tree->Edges()) { + Rebuild(stack, rep->btree(), owned); + } + } + if (consume) { + if (owned) { + CordRepBtree::Delete(tree); + } else { + CordRepBtree::Unref(tree); + } + } +} + +CordRepBtree* CordRepBtree::Rebuild(CordRepBtree* tree) { + // Set up initial stack with empty leaf node. + CordRepBtree* node = CordRepBtree::New(); + CordRepBtree* stack[CordRepBtree::kMaxDepth + 1] = {node}; + + // Recursively build the tree, consuming the input tree. + Rebuild(stack, tree, /* consume reference */ true); + + // Return top most node + for (CordRepBtree* parent : stack) { + if (parent == nullptr) return node; + node = parent; + } + + // Unreachable + assert(false); + return nullptr; +} + +CordRepBtree::ExtractResult CordRepBtree::ExtractAppendBuffer( + CordRepBtree* tree, size_t extra_capacity) { + int depth = 0; + NodeStack stack; + + // Set up default 'no success' result which is {tree, nullptr}. + ExtractResult result; + result.tree = tree; + result.extracted = nullptr; + + // Dive down the right side of the tree, making sure no edges are shared. + while (tree->height() > 0) { + if (!tree->refcount.IsOne()) return result; + stack[depth++] = tree; + tree = tree->Edge(kBack)->btree(); + } + if (!tree->refcount.IsOne()) return result; + + // Validate we ended on a non shared flat. + CordRep* rep = tree->Edge(kBack); + if (!(rep->IsFlat() && rep->refcount.IsOne())) return result; + + // Verify it has at least the requested extra capacity. + CordRepFlat* flat = rep->flat(); + const size_t length = flat->length; + const size_t avail = flat->Capacity() - flat->length; + if (extra_capacity > avail) return result; + + // Set the extracted flat in the result. + result.extracted = flat; + + // Cascading delete all nodes that become empty. + while (tree->size() == 1) { + CordRepBtree::Delete(tree); + if (--depth < 0) { + // We consumed the entire tree: return nullptr for new tree. + result.tree = nullptr; + return result; + } + rep = tree; + tree = stack[depth]; + } + + // Remove the edge or cascaded up parent node. + tree->set_end(tree->end() - 1); + tree->length -= length; + + // Adjust lengths up the tree. + while (depth > 0) { + tree = stack[--depth]; + tree->length -= length; + } + + // Remove unnecessary top nodes with size = 1. This may iterate all the way + // down to the leaf node in which case we simply return the remaining last + // edge in that node and the extracted flat. 
+ while (tree->size() == 1) { + int height = tree->height(); + rep = tree->Edge(kBack); + Delete(tree); + if (height == 0) { + // We consumed the leaf: return the sole data edge as the new tree. + result.tree = rep; + return result; + } + tree = rep->btree(); + } + + // Done: return the (new) top level node and extracted flat. + result.tree = tree; + return result; +} + } // namespace cord_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.h index bbaa7934fa..eed5609e55 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.h @@ -22,6 +22,7 @@ #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/optimization.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_flat.h" #include "absl/strings/string_view.h" @@ -94,8 +95,9 @@ class CordRepBtree : public CordRep { // local stack variable compared to Cord's current near 400 bytes stack use. // The maximum `height` value of a node is then `kMaxDepth - 1` as node height // values start with a value of 0 for leaf nodes. - static constexpr int kMaxDepth = 12; - static constexpr int kMaxHeight = kMaxDepth - 1; + static constexpr size_t kMaxDepth = 12; + // See comments on height() for why this is an int and not a size_t. + static constexpr int kMaxHeight = static_cast(kMaxDepth - 1); // `Action` defines the action for unwinding changes done at the btree's leaf // level that need to be propagated up to the parent node(s). Each operation @@ -163,6 +165,15 @@ class CordRepBtree : public CordRep { // typically after a ref_count.Decrement() on the last reference count. static void Destroy(CordRepBtree* tree); + // Destruction + static void Delete(CordRepBtree* tree) { delete tree; } + + // Use CordRep::Unref() as we overload for absl::Span. + using CordRep::Unref; + + // Unrefs all edges in `edges` which are assumed to be 'likely one'. + static void Unref(absl::Span edges); + // Appends / Prepends an existing CordRep instance to this tree. // The below methods accept three types of input: // 1) `rep` is a data node (See `IsDataNode` for valid data edges). @@ -198,6 +209,19 @@ class CordRepBtree : public CordRep { // Requires `offset + n <= length`. Returns `nullptr` if `n` is zero. CordRep* SubTree(size_t offset, size_t n); + // Removes `n` trailing bytes from `tree`, and returns the resulting tree + // or data edge. Returns `tree` if n is zero, and nullptr if n == length. + // This function is logically identical to: + // result = tree->SubTree(0, tree->length - n); + // Unref(tree); + // return result; + // However, the actual implementation will as much as possible perform 'in + // place' modifications on the tree on all nodes and edges that are mutable. + // For example, in a fully privately owned tree with the last edge being a + // flat of length 12, RemoveSuffix(1) will simply set the length of that data + // edge to 11, and reduce the length of all nodes on the edge path by 1. + static CordRep* RemoveSuffix(CordRepBtree* tree, size_t n); + // Returns the character at the given offset. 
char GetCharacter(size_t offset) const; @@ -226,6 +250,36 @@ class CordRepBtree : public CordRep { // shared node of a cord tree. Span GetAppendBuffer(size_t size); + // Extracts the right-most data edge from this tree iff: + // - the tree and all internal edges to the right-most node are not shared. + // - the right-most node is a FLAT node and not shared. + // - the right-most node has at least the desired extra capacity. + // + // Returns {tree, nullptr} if any of the above conditions are not met. + // This method effectively removes data from the tree. The intent of this + // method is to allow applications appending small string data to use + // pre-existing capacity, and add the modified rep back to the tree. + // + // Simplified such code would look similar to this: + // void MyTreeBuilder::Append(string_view data) { + // ExtractResult result = CordRepBtree::ExtractAppendBuffer(tree_, 1); + // if (CordRep* rep = result.extracted) { + // size_t available = rep->Capacity() - rep->length; + // size_t n = std::min(data.size(), n); + // memcpy(rep->Data(), data.data(), n); + // rep->length += n; + // data.remove_prefix(n); + // if (!result.tree->IsBtree()) { + // tree_ = CordRepBtree::Create(result.tree); + // } + // tree_ = CordRepBtree::Append(tree_, rep); + // } + // ... + // // Remaining edge in `result.tree`. + // } + static ExtractResult ExtractAppendBuffer(CordRepBtree* tree, + size_t extra_capacity = 1); + // Returns the `height` of the tree. The height of a tree is limited to // kMaxHeight. `height` is implemented as an `int` as in some places we // use negative (-1) values for 'data edges'. @@ -258,13 +312,6 @@ class CordRepBtree : public CordRep { // Requires this instance to be a leaf node, and `index` to be valid index. inline absl::string_view Data(size_t index) const; - static const char* EdgeDataPtr(const CordRep* r); - static absl::string_view EdgeData(const CordRep* r); - - // Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node - // holding a FLAT or EXTERNAL child rep. - static bool IsDataEdge(const CordRep* rep); - // Diagnostics: returns true if `tree` is valid and internally consistent. // If `shallow` is false, then the provided top level node and all child nodes // below it are recursively checked. If `shallow` is true, only the provided @@ -324,6 +371,11 @@ class CordRepBtree : public CordRep { // `front.height() + 1`. Requires `back.height() == front.height()`. static CordRepBtree* New(CordRepBtree* front, CordRepBtree* back); + // Creates a fully balanced tree from the provided tree by rebuilding a new + // tree from all data edges in the input. This function is automatically + // invoked internally when the tree exceeds the maximum height. + static CordRepBtree* Rebuild(CordRepBtree* tree); + private: CordRepBtree() = default; ~CordRepBtree() = default; @@ -368,6 +420,12 @@ class CordRepBtree : public CordRep { // Requires 0 < `offset` <= length. Position IndexBefore(size_t offset) const; + // Returns the index of the edge ending at (or on) length `length`, and the + // number of bytes inside that edge up to `length`. For example, if we have a + // Node with 2 edges, one of 10 and one of 20 long, then IndexOfLength(27) + // will return {1, 17}, and IndexOfLength(10) will return {0, 10}. + Position IndexOfLength(size_t n) const; + // Identical to the above function except starting from the position `front`. 
// This function is equivalent to `IndexBefore(front.n + offset)`, with // the difference that this function is optimized to start at `front.index`. @@ -380,12 +438,6 @@ class CordRepBtree : public CordRep { // Requires `offset` < length. Position IndexBeyond(size_t offset) const; - // Destruction - static void DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end); - static void DestroyNonLeaf(CordRepBtree* tree, size_t begin, size_t end); - static void DestroyTree(CordRepBtree* tree, size_t begin, size_t end); - static void Delete(CordRepBtree* tree) { delete tree; } - // Creates a new leaf node containing as much data as possible from `data`. // The data is added either forwards or reversed depending on `edge_type`. // Callers must check the length of the returned node to determine if all data @@ -406,11 +458,28 @@ class CordRepBtree : public CordRep { // created copy to `new_length`. CordRepBtree* CopyBeginTo(size_t end, size_t new_length) const; + // Returns a tree containing the edges [tree->begin(), end) and length + // of `new_length`. This method consumes a reference on the provided + // tree, and logically performs the following operation: + // result = tree->CopyBeginTo(end, new_length); + // CordRep::Unref(tree); + // return result; + static CordRepBtree* ConsumeBeginTo(CordRepBtree* tree, size_t end, + size_t new_length); + // Creates a partial copy of this Btree node, copying all edges starting at // `begin`, adding a reference on each copied edge, and sets the length of // the newly created copy to `new_length`. CordRepBtree* CopyToEndFrom(size_t begin, size_t new_length) const; + // Extracts and returns the front edge from the provided tree. + // This method consumes a reference on the provided tree, and logically + // performs the following operation: + // edge = CordRep::Ref(tree->Edge(kFront)); + // CordRep::Unref(tree); + // return edge; + static CordRep* ExtractFront(CordRepBtree* tree); + // Returns a tree containing the result of appending `right` to `left`. static CordRepBtree* MergeTrees(CordRepBtree* left, CordRepBtree* right); @@ -420,6 +489,12 @@ class CordRepBtree : public CordRep { static CordRepBtree* AppendSlow(CordRepBtree*, CordRep* rep); static CordRepBtree* PrependSlow(CordRepBtree*, CordRep* rep); + // Recursively rebuilds `tree` into `stack`. If 'consume` is set to true, the + // function will consume a reference on `tree`. `stack` is a null terminated + // array containing the new tree's state, with the current leaf node at + // stack[0], and parent nodes above that, or null for 'top of tree'. + static void Rebuild(CordRepBtree** stack, CordRepBtree* tree, bool consume); + // Aligns existing edges to start at index 0, to allow for a new edge to be // added to the back of the current edges. inline void AlignBegin(); @@ -459,11 +534,11 @@ class CordRepBtree : public CordRep { // Returns a partial copy of the current tree containing the first `n` bytes // of data. `CopyResult` contains both the resulting edge and its height. The // resulting tree may be less high than the current tree, or even be a single - // matching data edge. For example, if `n == 1`, then the result will be the - // single data edge, and height will be set to -1 (one below the owning leaf - // node). If n == 0, this function returns null. - // Requires `n <= length` - CopyResult CopyPrefix(size_t n); + // matching data edge if `allow_folding` is set to true. 
+ // For example, if `n == 1`, then the result will be the single data edge, and + // height will be set to -1 (one below the owning leaf node). If n == 0, this + // function returns null. Requires `n <= length` + CopyResult CopyPrefix(size_t n, bool allow_folding = true); // Returns a partial copy of the current tree containing all data starting // after `offset`. `CopyResult` contains both the resulting edge and its @@ -551,34 +626,11 @@ inline absl::Span CordRepBtree::Edges(size_t begin, return {edges_ + begin, static_cast(end - begin)}; } -inline const char* CordRepBtree::EdgeDataPtr(const CordRep* r) { - assert(IsDataEdge(r)); - size_t offset = 0; - if (r->tag == SUBSTRING) { - offset = r->substring()->start; - r = r->substring()->child; - } - return (r->tag >= FLAT ? r->flat()->Data() : r->external()->base) + offset; -} - -inline absl::string_view CordRepBtree::EdgeData(const CordRep* r) { - return absl::string_view(EdgeDataPtr(r), r->length); -} - inline absl::string_view CordRepBtree::Data(size_t index) const { assert(height() == 0); return EdgeData(Edge(index)); } -inline bool CordRepBtree::IsDataEdge(const CordRep* rep) { - // The fast path is that `rep` is an EXTERNAL or FLAT node, making the below - // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL - // check in the slow path the SUBSTRING check to optimize for the hot path. - if (rep->tag == EXTERNAL || rep->tag >= FLAT) return true; - if (rep->tag == SUBSTRING) rep = rep->substring()->child; - return rep->tag == EXTERNAL || rep->tag >= FLAT; -} - inline CordRepBtree* CordRepBtree::New(int height) { CordRepBtree* tree = new CordRepBtree; tree->length = 0; @@ -606,19 +658,14 @@ inline CordRepBtree* CordRepBtree::New(CordRepBtree* front, return tree; } -inline void CordRepBtree::DestroyTree(CordRepBtree* tree, size_t begin, - size_t end) { - if (tree->height() == 0) { - DestroyLeaf(tree, begin, end); - } else { - DestroyNonLeaf(tree, begin, end); +inline void CordRepBtree::Unref(absl::Span edges) { + for (CordRep* edge : edges) { + if (ABSL_PREDICT_FALSE(!edge->refcount.Decrement())) { + CordRep::Destroy(edge); + } } } -inline void CordRepBtree::Destroy(CordRepBtree* tree) { - DestroyTree(tree, tree->begin(), tree->end()); -} - inline CordRepBtree* CordRepBtree::CopyRaw() const { auto* tree = static_cast(::operator new(sizeof(CordRepBtree))); memcpy(static_cast(tree), this, sizeof(CordRepBtree)); @@ -670,7 +717,7 @@ inline void CordRepBtree::AlignBegin() { // size, and then do overlapping load/store of up to 4 pointers (inlined as // XMM, YMM or ZMM load/store) and up to 2 pointers (XMM / YMM), which is a) // compact and b) not clobbering any registers. 
- ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity); + ABSL_ASSUME(new_end <= kMaxCapacity); #ifdef __clang__ #pragma unroll 1 #endif @@ -688,7 +735,7 @@ inline void CordRepBtree::AlignEnd() { const size_t new_end = end() + delta; set_begin(new_begin); set_end(new_end); - ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity); + ABSL_ASSUME(new_end <= kMaxCapacity); #ifdef __clang__ #pragma unroll 1 #endif @@ -762,6 +809,14 @@ inline CordRepBtree::Position CordRepBtree::IndexBefore(Position front, return {index, offset}; } +inline CordRepBtree::Position CordRepBtree::IndexOfLength(size_t n) const { + assert(n <= length); + size_t index = back(); + size_t strip = length - n; + while (strip >= edges_[index]->length) strip -= edges_[index--]->length; + return {index, edges_[index]->length - strip}; +} + inline CordRepBtree::Position CordRepBtree::IndexBeyond( const size_t offset) const { // We need to find the edge which `starting offset` is beyond (>=)`offset`. diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc index d1f9995d00..6ed20c23a7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc @@ -16,6 +16,7 @@ #include +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" @@ -39,7 +40,7 @@ inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) { assert(n <= rep->length); assert(offset < rep->length); assert(offset <= rep->length - n); - assert(CordRepBtree::IsDataEdge(rep)); + assert(IsDataEdge(rep)); if (n == 0) return nullptr; if (n == rep->length) return CordRep::Ref(rep); @@ -49,6 +50,7 @@ inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) { rep = rep->substring()->child; } + assert(rep->IsExternal() || rep->IsFlat()); CordRepSubstring* substring = new CordRepSubstring(); substring->length = n; substring->tag = SUBSTRING; @@ -88,7 +90,7 @@ CordRepBtreeNavigator::Position CordRepBtreeNavigator::Skip(size_t n) { // edges that must be skipped. while (height > 0) { node = edge->btree(); - index_[height] = index; + index_[height] = static_cast(index); node_[--height] = node; index = node->begin(); edge = node->Edge(index); @@ -99,7 +101,7 @@ CordRepBtreeNavigator::Position CordRepBtreeNavigator::Skip(size_t n) { edge = node->Edge(index); } } - index_[0] = index; + index_[0] = static_cast(index); return {edge, n}; } @@ -124,7 +126,7 @@ ReadResult CordRepBtreeNavigator::Read(size_t edge_offset, size_t n) { do { length -= edge->length; while (++index == node->end()) { - index_[height] = index; + index_[height] = static_cast(index); if (++height > height_) { subtree->set_end(subtree_end); if (length == 0) return {subtree, 0}; @@ -152,7 +154,7 @@ ReadResult CordRepBtreeNavigator::Read(size_t edge_offset, size_t n) { // edges that must be read, adding 'down' nodes to `subtree`. 
while (height > 0) { node = edge->btree(); - index_[height] = index; + index_[height] = static_cast(index); node_[--height] = node; index = node->begin(); edge = node->Edge(index); @@ -176,7 +178,7 @@ ReadResult CordRepBtreeNavigator::Read(size_t edge_offset, size_t n) { subtree->edges_[subtree_end++] = Substring(edge, 0, length); } subtree->set_end(subtree_end); - index_[0] = index; + index_[0] = static_cast(index); return {tree, length}; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h index 971b92eda6..3d581c877e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h @@ -143,8 +143,8 @@ class CordRepBtreeNavigator { // `index_` and `node_` contain the navigation state as the 'path' to the // current data edge which is at `node_[0]->Edge(index_[0])`. The contents // of these are undefined until the instance is initialized (`height_ >= 0`). - uint8_t index_[CordRepBtree::kMaxHeight]; - CordRepBtree* node_[CordRepBtree::kMaxHeight]; + uint8_t index_[CordRepBtree::kMaxDepth]; + CordRepBtree* node_[CordRepBtree::kMaxDepth]; }; // Returns true if this instance is not empty. @@ -173,6 +173,7 @@ template inline CordRep* CordRepBtreeNavigator::Init(CordRepBtree* tree) { assert(tree != nullptr); assert(tree->size() > 0); + assert(tree->height() <= CordRepBtree::kMaxHeight); int height = height_ = tree->height(); size_t index = tree->index(edge_type); node_[height] = tree; @@ -206,6 +207,7 @@ inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::Seek( inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::InitOffset( CordRepBtree* tree, size_t offset) { assert(tree != nullptr); + assert(tree->height() <= CordRepBtree::kMaxHeight); if (ABSL_PREDICT_FALSE(offset >= tree->length)) return {nullptr, 0}; height_ = tree->height(); node_[height_] = tree; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc index ce09b1992a..bed7550823 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc @@ -48,7 +48,7 @@ using Position = CordRepBtreeNavigator::Position; // CordRepBtreeNavigatorTest is a test fixture which automatically creates a // tree to test navigation logic on. The parameter `count' defines the number of // data edges in the test tree. 
-class CordRepBtreeNavigatorTest : public testing::TestWithParam { +class CordRepBtreeNavigatorTest : public testing::TestWithParam { public: using Flats = std::vector; static constexpr size_t kCharsPerFlat = 3; @@ -71,12 +71,12 @@ class CordRepBtreeNavigatorTest : public testing::TestWithParam { ~CordRepBtreeNavigatorTest() override { CordRep::Unref(tree_); } - int count() const { return GetParam(); } + size_t count() const { return GetParam(); } CordRepBtree* tree() { return tree_; } const std::string& data() const { return data_; } const std::vector& flats() const { return flats_; } - static std::string ToString(testing::TestParamInfo param) { + static std::string ToString(testing::TestParamInfo param) { return absl::StrCat(param.param, "_Flats"); } @@ -131,15 +131,15 @@ TEST_P(CordRepBtreeNavigatorTest, NextPrev) { EXPECT_THAT(nav.Previous(), Eq(nullptr)); EXPECT_THAT(nav.Current(), Eq(flats.front())); - for (int i = 1; i < flats.size(); ++i) { + for (size_t i = 1; i < flats.size(); ++i) { ASSERT_THAT(nav.Next(), Eq(flats[i])); EXPECT_THAT(nav.Current(), Eq(flats[i])); } EXPECT_THAT(nav.Next(), Eq(nullptr)); EXPECT_THAT(nav.Current(), Eq(flats.back())); - for (int i = static_cast(flats.size()) - 2; i >= 0; --i) { - ASSERT_THAT(nav.Previous(), Eq(flats[i])); - EXPECT_THAT(nav.Current(), Eq(flats[i])); + for (size_t i = flats.size() - 1; i > 0; --i) { + ASSERT_THAT(nav.Previous(), Eq(flats[i - 1])); + EXPECT_THAT(nav.Current(), Eq(flats[i - 1])); } EXPECT_THAT(nav.Previous(), Eq(nullptr)); EXPECT_THAT(nav.Current(), Eq(flats.front())); @@ -152,13 +152,13 @@ TEST_P(CordRepBtreeNavigatorTest, PrevNext) { EXPECT_THAT(nav.Next(), Eq(nullptr)); EXPECT_THAT(nav.Current(), Eq(flats.back())); - for (int i = static_cast(flats.size()) - 2; i >= 0; --i) { - ASSERT_THAT(nav.Previous(), Eq(flats[i])); - EXPECT_THAT(nav.Current(), Eq(flats[i])); + for (size_t i = flats.size() - 1; i > 0; --i) { + ASSERT_THAT(nav.Previous(), Eq(flats[i - 1])); + EXPECT_THAT(nav.Current(), Eq(flats[i - 1])); } EXPECT_THAT(nav.Previous(), Eq(nullptr)); EXPECT_THAT(nav.Current(), Eq(flats.front())); - for (int i = 1; i < flats.size(); ++i) { + for (size_t i = 1; i < flats.size(); ++i) { ASSERT_THAT(nav.Next(), Eq(flats[i])); EXPECT_THAT(nav.Current(), Eq(flats[i])); } @@ -180,21 +180,21 @@ TEST(CordRepBtreeNavigatorTest, Reset) { } TEST_P(CordRepBtreeNavigatorTest, Skip) { - int count = this->count(); + size_t count = this->count(); const Flats& flats = this->flats(); CordRepBtreeNavigator nav; nav.InitFirst(tree()); - for (int char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { + for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { Position pos = nav.Skip(char_offset); EXPECT_THAT(pos.edge, Eq(nav.Current())); EXPECT_THAT(pos.edge, Eq(flats[0])); EXPECT_THAT(pos.offset, Eq(char_offset)); } - for (int index1 = 0; index1 < count; ++index1) { - for (int index2 = index1; index2 < count; ++index2) { - for (int char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { + for (size_t index1 = 0; index1 < count; ++index1) { + for (size_t index2 = index1; index2 < count; ++index2) { + for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { CordRepBtreeNavigator nav; nav.InitFirst(tree()); @@ -215,20 +215,20 @@ TEST_P(CordRepBtreeNavigatorTest, Skip) { } TEST_P(CordRepBtreeNavigatorTest, Seek) { - int count = this->count(); + size_t count = this->count(); const Flats& flats = this->flats(); CordRepBtreeNavigator nav; nav.InitFirst(tree()); - for (int char_offset = 0; 
char_offset < kCharsPerFlat; ++char_offset) { + for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { Position pos = nav.Seek(char_offset); EXPECT_THAT(pos.edge, Eq(nav.Current())); EXPECT_THAT(pos.edge, Eq(flats[0])); EXPECT_THAT(pos.offset, Eq(char_offset)); } - for (int index = 0; index < count; ++index) { - for (int char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { + for (size_t index = 0; index < count; ++index) { + for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) { size_t offset = index * kCharsPerFlat + char_offset; Position pos1 = nav.Seek(offset); ASSERT_THAT(pos1.edge, Eq(flats[index])); @@ -249,7 +249,7 @@ TEST(CordRepBtreeNavigatorTest, InitOffset) { EXPECT_THAT(nav.btree(), Eq(tree)); EXPECT_THAT(pos.edge, Eq(tree->Edges()[1])); EXPECT_THAT(pos.edge, Eq(nav.Current())); - EXPECT_THAT(pos.offset, Eq(2)); + EXPECT_THAT(pos.offset, Eq(2u)); CordRep::Unref(tree); } @@ -319,6 +319,27 @@ TEST_P(CordRepBtreeNavigatorTest, ReadBeyondLengthOfTree) { ASSERT_THAT(result.tree, Eq(nullptr)); } +TEST(CordRepBtreeNavigatorTest, NavigateMaximumTreeDepth) { + CordRepFlat* flat1 = MakeFlat("Hello world"); + CordRepFlat* flat2 = MakeFlat("World Hello"); + + CordRepBtree* node = CordRepBtree::Create(flat1); + node = CordRepBtree::Append(node, flat2); + while (node->height() < CordRepBtree::kMaxHeight) { + node = CordRepBtree::New(node); + } + + CordRepBtreeNavigator nav; + CordRep* edge = nav.InitFirst(node); + EXPECT_THAT(edge, Eq(flat1)); + EXPECT_THAT(nav.Next(), Eq(flat2)); + EXPECT_THAT(nav.Next(), Eq(nullptr)); + EXPECT_THAT(nav.Previous(), Eq(flat1)); + EXPECT_THAT(nav.Previous(), Eq(nullptr)); + + CordRep::Unref(node); +} + } // namespace } // namespace cord_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc index 5dc76966d2..0d0e860139 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc @@ -17,6 +17,7 @@ #include #include "absl/base/config.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" #include "absl/strings/internal/cord_rep_btree_navigator.h" @@ -44,7 +45,7 @@ absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size, // can directly return the substring into the current data edge as the next // chunk. We can easily establish from the above code that `navigator_.Next()` // has not been called as that requires `chunk_size` to be zero. - if (n < chunk_size) return CordRepBtree::EdgeData(edge).substr(result.n); + if (n < chunk_size) return EdgeData(edge).substr(result.n); // The amount of data taken from the last edge is `chunk_size` and `result.n` // contains the offset into the current edge trailing the read data (which can @@ -60,7 +61,7 @@ absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size, // We did not read all data, return remaining data from current edge. 
edge = navigator_.Current(); remaining_ -= consumed_by_read + edge->length; - return CordRepBtree::EdgeData(edge).substr(result.n); + return EdgeData(edge).substr(result.n); } } // namespace cord_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h index 7aa79dbf10..8db8f8dd17 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h @@ -18,6 +18,7 @@ #include #include "absl/base/config.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" #include "absl/strings/internal/cord_rep_btree_navigator.h" @@ -167,7 +168,7 @@ inline absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) { assert(tree != nullptr); const CordRep* edge = navigator_.InitFirst(tree); remaining_ = tree->length - edge->length; - return CordRepBtree::EdgeData(edge); + return EdgeData(edge); } inline absl::string_view CordRepBtreeReader::Next() { @@ -175,7 +176,7 @@ inline absl::string_view CordRepBtreeReader::Next() { const CordRep* edge = navigator_.Next(); assert(edge != nullptr); remaining_ -= edge->length; - return CordRepBtree::EdgeData(edge); + return EdgeData(edge); } inline absl::string_view CordRepBtreeReader::Skip(size_t skip) { @@ -190,7 +191,7 @@ inline absl::string_view CordRepBtreeReader::Skip(size_t skip) { // The combined length of all edges skipped before `pos.edge` is `skip - // pos.offset`, all of which are 'consumed', as well as the current edge. 
remaining_ -= skip - pos.offset + pos.edge->length; - return CordRepBtree::EdgeData(pos.edge).substr(pos.offset); + return EdgeData(pos.edge).substr(pos.offset); } inline absl::string_view CordRepBtreeReader::Seek(size_t offset) { @@ -199,7 +200,7 @@ inline absl::string_view CordRepBtreeReader::Seek(size_t offset) { remaining_ = 0; return {}; } - absl::string_view chunk = CordRepBtree::EdgeData(pos.edge).substr(pos.offset); + absl::string_view chunk = EdgeData(pos.edge).substr(pos.offset); remaining_ = length() - offset - chunk.length(); return chunk; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc index 9b27a81fdb..b4cdd8e58b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc @@ -50,9 +50,9 @@ using ReadResult = CordRepBtreeReader::ReadResult; TEST(CordRepBtreeReaderTest, Next) { constexpr size_t kChars = 3; const size_t cap = CordRepBtree::kMaxCapacity; - int counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17}; + size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17}; - for (int count : counts) { + for (size_t count : counts) { std::string data = CreateRandomString(count * kChars); std::vector flats = CreateFlatsFromString(data, kChars); CordRepBtree* node = CordRepBtreeFromFlats(flats); @@ -74,7 +74,7 @@ TEST(CordRepBtreeReaderTest, Next) { EXPECT_THAT(reader.remaining(), Eq(remaining)); } - EXPECT_THAT(reader.remaining(), Eq(0)); + EXPECT_THAT(reader.remaining(), Eq(0u)); // Verify trying to read beyond EOF returns empty string_view EXPECT_THAT(reader.Next(), testing::IsEmpty()); @@ -86,9 +86,9 @@ TEST(CordRepBtreeReaderTest, Next) { TEST(CordRepBtreeReaderTest, Skip) { constexpr size_t kChars = 3; const size_t cap = CordRepBtree::kMaxCapacity; - int counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17}; + size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17}; - for (int count : counts) { + for (size_t count : counts) { std::string data = CreateRandomString(count * kChars); std::vector flats = CreateFlatsFromString(data, kChars); CordRepBtree* node = CordRepBtreeFromFlats(flats); @@ -125,16 +125,16 @@ TEST(CordRepBtreeReaderTest, SkipBeyondLength) { CordRepBtreeReader reader; reader.Init(tree); EXPECT_THAT(reader.Skip(100), IsEmpty()); - EXPECT_THAT(reader.remaining(), Eq(0)); + EXPECT_THAT(reader.remaining(), Eq(0u)); CordRep::Unref(tree); } TEST(CordRepBtreeReaderTest, Seek) { constexpr size_t kChars = 3; const size_t cap = CordRepBtree::kMaxCapacity; - int counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17}; + size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17}; - for (int count : counts) { + for (size_t count : counts) { std::string data = CreateRandomString(count * kChars); std::vector flats = CreateFlatsFromString(data, kChars); CordRepBtree* node = CordRepBtreeFromFlats(flats); @@ -159,9 +159,9 @@ TEST(CordRepBtreeReaderTest, SeekBeyondLength) { CordRepBtreeReader reader; reader.Init(tree); EXPECT_THAT(reader.Seek(6), IsEmpty()); - EXPECT_THAT(reader.remaining(), Eq(0)); + EXPECT_THAT(reader.remaining(), Eq(0u)); EXPECT_THAT(reader.Seek(100), IsEmpty()); - EXPECT_THAT(reader.remaining(), Eq(0)); + 
EXPECT_THAT(reader.remaining(), Eq(0u)); CordRep::Unref(tree); } @@ -179,7 +179,7 @@ TEST(CordRepBtreeReaderTest, Read) { chunk = reader.Read(0, chunk.length(), tree); EXPECT_THAT(tree, Eq(nullptr)); EXPECT_THAT(chunk, Eq("abcde")); - EXPECT_THAT(reader.remaining(), Eq(10)); + EXPECT_THAT(reader.remaining(), Eq(10u)); EXPECT_THAT(reader.Next(), Eq("fghij")); // Read in full @@ -188,7 +188,7 @@ TEST(CordRepBtreeReaderTest, Read) { EXPECT_THAT(tree, Ne(nullptr)); EXPECT_THAT(CordToString(tree), Eq("abcdefghijklmno")); EXPECT_THAT(chunk, Eq("")); - EXPECT_THAT(reader.remaining(), Eq(0)); + EXPECT_THAT(reader.remaining(), Eq(0u)); CordRep::Unref(tree); // Read < chunk bytes @@ -197,7 +197,7 @@ TEST(CordRepBtreeReaderTest, Read) { ASSERT_THAT(tree, Ne(nullptr)); EXPECT_THAT(CordToString(tree), Eq("abc")); EXPECT_THAT(chunk, Eq("de")); - EXPECT_THAT(reader.remaining(), Eq(10)); + EXPECT_THAT(reader.remaining(), Eq(10u)); EXPECT_THAT(reader.Next(), Eq("fghij")); CordRep::Unref(tree); @@ -207,7 +207,7 @@ TEST(CordRepBtreeReaderTest, Read) { ASSERT_THAT(tree, Ne(nullptr)); EXPECT_THAT(CordToString(tree), Eq("cd")); EXPECT_THAT(chunk, Eq("e")); - EXPECT_THAT(reader.remaining(), Eq(10)); + EXPECT_THAT(reader.remaining(), Eq(10u)); EXPECT_THAT(reader.Next(), Eq("fghij")); CordRep::Unref(tree); @@ -217,7 +217,7 @@ TEST(CordRepBtreeReaderTest, Read) { ASSERT_THAT(tree, Ne(nullptr)); EXPECT_THAT(CordToString(tree), Eq("fgh")); EXPECT_THAT(chunk, Eq("ij")); - EXPECT_THAT(reader.remaining(), Eq(5)); + EXPECT_THAT(reader.remaining(), Eq(5u)); EXPECT_THAT(reader.Next(), Eq("klmno")); CordRep::Unref(tree); @@ -227,7 +227,7 @@ TEST(CordRepBtreeReaderTest, Read) { ASSERT_THAT(tree, Ne(nullptr)); EXPECT_THAT(CordToString(tree), Eq("cdefghijklmn")); EXPECT_THAT(chunk, Eq("o")); - EXPECT_THAT(reader.remaining(), Eq(0)); + EXPECT_THAT(reader.remaining(), Eq(0u)); CordRep::Unref(tree); // Read across chunks landing on exact edge boundary @@ -236,7 +236,7 @@ TEST(CordRepBtreeReaderTest, Read) { ASSERT_THAT(tree, Ne(nullptr)); EXPECT_THAT(CordToString(tree), Eq("cdefghij")); EXPECT_THAT(chunk, Eq("klmno")); - EXPECT_THAT(reader.remaining(), Eq(0)); + EXPECT_THAT(reader.remaining(), Eq(0u)); CordRep::Unref(tree); CordRep::Unref(node); @@ -245,9 +245,9 @@ TEST(CordRepBtreeReaderTest, Read) { TEST(CordRepBtreeReaderTest, ReadExhaustive) { constexpr size_t kChars = 3; const size_t cap = CordRepBtree::kMaxCapacity; - int counts[] = {1, 2, cap, cap * cap + 1, cap * cap * cap * 2 + 17}; + size_t counts[] = {1, 2, cap, cap * cap + 1, cap * cap * cap * 2 + 17}; - for (int count : counts) { + for (size_t count : counts) { std::string data = CreateRandomString(count * kChars); std::vector flats = CreateFlatsFromString(data, kChars); CordRepBtree* node = CordRepBtreeFromFlats(flats); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc index 073a7d45a9..9d6ce484ec 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc @@ -25,6 +25,7 @@ #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/cleanup/cleanup.h" +#include "absl/strings/internal/cord_data_edge.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_test_util.h" #include 
"absl/strings/str_cat.h" @@ -47,10 +48,11 @@ class CordRepBtreeTestPeer { namespace { using ::absl::cordrep_testing::AutoUnref; +using ::absl::cordrep_testing::CordCollectRepsIf; using ::absl::cordrep_testing::CordToString; +using ::absl::cordrep_testing::CordVisitReps; using ::absl::cordrep_testing::CreateFlatsFromString; using ::absl::cordrep_testing::CreateRandomString; -using ::absl::cordrep_testing::MakeConcat; using ::absl::cordrep_testing::MakeExternal; using ::absl::cordrep_testing::MakeFlat; using ::absl::cordrep_testing::MakeSubstring; @@ -62,6 +64,7 @@ using ::testing::ElementsAre; using ::testing::ElementsAreArray; using ::testing::Eq; using ::testing::HasSubstr; +using ::testing::Le; using ::testing::Ne; using ::testing::Not; using ::testing::SizeIs; @@ -125,6 +128,16 @@ MATCHER_P2(IsSubstring, start, length, return true; } +MATCHER_P2(EqExtractResult, tree, rep, "Equals ExtractResult") { + if (arg.tree != tree || arg.extracted != rep) { + *result_listener << "Expected {" << static_cast(tree) << ", " + << static_cast(rep) << "}, got {" << arg.tree + << ", " << arg.extracted << "}"; + return false; + } + return true; +} + // DataConsumer is a simple helper class used by tests to 'consume' string // fragments from the provided input in forward or backward direction. class DataConsumer { @@ -205,14 +218,17 @@ CordRepBtree* MakeTree(size_t size, bool append = true) { return tree; } -CordRepBtree* CreateTree(absl::string_view data, size_t chunk_size) { - std::vector flats = CreateFlatsFromString(data, chunk_size); - auto it = flats.begin(); +CordRepBtree* CreateTree(absl::Span reps) { + auto it = reps.begin(); CordRepBtree* tree = CordRepBtree::Create(*it); - while (++it != flats.end()) tree = CordRepBtree::Append(tree, *it); + while (++it != reps.end()) tree = CordRepBtree::Append(tree, *it); return tree; } +CordRepBtree* CreateTree(absl::string_view data, size_t chunk_size) { + return CreateTree(CreateFlatsFromString(data, chunk_size)); +} + CordRepBtree* CreateTreeReverse(absl::string_view data, size_t chunk_size) { std::vector flats = CreateFlatsFromString(data, chunk_size); auto rit = flats.rbegin(); @@ -268,13 +284,14 @@ INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeDualTest, TEST(CordRepBtreeTest, SizeIsMultipleOf64) { // Only enforce for fully 64-bit platforms. 
if (sizeof(size_t) == 8 && sizeof(void*) == 8) { - EXPECT_THAT(sizeof(CordRepBtree) % 64, Eq(0)) << "Should be multiple of 64"; + EXPECT_THAT(sizeof(CordRepBtree) % 64, Eq(0u)) + << "Should be multiple of 64"; } } TEST(CordRepBtreeTest, NewDestroyEmptyTree) { auto* tree = CordRepBtree::New(); - EXPECT_THAT(tree->size(), Eq(0)); + EXPECT_THAT(tree->size(), Eq(0u)); EXPECT_THAT(tree->height(), Eq(0)); EXPECT_THAT(tree->Edges(), ElementsAre()); CordRepBtree::Destroy(tree); @@ -282,7 +299,7 @@ TEST(CordRepBtreeTest, NewDestroyEmptyTree) { TEST(CordRepBtreeTest, NewDestroyEmptyTreeAtHeight) { auto* tree = CordRepBtree::New(3); - EXPECT_THAT(tree->size(), Eq(0)); + EXPECT_THAT(tree->size(), Eq(0u)); EXPECT_THAT(tree->height(), Eq(3)); EXPECT_THAT(tree->Edges(), ElementsAre()); CordRepBtree::Destroy(tree); @@ -306,40 +323,31 @@ TEST(CordRepBtreeTest, EdgeData) { CordRepExternal* external = MakeExternal("Hello external"); CordRep* substr1 = MakeSubstring(1, 6, CordRep::Ref(flat)); CordRep* substr2 = MakeSubstring(1, 6, CordRep::Ref(external)); - CordRep* concat = MakeConcat(CordRep::Ref(flat), CordRep::Ref(external)); CordRep* bad_substr = MakeSubstring(1, 2, CordRep::Ref(substr1)); - EXPECT_TRUE(CordRepBtree::IsDataEdge(flat)); - EXPECT_THAT(CordRepBtree::EdgeDataPtr(flat), - TypedEq(flat->Data())); - EXPECT_THAT(CordRepBtree::EdgeData(flat), Eq("Hello world")); + EXPECT_TRUE(IsDataEdge(flat)); + EXPECT_THAT(EdgeData(flat).data(), TypedEq(flat->Data())); + EXPECT_THAT(EdgeData(flat), Eq("Hello world")); - EXPECT_TRUE(CordRepBtree::IsDataEdge(external)); - EXPECT_THAT(CordRepBtree::EdgeDataPtr(external), - TypedEq(external->base)); - EXPECT_THAT(CordRepBtree::EdgeData(external), Eq("Hello external")); + EXPECT_TRUE(IsDataEdge(external)); + EXPECT_THAT(EdgeData(external).data(), TypedEq(external->base)); + EXPECT_THAT(EdgeData(external), Eq("Hello external")); - EXPECT_TRUE(CordRepBtree::IsDataEdge(substr1)); - EXPECT_THAT(CordRepBtree::EdgeDataPtr(substr1), - TypedEq(flat->Data() + 1)); - EXPECT_THAT(CordRepBtree::EdgeData(substr1), Eq("ello w")); + EXPECT_TRUE(IsDataEdge(substr1)); + EXPECT_THAT(EdgeData(substr1).data(), TypedEq(flat->Data() + 1)); + EXPECT_THAT(EdgeData(substr1), Eq("ello w")); - EXPECT_TRUE(CordRepBtree::IsDataEdge(substr2)); - EXPECT_THAT(CordRepBtree::EdgeDataPtr(substr2), + EXPECT_TRUE(IsDataEdge(substr2)); + EXPECT_THAT(EdgeData(substr2).data(), TypedEq(external->base + 1)); - EXPECT_THAT(CordRepBtree::EdgeData(substr2), Eq("ello e")); + EXPECT_THAT(EdgeData(substr2), Eq("ello e")); - EXPECT_FALSE(CordRepBtree::IsDataEdge(concat)); - EXPECT_FALSE(CordRepBtree::IsDataEdge(bad_substr)); + EXPECT_FALSE(IsDataEdge(bad_substr)); #if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG) - EXPECT_DEATH(CordRepBtree::EdgeData(concat), ".*"); - EXPECT_DEATH(CordRepBtree::EdgeDataPtr(concat), ".*"); - EXPECT_DEATH(CordRepBtree::EdgeData(bad_substr), ".*"); - EXPECT_DEATH(CordRepBtree::EdgeDataPtr(bad_substr), ".*"); + EXPECT_DEATH(EdgeData(bad_substr), ".*"); #endif CordRep::Unref(bad_substr); - CordRep::Unref(concat); CordRep::Unref(substr2); CordRep::Unref(substr1); CordRep::Unref(external); @@ -349,7 +357,7 @@ TEST(CordRepBtreeTest, EdgeData) { TEST(CordRepBtreeTest, CreateUnrefLeaf) { auto* flat = MakeFlat("a"); auto* leaf = CordRepBtree::Create(flat); - EXPECT_THAT(leaf->size(), Eq(1)); + EXPECT_THAT(leaf->size(), Eq(1u)); EXPECT_THAT(leaf->height(), Eq(0)); EXPECT_THAT(leaf->Edges(), ElementsAre(flat)); CordRepBtree::Unref(leaf); @@ -358,7 +366,7 @@ TEST(CordRepBtreeTest, 
CreateUnrefLeaf) { TEST(CordRepBtreeTest, NewUnrefNode) { auto* leaf = CordRepBtree::Create(MakeFlat("a")); CordRepBtree* tree = CordRepBtree::New(leaf); - EXPECT_THAT(tree->size(), Eq(1)); + EXPECT_THAT(tree->size(), Eq(1u)); EXPECT_THAT(tree->height(), Eq(1)); EXPECT_THAT(tree->Edges(), ElementsAre(leaf)); CordRepBtree::Unref(tree); @@ -646,7 +654,7 @@ TEST_P(CordRepBtreeDualTest, MergeEqualHeightTrees) { CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right) : CordRepBtree::Prepend(right, left); EXPECT_THAT(tree, IsNode(1)); - EXPECT_THAT(tree->Edges(), SizeIs(5)); + EXPECT_THAT(tree->Edges(), SizeIs(5u)); // `tree` contains all flats originally belonging to `left` and `right`. EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats)); @@ -674,7 +682,7 @@ TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeNotExceedingLeafCapacity) { CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right) : CordRepBtree::Prepend(right, left); EXPECT_THAT(tree, IsNode(1)); - EXPECT_THAT(tree->Edges(), SizeIs(3)); + EXPECT_THAT(tree->Edges(), SizeIs(3u)); // `tree` contains all flats originally belonging to `left` and `right`. EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats)); @@ -702,7 +710,7 @@ TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeExceedingLeafCapacity) { CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right) : CordRepBtree::Prepend(right, left); EXPECT_THAT(tree, IsNode(1)); - EXPECT_THAT(tree->Edges(), SizeIs(4)); + EXPECT_THAT(tree->Edges(), SizeIs(4u)); // `tree` contains all flats originally belonging to `left` and `right`. EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats)); @@ -731,7 +739,7 @@ TEST(CordRepBtreeTest, MergeFuzzTest) { auto random_leaf_count = [&]() { std::uniform_int_distribution dist_height(0, 3); std::uniform_int_distribution dist_leaf(0, max_cap - 1); - const size_t height = dist_height(rnd); + const int height = dist_height(rnd); return (height ? 
pow(max_cap, height) : 0) + dist_leaf(rnd); }; @@ -742,14 +750,16 @@ TEST(CordRepBtreeTest, MergeFuzzTest) { CordRepBtree* left = MakeTree(random_leaf_count(), coin_flip(rnd)); GetLeafEdges(left, flats); if (dice_throw(rnd) == 1) { - std::uniform_int_distribution dist(0, left->height()); + std::uniform_int_distribution dist( + 0, static_cast(left->height())); RefEdgesAt(dist(rnd), refs, left); } CordRepBtree* right = MakeTree(random_leaf_count(), coin_flip(rnd)); GetLeafEdges(right, flats); if (dice_throw(rnd) == 1) { - std::uniform_int_distribution dist(0, right->height()); + std::uniform_int_distribution dist( + 0, static_cast(right->height())); RefEdgesAt(dist(rnd), refs, right); } @@ -759,6 +769,63 @@ TEST(CordRepBtreeTest, MergeFuzzTest) { } } +TEST_P(CordRepBtreeTest, RemoveSuffix) { + // Create tree of 1, 2 and 3 levels high + constexpr size_t max_cap = CordRepBtree::kMaxCapacity; + for (size_t cap : {max_cap - 1, max_cap * 2, max_cap * max_cap * 2}) { + const std::string data = CreateRandomString(cap * 512); + + { + // Verify RemoveSuffix() + AutoUnref refs; + CordRepBtree* node = refs.RefIf(shared(), CreateTree(data, 512)); + EXPECT_THAT(CordRepBtree::RemoveSuffix(node, data.length()), Eq(nullptr)); + + // Verify RemoveSuffix() + node = refs.RefIf(shared(), CreateTree(data, 512)); + EXPECT_THAT(CordRepBtree::RemoveSuffix(node, 0), Eq(node)); + CordRep::Unref(node); + } + + for (size_t n = 1; n < data.length(); ++n) { + AutoUnref refs; + auto flats = CreateFlatsFromString(data, 512); + CordRepBtree* node = refs.RefIf(shared(), CreateTree(flats)); + CordRep* rep = refs.Add(CordRepBtree::RemoveSuffix(node, n)); + EXPECT_THAT(CordToString(rep), Eq(data.substr(0, data.length() - n))); + + // Collect all flats + auto is_flat = [](CordRep* rep) { return rep->tag >= FLAT; }; + std::vector edges = CordCollectRepsIf(is_flat, rep); + ASSERT_THAT(edges.size(), Le(flats.size())); + + // Isolate last edge + CordRep* last_edge = edges.back(); + edges.pop_back(); + const size_t last_length = rep->length - edges.size() * 512; + + // All flats except the last edge must be kept or copied 'as is' + size_t index = 0; + for (CordRep* edge : edges) { + ASSERT_THAT(edge, Eq(flats[index++])); + ASSERT_THAT(edge->length, Eq(512u)); + } + + // CordRepBtree may optimize small substrings to avoid waste, so only + // check for flat sharing / updates where the code should always do this. 
+ if (last_length >= 500) { + EXPECT_THAT(last_edge, Eq(flats[index++])); + if (shared()) { + EXPECT_THAT(last_edge->length, Eq(512u)); + } else { + EXPECT_TRUE(last_edge->refcount.IsOne()); + EXPECT_THAT(last_edge->length, Eq(last_length)); + } + } + } + } +} + TEST(CordRepBtreeTest, SubTree) { // Create tree of at least 2 levels high constexpr size_t max_cap = CordRepBtree::kMaxCapacity; @@ -773,8 +840,8 @@ TEST(CordRepBtreeTest, SubTree) { node = CordRepBtree::Append(node, CordRep::Ref(flats[i])); } - for (int offset = 0; offset < data.length(); ++offset) { - for (int length = 1; length <= data.length() - offset; ++length) { + for (size_t offset = 0; offset < data.length(); ++offset) { + for (size_t length = 1; length <= data.length() - offset; ++length) { CordRep* rep = node->SubTree(offset, length); EXPECT_THAT(CordToString(rep), Eq(data.substr(offset, length))); CordRep::Unref(rep); @@ -801,12 +868,12 @@ TEST(CordRepBtreeTest, SubTreeOnExistingSubstring) { ASSERT_THAT(result->tag, Eq(BTREE)); CordRep::Unref(leaf); leaf = result->btree(); - ASSERT_THAT(leaf->Edges(), ElementsAre(_, IsSubstring(0, 990))); + ASSERT_THAT(leaf->Edges(), ElementsAre(_, IsSubstring(0u, 990u))); EXPECT_THAT(leaf->Edges()[1]->substring()->child, Eq(flat)); // Verify substring of substring. result = leaf->SubTree(3 + 5, 970); - ASSERT_THAT(result, IsSubstring(5, 970)); + ASSERT_THAT(result, IsSubstring(5u, 970u)); EXPECT_THAT(result->substring()->child, Eq(flat)); CordRep::Unref(result); @@ -933,50 +1000,6 @@ TEST_P(CordRepBtreeTest, AddLargeDataToLeaf) { } } -TEST_P(CordRepBtreeDualTest, CreateFromConcat) { - AutoUnref refs; - CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"), - MakeFlat("nopqrstuv"), MakeFlat("wxyz")}; - auto* left = MakeConcat(flats[0], flats[1]); - auto* right = MakeConcat(flats[2], refs.RefIf(first_shared(), flats[3])); - auto* concat = refs.RefIf(second_shared(), MakeConcat(left, right)); - CordRepBtree* result = CordRepBtree::Create(concat); - ASSERT_TRUE(CordRepBtree::IsValid(result)); - EXPECT_THAT(result->length, Eq(26)); - EXPECT_THAT(CordToString(result), Eq("abcdefghijklmnopqrstuvwxyz")); - CordRep::Unref(result); -} - -TEST_P(CordRepBtreeDualTest, AppendConcat) { - AutoUnref refs; - CordRep* flats[] = {MakeFlat("defgh"), MakeFlat("ijklm"), - MakeFlat("nopqrstuv"), MakeFlat("wxyz")}; - auto* left = MakeConcat(flats[0], flats[1]); - auto* right = MakeConcat(flats[2], refs.RefIf(first_shared(), flats[3])); - auto* concat = refs.RefIf(second_shared(), MakeConcat(left, right)); - CordRepBtree* result = CordRepBtree::Create(MakeFlat("abc")); - result = CordRepBtree::Append(result, concat); - ASSERT_TRUE(CordRepBtree::IsValid(result)); - EXPECT_THAT(result->length, Eq(26)); - EXPECT_THAT(CordToString(result), Eq("abcdefghijklmnopqrstuvwxyz")); - CordRep::Unref(result); -} - -TEST_P(CordRepBtreeDualTest, PrependConcat) { - AutoUnref refs; - CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"), - MakeFlat("nopqrstuv"), MakeFlat("wx")}; - auto* left = MakeConcat(flats[0], flats[1]); - auto* right = MakeConcat(flats[2], refs.RefIf(first_shared(), flats[3])); - auto* concat = refs.RefIf(second_shared(), MakeConcat(left, right)); - CordRepBtree* result = CordRepBtree::Create(MakeFlat("yz")); - result = CordRepBtree::Prepend(result, concat); - ASSERT_TRUE(CordRepBtree::IsValid(result)); - EXPECT_THAT(result->length, Eq(26)); - EXPECT_THAT(CordToString(result), Eq("abcdefghijklmnopqrstuvwxyz")); - CordRep::Unref(result); -} - TEST_P(CordRepBtreeTest, CreateFromTreeReturnsTree) 
{ AutoUnref refs; CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("Hello world")); @@ -986,23 +1009,6 @@ TEST_P(CordRepBtreeTest, CreateFromTreeReturnsTree) { CordRep::Unref(result); } -TEST_P(CordRepBtreeTest, ExceedMaxHeight) { - AutoUnref refs; - CordRepBtree* tree = MakeLeaf(); - for (int h = 1; h <= CordRepBtree::kMaxHeight; ++h) { - CordRepBtree* newtree = CordRepBtree::New(tree); - for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) { - newtree = CordRepBtree::Append(newtree, CordRep::Ref(tree)); - } - tree = newtree; - } - refs.RefIf(shared(), tree); -#if defined(GTEST_HAS_DEATH_TEST) - EXPECT_DEATH(tree = CordRepBtree::Append(tree, MakeFlat("Boom")), ".*"); -#endif - CordRep::Unref(tree); -} - TEST(CordRepBtreeTest, GetCharacter) { size_t n = CordRepBtree::kMaxCapacity * CordRepBtree::kMaxCapacity + 2; std::string data = CreateRandomString(n * 3); @@ -1089,7 +1095,7 @@ TEST_P(CordRepBtreeHeightTest, GetAppendBufferNotFlat) { for (int i = 1; i <= height(); ++i) { tree = CordRepBtree::New(tree); } - EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0)); + EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u)); CordRepBtree::Unref(tree); } @@ -1099,7 +1105,7 @@ TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatNotPrivate) { for (int i = 1; i <= height(); ++i) { tree = CordRepBtree::New(tree); } - EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0)); + EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u)); CordRepBtree::Unref(tree); CordRep::Unref(flat); } @@ -1113,7 +1119,7 @@ TEST_P(CordRepBtreeHeightTest, GetAppendBufferTreeNotPrivate) { if (i == (height() + 1) / 2) refs.Ref(tree); tree = CordRepBtree::New(tree); } - EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0)); + EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u)); CordRepBtree::Unref(tree); CordRep::Unref(flat); } @@ -1125,7 +1131,7 @@ TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatNoCapacity) { for (int i = 1; i <= height(); ++i) { tree = CordRepBtree::New(tree); } - EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0)); + EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u)); CordRepBtree::Unref(tree); } @@ -1136,9 +1142,9 @@ TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatWithCapacity) { tree = CordRepBtree::New(tree); } absl::Span span = tree->GetAppendBuffer(2); - EXPECT_THAT(span, SizeIs(2)); + EXPECT_THAT(span, SizeIs(2u)); EXPECT_THAT(span.data(), TypedEq(flat->Data() + 3)); - EXPECT_THAT(tree->length, Eq(5)); + EXPECT_THAT(tree->length, Eq(5u)); size_t avail = flat->Capacity() - 5; span = tree->GetAppendBuffer(avail + 100); @@ -1389,6 +1395,173 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) { CordRep::Unref(tree); } +TEST_P(CordRepBtreeTest, Rebuild) { + for (size_t size : {3u, 8u, 100u, 10000u, 1000000u}) { + SCOPED_TRACE(absl::StrCat("Rebuild @", size)); + + std::vector flats; + for (size_t i = 0; i < size; ++i) { + flats.push_back(CordRepFlat::New(2)); + flats.back()->Data()[0] = 'x'; + flats.back()->length = 1; + } + + // Build the tree into 'right', and each so many 'split_limit' edges, + // combine 'left' + 'right' into a new 'left', and start a new 'right'. + // This guarantees we get a reasonable amount of chaos in the tree. + size_t split_count = 0; + size_t split_limit = 3; + auto it = flats.begin(); + CordRepBtree* left = nullptr; + CordRepBtree* right = CordRepBtree::New(*it); + while (++it != flats.end()) { + if (++split_count >= split_limit) { + split_limit += split_limit / 16; + left = left ? 
CordRepBtree::Append(left, right) : right; + right = CordRepBtree::New(*it); + } else { + right = CordRepBtree::Append(right, *it); + } + } + + // Finalize tree + left = left ? CordRepBtree::Append(left, right) : right; + + // Rebuild + AutoUnref ref; + left = ref.Add(CordRepBtree::Rebuild(ref.RefIf(shared(), left))); + ASSERT_TRUE(CordRepBtree::IsValid(left)); + + // Verify we have the exact same edges in the exact same order. + bool ok = true; + it = flats.begin(); + CordVisitReps(left, [&](CordRep* edge) { + if (edge->tag < FLAT) return; + ok = ok && (it != flats.end() && *it++ == edge); + }); + EXPECT_TRUE(ok && it == flats.end()) << "Rebuild edges mismatch"; + } +} + +// Convenience helper for CordRepBtree::ExtractAppendBuffer +CordRepBtree::ExtractResult ExtractLast(CordRepBtree* input, size_t cap = 1) { + return CordRepBtree::ExtractAppendBuffer(input, cap); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferLeafSingleFlat) { + CordRep* flat = MakeFlat("Abc"); + CordRepBtree* leaf = CordRepBtree::Create(flat); + EXPECT_THAT(ExtractLast(leaf), EqExtractResult(nullptr, flat)); + CordRep::Unref(flat); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNodeSingleFlat) { + CordRep* flat = MakeFlat("Abc"); + CordRepBtree* leaf = CordRepBtree::Create(flat); + CordRepBtree* node = CordRepBtree::New(leaf); + EXPECT_THAT(ExtractLast(node), EqExtractResult(nullptr, flat)); + CordRep::Unref(flat); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferLeafTwoFlats) { + std::vector flats = CreateFlatsFromString("abcdef", 3); + CordRepBtree* leaf = CreateTree(flats); + EXPECT_THAT(ExtractLast(leaf), EqExtractResult(flats[0], flats[1])); + CordRep::Unref(flats[0]); + CordRep::Unref(flats[1]); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlats) { + std::vector flats = CreateFlatsFromString("abcdef", 3); + CordRepBtree* leaf = CreateTree(flats); + CordRepBtree* node = CordRepBtree::New(leaf); + EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1])); + CordRep::Unref(flats[0]); + CordRep::Unref(flats[1]); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlatsInTwoLeafs) { + std::vector flats = CreateFlatsFromString("abcdef", 3); + CordRepBtree* leaf1 = CordRepBtree::Create(flats[0]); + CordRepBtree* leaf2 = CordRepBtree::Create(flats[1]); + CordRepBtree* node = CordRepBtree::New(leaf1, leaf2); + EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1])); + CordRep::Unref(flats[0]); + CordRep::Unref(flats[1]); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferLeafThreeFlats) { + std::vector flats = CreateFlatsFromString("abcdefghi", 3); + CordRepBtree* leaf = CreateTree(flats); + EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, flats[2])); + CordRep::Unref(flats[2]); + CordRep::Unref(leaf); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightNoFolding) { + CordRep* flat = MakeFlat("Abc"); + std::vector flats = CreateFlatsFromString("defghi", 3); + CordRepBtree* leaf1 = CordRepBtree::Create(flat); + CordRepBtree* leaf2 = CreateTree(flats); + CordRepBtree* node = CordRepBtree::New(leaf1, leaf2); + EXPECT_THAT(ExtractLast(node), EqExtractResult(node, flats[1])); + EXPECT_THAT(node->Edges(), ElementsAre(leaf1, leaf2)); + EXPECT_THAT(leaf1->Edges(), ElementsAre(flat)); + EXPECT_THAT(leaf2->Edges(), ElementsAre(flats[0])); + CordRep::Unref(node); + CordRep::Unref(flats[1]); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightLeafFolding) { + CordRep* flat = MakeFlat("Abc"); + std::vector flats = CreateFlatsFromString("defghi", 3); + CordRepBtree* 
leaf1 = CreateTree(flats); + CordRepBtree* leaf2 = CordRepBtree::Create(flat); + CordRepBtree* node = CordRepBtree::New(leaf1, leaf2); + EXPECT_THAT(ExtractLast(node), EqExtractResult(leaf1, flat)); + EXPECT_THAT(leaf1->Edges(), ElementsAreArray(flats)); + CordRep::Unref(leaf1); + CordRep::Unref(flat); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNoCapacity) { + std::vector flats = CreateFlatsFromString("abcdef", 3); + CordRepBtree* leaf = CreateTree(flats); + size_t avail = flats[1]->flat()->Capacity() - flats[1]->length; + EXPECT_THAT(ExtractLast(leaf, avail + 1), EqExtractResult(leaf, nullptr)); + EXPECT_THAT(ExtractLast(leaf, avail), EqExtractResult(flats[0], flats[1])); + CordRep::Unref(flats[0]); + CordRep::Unref(flats[1]); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferNotFlat) { + std::vector flats = CreateFlatsFromString("abcdef", 3); + auto substr = MakeSubstring(1, 2, flats[1]); + CordRepBtree* leaf = CreateTree({flats[0], substr}); + EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr)); + CordRep::Unref(leaf); +} + +TEST(CordRepBtreeTest, ExtractAppendBufferShared) { + std::vector flats = CreateFlatsFromString("abcdef", 3); + CordRepBtree* leaf = CreateTree(flats); + + CordRep::Ref(flats[1]); + EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr)); + CordRep::Unref(flats[1]); + + CordRep::Ref(leaf); + EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr)); + CordRep::Unref(leaf); + + CordRepBtree* node = CordRepBtree::New(leaf); + CordRep::Ref(node); + EXPECT_THAT(ExtractLast(node), EqExtractResult(node, nullptr)); + CordRep::Unref(node); + + CordRep::Unref(node); +} + } // namespace } // namespace cord_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc index 81514543db..20a5579767 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc @@ -40,88 +40,21 @@ CordRep* ClipSubstring(CordRepSubstring* substring) { return child; } -// Unrefs the provided `concat`, and returns `{concat->left, concat->right}` -// Adds or assumes a reference on `concat->left` and `concat->right`. -// Returns an array of 2 elements containing the left and right nodes. 
-std::array ClipConcat(CordRepConcat* concat) { - std::array result{concat->left, concat->right}; - if (concat->refcount.IsOne()) { - delete concat; - } else { - CordRep::Ref(result[0]); - CordRep::Ref(result[1]); - CordRep::Unref(concat); - } - return result; -} - -void Consume(bool forward, CordRep* rep, ConsumeFn consume_fn) { - size_t offset = 0; - size_t length = rep->length; - struct Entry { - CordRep* rep; - size_t offset; - size_t length; - }; - absl::InlinedVector stack; - - for (;;) { - if (rep->tag == CONCAT) { - std::array res = ClipConcat(rep->concat()); - CordRep* left = res[0]; - CordRep* right = res[1]; - - if (left->length <= offset) { - // Don't need left node - offset -= left->length; - CordRep::Unref(left); - rep = right; - continue; - } - - size_t length_left = left->length - offset; - if (length_left >= length) { - // Don't need right node - CordRep::Unref(right); - rep = left; - continue; - } - - // Need both nodes - size_t length_right = length - length_left; - if (forward) { - stack.push_back({right, 0, length_right}); - rep = left; - length = length_left; - } else { - stack.push_back({left, offset, length_left}); - rep = right; - offset = 0; - length = length_right; - } - } else if (rep->tag == SUBSTRING) { - offset += rep->substring()->start; - rep = ClipSubstring(rep->substring()); - } else { - consume_fn(rep, offset, length); - if (stack.empty()) return; - - rep = stack.back().rep; - offset = stack.back().offset; - length = stack.back().length; - stack.pop_back(); - } - } -} - } // namespace void Consume(CordRep* rep, ConsumeFn consume_fn) { - return Consume(true, rep, std::move(consume_fn)); + size_t offset = 0; + size_t length = rep->length; + + if (rep->tag == SUBSTRING) { + offset += rep->substring()->start; + rep = ClipSubstring(rep->substring()); + } + consume_fn(rep, offset, length); } void ReverseConsume(CordRep* rep, ConsumeFn consume_fn) { - return Consume(false, rep, std::move(consume_fn)); + return Consume(rep, std::move(consume_fn)); } } // namespace cord_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume_test.cc deleted file mode 100644 index e507824b4f..0000000000 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume_test.cc +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2021 The Abseil Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "absl/strings/internal/cord_rep_consume.h" - -#include -#include - -#include "gmock/gmock.h" -#include "gtest/gtest.h" -#include "absl/strings/internal/cord_internal.h" -#include "absl/strings/internal/cord_rep_flat.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace cord_internal { -namespace { - -using testing::InSequence; -using testing::MockFunction; - -// Returns the depth of a node -int Depth(const CordRep* rep) { - return (rep->tag == CONCAT) ? 
rep->concat()->depth() : 0; -} - -// Creates a concatenation of the specified nodes. -CordRepConcat* CreateConcat(CordRep* left, CordRep* right) { - auto* concat = new CordRepConcat(); - concat->tag = CONCAT; - concat->left = left; - concat->right = right; - concat->length = left->length + right->length; - concat->set_depth(1 + (std::max)(Depth(left), Depth(right))); - return concat; -} - -// Creates a flat with the length set to `length` -CordRepFlat* CreateFlatWithLength(size_t length) { - auto* flat = CordRepFlat::New(length); - flat->length = length; - return flat; -} - -// Creates a substring node on the specified child. -CordRepSubstring* CreateSubstring(CordRep* child, size_t start, size_t length) { - auto* rep = new CordRepSubstring(); - rep->length = length; - rep->tag = SUBSTRING; - rep->start = start; - rep->child = child; - return rep; -} - -// Flats we use in the tests -CordRep* flat[6]; - -// Creates a test tree -CordRep* CreateTestTree() { - flat[0] = CreateFlatWithLength(1); - flat[1] = CreateFlatWithLength(7); - CordRepConcat* left = CreateConcat(flat[0], CreateSubstring(flat[1], 2, 4)); - - flat[2] = CreateFlatWithLength(9); - flat[3] = CreateFlatWithLength(13); - CordRepConcat* right1 = CreateConcat(flat[2], flat[3]); - - flat[4] = CreateFlatWithLength(15); - flat[5] = CreateFlatWithLength(19); - CordRepConcat* right2 = CreateConcat(flat[4], flat[5]); - - CordRepConcat* right = CreateConcat(right1, CreateSubstring(right2, 5, 17)); - return CreateConcat(left, right); -} - -TEST(CordRepConsumeTest, Consume) { - InSequence in_sequence; - CordRep* tree = CreateTestTree(); - MockFunction consume; - EXPECT_CALL(consume, Call(flat[0], 0, 1)); - EXPECT_CALL(consume, Call(flat[1], 2, 4)); - EXPECT_CALL(consume, Call(flat[2], 0, 9)); - EXPECT_CALL(consume, Call(flat[3], 0, 13)); - EXPECT_CALL(consume, Call(flat[4], 5, 10)); - EXPECT_CALL(consume, Call(flat[5], 0, 7)); - Consume(tree, consume.AsStdFunction()); - for (CordRep* rep : flat) { - EXPECT_TRUE(rep->refcount.IsOne()); - CordRep::Unref(rep); - } -} - -TEST(CordRepConsumeTest, ConsumeShared) { - InSequence in_sequence; - CordRep* tree = CreateTestTree(); - MockFunction consume; - EXPECT_CALL(consume, Call(flat[0], 0, 1)); - EXPECT_CALL(consume, Call(flat[1], 2, 4)); - EXPECT_CALL(consume, Call(flat[2], 0, 9)); - EXPECT_CALL(consume, Call(flat[3], 0, 13)); - EXPECT_CALL(consume, Call(flat[4], 5, 10)); - EXPECT_CALL(consume, Call(flat[5], 0, 7)); - Consume(CordRep::Ref(tree), consume.AsStdFunction()); - for (CordRep* rep : flat) { - EXPECT_FALSE(rep->refcount.IsOne()); - CordRep::Unref(rep); - } - CordRep::Unref(tree); -} - -TEST(CordRepConsumeTest, Reverse) { - InSequence in_sequence; - CordRep* tree = CreateTestTree(); - MockFunction consume; - EXPECT_CALL(consume, Call(flat[5], 0, 7)); - EXPECT_CALL(consume, Call(flat[4], 5, 10)); - EXPECT_CALL(consume, Call(flat[3], 0, 13)); - EXPECT_CALL(consume, Call(flat[2], 0, 9)); - EXPECT_CALL(consume, Call(flat[1], 2, 4)); - EXPECT_CALL(consume, Call(flat[0], 0, 1)); - ReverseConsume(tree, consume.AsStdFunction()); - for (CordRep* rep : flat) { - EXPECT_TRUE(rep->refcount.IsOne()); - CordRep::Unref(rep); - } -} - -TEST(CordRepConsumeTest, ReverseShared) { - InSequence in_sequence; - CordRep* tree = CreateTestTree(); - MockFunction consume; - EXPECT_CALL(consume, Call(flat[5], 0, 7)); - EXPECT_CALL(consume, Call(flat[4], 5, 10)); - EXPECT_CALL(consume, Call(flat[3], 0, 13)); - EXPECT_CALL(consume, Call(flat[2], 0, 9)); - EXPECT_CALL(consume, Call(flat[1], 2, 4)); - 
EXPECT_CALL(consume, Call(flat[0], 0, 1)); - ReverseConsume(CordRep::Ref(tree), consume.AsStdFunction()); - for (CordRep* rep : flat) { - EXPECT_FALSE(rep->refcount.IsOne()); - CordRep::Unref(rep); - } - CordRep::Unref(tree); -} - -TEST(CordRepConsumeTest, UnreachableFlat) { - InSequence in_sequence; - CordRepFlat* flat1 = CreateFlatWithLength(10); - CordRepFlat* flat2 = CreateFlatWithLength(20); - CordRepConcat* concat = CreateConcat(flat1, flat2); - CordRepSubstring* tree = CreateSubstring(concat, 15, 10); - MockFunction consume; - EXPECT_CALL(consume, Call(flat2, 5, 10)); - Consume(tree, consume.AsStdFunction()); - EXPECT_TRUE(flat2->refcount.IsOne()); - CordRep::Unref(flat2); -} - -} // namespace -} // namespace cord_internal -ABSL_NAMESPACE_END -} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.cc new file mode 100644 index 0000000000..7d7273ef8d --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.cc @@ -0,0 +1,55 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/internal/cord_rep_crc.h" + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/internal/cord_internal.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace cord_internal { + +CordRepCrc* CordRepCrc::New(CordRep* child, uint32_t crc) { + if (child != nullptr && child->IsCrc()) { + if (child->refcount.IsOne()) { + child->crc()->crc = crc; + return child->crc(); + } + CordRep* old = child; + child = old->crc()->child; + CordRep::Ref(child); + CordRep::Unref(old); + } + auto* new_cordrep = new CordRepCrc; + new_cordrep->length = child != nullptr ? child->length : 0; + new_cordrep->tag = cord_internal::CRC; + new_cordrep->child = child; + new_cordrep->crc = crc; + return new_cordrep; +} + +void CordRepCrc::Destroy(CordRepCrc* node) { + if (node->child != nullptr) { + CordRep::Unref(node->child); + } + delete node; +} + +} // namespace cord_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.h new file mode 100644 index 0000000000..455a1127d6 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.h @@ -0,0 +1,102 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/strings/internal/cord_internal.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace cord_internal { + +// CordRepCrc is a CordRep node intended only to appear at the top level of a +// cord tree. It associates an "expected CRC" with the contained data, to allow +// for easy passage of checksum data in Cord data flows. +// +// From Cord's perspective, the crc value has no semantics; any validation of +// the contained checksum is the user's responsibility. +struct CordRepCrc : public CordRep { + CordRep* child; + uint32_t crc; + + // Consumes `child` and returns a CordRepCrc prefixed tree containing `child`. + // If the specified `child` is itself a CordRepCrc node, then this method + // either replaces the existing node, or directly updates the crc value in it + // depending on the node being shared or not, i.e.: refcount.IsOne(). + // `child` must only be null if the Cord is empty. Never returns null. + static CordRepCrc* New(CordRep* child, uint32_t crc); + + // Destroys (deletes) the provided node. `node` must not be null. + static void Destroy(CordRepCrc* node); +}; + +// Consumes `rep` and returns a CordRep* with any outer CordRepCrc wrapper +// removed. This is usually a no-op (returning `rep`), but this will remove and +// unref an outer CordRepCrc node. +inline CordRep* RemoveCrcNode(CordRep* rep) { + assert(rep != nullptr); + if (ABSL_PREDICT_FALSE(rep->IsCrc())) { + CordRep* child = rep->crc()->child; + if (rep->refcount.IsOne()) { + delete rep->crc(); + } else { + CordRep::Ref(child); + CordRep::Unref(rep); + } + return child; + } + return rep; +} + +// Returns `rep` if it is not a CordRepCrc node, or its child if it is. +// Does not consume or create a reference on `rep` or the returned value. 
+inline CordRep* SkipCrcNode(CordRep* rep) { + assert(rep != nullptr); + if (ABSL_PREDICT_FALSE(rep->IsCrc())) { + return rep->crc()->child; + } else { + return rep; + } +} + +inline const CordRep* SkipCrcNode(const CordRep* rep) { + assert(rep != nullptr); + if (ABSL_PREDICT_FALSE(rep->IsCrc())) { + return rep->crc()->child; + } else { + return rep; + } +} + +inline CordRepCrc* CordRep::crc() { + assert(IsCrc()); + return static_cast(this); +} + +inline const CordRepCrc* CordRep::crc() const { + assert(IsCrc()); + return static_cast(this); +} + +} // namespace cord_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc new file mode 100644 index 0000000000..42a9110b87 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc @@ -0,0 +1,122 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/internal/cord_rep_crc.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_test_util.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace cord_internal { +namespace { + +using ::absl::cordrep_testing::MakeFlat; +using ::testing::Eq; +using ::testing::IsNull; +using ::testing::Ne; + +#if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST + +TEST(CordRepCrc, RemoveCrcWithNullptr) { + EXPECT_DEATH(RemoveCrcNode(nullptr), ""); +} + +#endif // !NDEBUG && GTEST_HAS_DEATH_TEST + +TEST(CordRepCrc, NewDestroy) { + CordRep* rep = cordrep_testing::MakeFlat("Hello world"); + CordRepCrc* crc = CordRepCrc::New(rep, 12345); + EXPECT_TRUE(crc->refcount.IsOne()); + EXPECT_THAT(crc->child, Eq(rep)); + EXPECT_THAT(crc->crc, Eq(12345u)); + EXPECT_TRUE(rep->refcount.IsOne()); + CordRepCrc::Destroy(crc); +} + +TEST(CordRepCrc, NewExistingCrcNotShared) { + CordRep* rep = cordrep_testing::MakeFlat("Hello world"); + CordRepCrc* crc = CordRepCrc::New(rep, 12345); + CordRepCrc* new_crc = CordRepCrc::New(crc, 54321); + EXPECT_THAT(new_crc, Eq(crc)); + EXPECT_TRUE(new_crc->refcount.IsOne()); + EXPECT_THAT(new_crc->child, Eq(rep)); + EXPECT_THAT(new_crc->crc, Eq(54321u)); + EXPECT_TRUE(rep->refcount.IsOne()); + CordRepCrc::Destroy(new_crc); +} + +TEST(CordRepCrc, NewExistingCrcShared) { + CordRep* rep = cordrep_testing::MakeFlat("Hello world"); + CordRepCrc* crc = CordRepCrc::New(rep, 12345); + CordRep::Ref(crc); + CordRepCrc* new_crc = CordRepCrc::New(crc, 54321); + + EXPECT_THAT(new_crc, Ne(crc)); + EXPECT_TRUE(new_crc->refcount.IsOne()); + EXPECT_TRUE(crc->refcount.IsOne()); + EXPECT_FALSE(rep->refcount.IsOne()); + EXPECT_THAT(crc->child, Eq(rep)); + EXPECT_THAT(new_crc->child, Eq(rep)); + EXPECT_THAT(crc->crc, Eq(12345u)); + 
EXPECT_THAT(new_crc->crc, Eq(54321u)); + + CordRep::Unref(crc); + CordRep::Unref(new_crc); +} + +TEST(CordRepCrc, NewEmpty) { + CordRepCrc* crc = CordRepCrc::New(nullptr, 12345); + EXPECT_TRUE(crc->refcount.IsOne()); + EXPECT_THAT(crc->child, IsNull()); + EXPECT_THAT(crc->length, Eq(0u)); + EXPECT_THAT(crc->crc, Eq(12345u)); + EXPECT_TRUE(crc->refcount.IsOne()); + CordRepCrc::Destroy(crc); +} + +TEST(CordRepCrc, RemoveCrcNotCrc) { + CordRep* rep = cordrep_testing::MakeFlat("Hello world"); + CordRep* nocrc = RemoveCrcNode(rep); + EXPECT_THAT(nocrc, Eq(rep)); + CordRep::Unref(nocrc); +} + +TEST(CordRepCrc, RemoveCrcNotShared) { + CordRep* rep = cordrep_testing::MakeFlat("Hello world"); + CordRepCrc* crc = CordRepCrc::New(rep, 12345); + CordRep* nocrc = RemoveCrcNode(crc); + EXPECT_THAT(nocrc, Eq(rep)); + EXPECT_TRUE(rep->refcount.IsOne()); + CordRep::Unref(nocrc); +} + +TEST(CordRepCrc, RemoveCrcShared) { + CordRep* rep = cordrep_testing::MakeFlat("Hello world"); + CordRepCrc* crc = CordRepCrc::New(rep, 12345); + CordRep::Ref(crc); + CordRep* nocrc = RemoveCrcNode(crc); + EXPECT_THAT(nocrc, Eq(rep)); + EXPECT_FALSE(rep->refcount.IsOne()); + CordRep::Unref(nocrc); + CordRep::Unref(crc); +} + +} // namespace +} // namespace cord_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_flat.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_flat.h index 4d0f988697..e3e27fcd7c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_flat.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_flat.h @@ -20,6 +20,8 @@ #include #include +#include "absl/base/config.h" +#include "absl/base/macros.h" #include "absl/strings/internal/cord_internal.h" namespace absl { @@ -42,23 +44,45 @@ static constexpr size_t kMinFlatSize = 32; static constexpr size_t kMaxFlatSize = 4096; static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead; static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead; +static constexpr size_t kMaxLargeFlatSize = 256 * 1024; +static constexpr size_t kMaxLargeFlatLength = kMaxLargeFlatSize - kFlatOverhead; +// kTagBase should make the Size <--> Tag computation resilient +// against changes to the value of FLAT when we add a new tag.. +static constexpr uint8_t kTagBase = FLAT - 4; + +// Converts the provided rounded size to the corresponding tag constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) { - return static_cast((size <= 1024) ? size / 8 + 1 - : 129 + size / 32 - 1024 / 32); + return static_cast(size <= 512 ? kTagBase + size / 8 + : size <= 8192 + ? kTagBase + 512 / 8 + size / 64 - 512 / 64 + : kTagBase + 512 / 8 + ((8192 - 512) / 64) + + size / 4096 - 8192 / 4096); } -static_assert(kMinFlatSize / 8 + 1 >= FLAT, ""); -static_assert(AllocatedSizeToTagUnchecked(kMaxFlatSize) <= MAX_FLAT_TAG, ""); +// Converts the provided tag to the corresponding allocated size +constexpr size_t TagToAllocatedSize(uint8_t tag) { + return (tag <= kTagBase + 512 / 8) ? tag * 8 - kTagBase * 8 + : (tag <= kTagBase + (512 / 8) + ((8192 - 512) / 64)) + ? 512 + tag * 64 - kTagBase * 64 - 512 / 8 * 64 + : 8192 + tag * 4096 - kTagBase * 4096 - + ((512 / 8) + ((8192 - 512) / 64)) * 4096; +} -// Helper functions for rounded div, and rounding to exact sizes. 
-constexpr size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; } -constexpr size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; } +static_assert(AllocatedSizeToTagUnchecked(kMinFlatSize) == FLAT, ""); +static_assert(AllocatedSizeToTagUnchecked(kMaxLargeFlatSize) == MAX_FLAT_TAG, + ""); + +// RoundUp logically performs `((n + m - 1) / m) * m` to round up to the nearest +// multiple of `m`, optimized for the invariant that `m` is a power of 2. +constexpr size_t RoundUp(size_t n, size_t m) { + return (n + m - 1) & (0 - m); +} // Returns the size to the nearest equal or larger value that can be // expressed exactly as a tag value. inline size_t RoundUpForTag(size_t size) { - return RoundUp(size, (size <= 1024) ? 8 : 32); + return RoundUp(size, (size <= 512) ? 8 : (size <= 8192 ? 64 : 4096)); } // Converts the allocated size to a tag, rounding down if the size @@ -71,26 +95,26 @@ inline uint8_t AllocatedSizeToTag(size_t size) { return tag; } -// Converts the provided tag to the corresponding allocated size -constexpr size_t TagToAllocatedSize(uint8_t tag) { - return (tag <= 129) ? ((tag - 1) * 8) : (1024 + (tag - 129) * 32); -} - // Converts the provided tag to the corresponding available data length constexpr size_t TagToLength(uint8_t tag) { return TagToAllocatedSize(tag) - kFlatOverhead; } // Enforce that kMaxFlatSize maps to a well-known exact tag value. -static_assert(TagToAllocatedSize(225) == kMaxFlatSize, "Bad tag logic"); +static_assert(TagToAllocatedSize(MAX_FLAT_TAG) == kMaxLargeFlatSize, + "Bad tag logic"); struct CordRepFlat : public CordRep { + // Tag for explicit 'large flat' allocation + struct Large {}; + // Creates a new flat node. - static CordRepFlat* New(size_t len) { + template + static CordRepFlat* NewImpl(size_t len, Args... args ABSL_ATTRIBUTE_UNUSED) { if (len <= kMinFlatLength) { len = kMinFlatLength; - } else if (len > kMaxFlatLength) { - len = kMaxFlatLength; + } else if (len > max_flat_size - kFlatOverhead) { + len = max_flat_size - kFlatOverhead; } // Round size up so it matches a size we can exactly express in a tag. @@ -101,6 +125,12 @@ struct CordRepFlat : public CordRep { return rep; } + static CordRepFlat* New(size_t len) { return NewImpl(len); } + + static CordRepFlat* New(Large, size_t len) { + return NewImpl(len); + } + // Deletes a CordRepFlat instance created previously through a call to New(). // Flat CordReps are allocated and constructed with raw ::operator new and // placement new, and must be destructed and deallocated accordingly. @@ -117,6 +147,17 @@ struct CordRepFlat : public CordRep { #endif } + // Create a CordRepFlat containing `data`, with an optional additional + // extra capacity of up to `extra` bytes. Requires that `data.size()` + // is less than kMaxFlatLength. + static CordRepFlat* Create(absl::string_view data, size_t extra = 0) { + assert(data.size() <= kMaxFlatLength); + CordRepFlat* flat = New(data.size() + (std::min)(extra, kMaxFlatLength)); + memcpy(flat->Data(), data.data(), data.size()); + flat->length = data.size(); + return flat; + } + // Returns a pointer to the data inside this flat rep. 
char* Data() { return reinterpret_cast(storage); } const char* Data() const { return reinterpret_cast(storage); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc index db1f63fa67..af2fc7683d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc @@ -129,7 +129,9 @@ class CordRepRing::Filler { index_type pos_; }; -constexpr size_t CordRepRing::kMaxCapacity; // NOLINT: needed for c++11 +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr size_t CordRepRing::kMaxCapacity; +#endif bool CordRepRing::IsValid(std::ostream& output) const { if (capacity_ == 0) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h index 44db849430..2000e21ea0 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h @@ -383,8 +383,8 @@ class CordRepRing : public CordRep { // Destroys the provided ring buffer, decrementing the reference count of all // contained child CordReps. The provided 1\`rep` should have a ref count of - // one (pre decrement destroy call observing `refcount.IsOne()`) or zero (post - // decrement destroy call observing `!refcount.Decrement()`). + // one (pre decrement destroy call observing `refcount.IsOne()`) or zero + // (post decrement destroy call observing `!refcount.Decrement()`). static void Destroy(CordRepRing* rep); // Returns a mutable reference to the logical end position array. 
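As a quick illustration of the flat size rounding scheme introduced in the cord_rep_flat.h hunk above (8-byte steps up to 512 bytes, 64-byte steps up to 8 KiB, then 4 KiB steps up to the 256 KiB large-flat cap, with RoundUp exploiting power-of-two granularities), here is a small standalone sketch. The names RoundUpPow2 and Granularity are illustrative only and do not exist in the library; the constants are taken from the patch.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Rounds `n` up to a multiple of the power-of-two `m`, mirroring the
// patch's RoundUp helper: (n + m - 1) & (0 - m) == (n + m - 1) & ~(m - 1).
constexpr size_t RoundUpPow2(size_t n, size_t m) { return (n + m - 1) & (0 - m); }

// Step size implied by the patch: 8 bytes up to 512, 64 bytes up to 8192,
// then 4096 bytes up to kMaxLargeFlatSize (256 KiB).
constexpr size_t Granularity(size_t size) {
  return size <= 512 ? 8 : (size <= 8192 ? 64 : 4096);
}

int main() {
  const size_t sizes[] = {40, 500, 513, 6000, 9000, 200000};
  for (size_t n : sizes) {
    const size_t rounded = RoundUpPow2(n, Granularity(n));
    // Every rounded size lands exactly on a representable tag boundary.
    assert(rounded >= n && rounded % Granularity(n) == 0);
    std::printf("%6zu -> %6zu (step %4zu)\n", n, rounded, Granularity(n));
  }
  return 0;
}

For example, a 500-byte request rounds to 504, a 6000-byte request to 6016, and a 200000-byte request to 200704, each of which then maps to a distinct tag via AllocatedSizeToTagUnchecked.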
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_test_util.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_test_util.h index bc500064c9..18a0a19544 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_test_util.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cord_rep_test_util.h @@ -42,18 +42,6 @@ inline cord_internal::CordRepSubstring* MakeSubstring( return sub; } -inline cord_internal::CordRepConcat* MakeConcat(cord_internal::CordRep* left, - cord_internal::CordRep* right, - int depth = 0) { - auto* concat = new cord_internal::CordRepConcat; - concat->tag = cord_internal::CONCAT; - concat->length = left->length + right->length; - concat->left = left; - concat->right = right; - concat->set_depth(depth); - return concat; -} - inline cord_internal::CordRepFlat* MakeFlat(absl::string_view value) { assert(value.length() <= cord_internal::kMaxFlatLength); auto* flat = cord_internal::CordRepFlat::New(value.length()); @@ -115,6 +103,38 @@ inline cord_internal::CordRepBtree* CordRepBtreeFromFlats( return node; } +template +inline void CordVisitReps(cord_internal::CordRep* rep, Fn&& fn) { + fn(rep); + while (rep->tag == cord_internal::SUBSTRING) { + rep = rep->substring()->child; + fn(rep); + } + if (rep->tag == cord_internal::BTREE) { + for (cord_internal::CordRep* edge : rep->btree()->Edges()) { + CordVisitReps(edge, fn); + } + } +} + +template +inline std::vector CordCollectRepsIf( + Predicate&& predicate, cord_internal::CordRep* rep) { + std::vector reps; + CordVisitReps(rep, [&reps, &predicate](cord_internal::CordRep* rep) { + if (predicate(rep)) reps.push_back(rep); + }); + return reps; +} + +inline std::vector CordCollectReps( + cord_internal::CordRep* rep) { + std::vector reps; + auto fn = [&reps](cord_internal::CordRep* rep) { reps.push_back(rep); }; + CordVisitReps(rep, fn); + return reps; +} + inline void CordToString(cord_internal::CordRep* rep, std::string& s) { size_t offset = 0; size_t length = rep->length; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc index 48369933c9..20d314f03c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc @@ -21,8 +21,8 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" -#include "absl/base/internal/exponential_biased.h" #include "absl/base/internal/raw_logging.h" +#include "absl/profiling/internal/exponential_biased.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -48,7 +48,7 @@ constexpr int64_t kIntervalIfDisabled = 1 << 16; ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() { - thread_local absl::base_internal::ExponentialBiased + thread_local absl::profiling_internal::ExponentialBiased exponential_biased_generator; int32_t mean_interval = get_cordz_mean_interval(); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.h index c9ba14508a..93f46ec6fe 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.h +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_functions.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_STRINGS_CORDZ_FUNCTIONS_H_ -#define ABSL_STRINGS_CORDZ_FUNCTIONS_H_ +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_ #include @@ -82,4 +82,4 @@ inline void cordz_set_next_sample_for_testing(int64_t) {} ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_STRINGS_CORDZ_FUNCTIONS_H_ +#endif // ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_handle.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_handle.h index 5df53c782a..3c800b433f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_handle.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_handle.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_STRINGS_CORDZ_HANDLE_H_ -#define ABSL_STRINGS_CORDZ_HANDLE_H_ +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_ #include #include @@ -128,4 +128,4 @@ class CordzSnapshot : public CordzHandle { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_STRINGS_CORDZ_HANDLE_H_ +#endif // ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc index 5c18bbc566..530f33bed4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc @@ -20,6 +20,7 @@ #include "absl/debugging/stacktrace.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_crc.h" #include "absl/strings/internal/cord_rep_ring.h" #include "absl/strings/internal/cordz_handle.h" #include "absl/strings/internal/cordz_statistics.h" @@ -33,7 +34,9 @@ namespace cord_internal { using ::absl::base_internal::SpinLockHolder; -constexpr int CordzInfo::kMaxStackDepth; +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr size_t CordzInfo::kMaxStackDepth; +#endif ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit}; @@ -81,6 +84,14 @@ class CordRepAnalyzer { size_t refcount = rep->refcount.Get(); RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1}; + // Process the top level CRC node, if present. + if (repref.rep->tag == CRC) { + statistics_.node_count++; + statistics_.node_counts.crc++; + memory_usage_.Add(sizeof(CordRepCrc), repref.refcount); + repref = repref.Child(repref.rep->crc()->child); + } + // Process all top level linear nodes (substrings and flats). repref = CountLinearReps(repref, memory_usage_); @@ -89,8 +100,6 @@ class CordRepAnalyzer { AnalyzeRing(repref); } else if (repref.rep->tag == BTREE) { AnalyzeBtree(repref); - } else if (repref.rep->tag == CONCAT) { - AnalyzeConcat(repref); } else { // We should have either a concat, btree, or ring node if not null. assert(false); @@ -132,14 +141,6 @@ class CordRepAnalyzer { } }; - // Returns `rr` if `rr.rep` is not null and a CONCAT type. 
- // Asserts that `rr.rep` is a concat node or null. - static RepRef AssertConcat(RepRef repref) { - const CordRep* rep = repref.rep; - assert(rep == nullptr || rep->tag == CONCAT); - return (rep != nullptr && rep->tag == CONCAT) ? repref : RepRef{nullptr, 0}; - } - // Counts a flat of the provide allocated size void CountFlat(size_t size) { statistics_.node_count++; @@ -192,34 +193,6 @@ class CordRepAnalyzer { return rep; } - // Analyzes the provided concat node in a flattened recursive way. - void AnalyzeConcat(RepRef rep) { - absl::InlinedVector pending; - - while (rep.rep != nullptr) { - const CordRepConcat* concat = rep.rep->concat(); - RepRef left = rep.Child(concat->left); - RepRef right = rep.Child(concat->right); - - statistics_.node_count++; - statistics_.node_counts.concat++; - memory_usage_.Add(sizeof(CordRepConcat), rep.refcount); - - right = AssertConcat(CountLinearReps(right, memory_usage_)); - rep = AssertConcat(CountLinearReps(left, memory_usage_)); - if (rep.rep != nullptr) { - if (right.rep != nullptr) { - pending.push_back(right); - } - } else if (right.rep != nullptr) { - rep = right; - } else if (!pending.empty()) { - rep = pending.back(); - pending.pop_back(); - } - } - } - // Analyzes the provided ring. void AnalyzeRing(RepRef rep) { statistics_.node_count++; @@ -318,7 +291,7 @@ CordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) { : src->method_; } -int CordzInfo::FillParentStack(const CordzInfo* src, void** stack) { +size_t CordzInfo::FillParentStack(const CordzInfo* src, void** stack) { assert(stack); if (src == nullptr) return 0; if (src->parent_stack_depth_) { @@ -329,11 +302,14 @@ int CordzInfo::FillParentStack(const CordzInfo* src, void** stack) { return src->stack_depth_; } -CordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src, +CordzInfo::CordzInfo(CordRep* rep, + const CordzInfo* src, MethodIdentifier method) : rep_(rep), - stack_depth_(absl::GetStackTrace(stack_, /*max_depth=*/kMaxStackDepth, - /*skip_count=*/1)), + stack_depth_( + static_cast(absl::GetStackTrace(stack_, + /*max_depth=*/kMaxStackDepth, + /*skip_count=*/1))), parent_stack_depth_(FillParentStack(src, parent_stack_)), method_(method), parent_method_(GetParentMethod(src)), diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.h index 026d5b9981..17eaa91c77 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_STRINGS_CORDZ_INFO_H_ -#define ABSL_STRINGS_CORDZ_INFO_H_ +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ #include #include @@ -196,7 +196,7 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle { std::atomic head ABSL_GUARDED_BY(mutex){nullptr}; }; - static constexpr int kMaxStackDepth = 64; + static constexpr size_t kMaxStackDepth = 64; explicit CordzInfo(CordRep* rep, const CordzInfo* src, MethodIdentifier method); @@ -216,7 +216,7 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle { // `stack_` depending on `parent_stack_` being empty, returning the size of // the parent stack. // Returns 0 if `src` is null. 
- static int FillParentStack(const CordzInfo* src, void** stack); + static size_t FillParentStack(const CordzInfo* src, void** stack); void ODRCheck() const { #ifndef NDEBUG @@ -244,8 +244,8 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle { void* stack_[kMaxStackDepth]; void* parent_stack_[kMaxStackDepth]; - const int stack_depth_; - const int parent_stack_depth_; + const size_t stack_depth_; + const size_t parent_stack_depth_; const MethodIdentifier method_; const MethodIdentifier parent_method_; CordzUpdateTracker update_tracker_; @@ -295,4 +295,4 @@ inline CordRep* CordzInfo::RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_) { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_STRINGS_CORDZ_INFO_H_ +#endif // ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc index 7430d281ca..6d6feb52bd 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc @@ -22,6 +22,7 @@ #include "absl/strings/cord.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_crc.h" #include "absl/strings/internal/cord_rep_flat.h" #include "absl/strings/internal/cord_rep_ring.h" #include "absl/strings/internal/cordz_info.h" @@ -61,7 +62,7 @@ CordRepFlat* Flat(size_t size) { } // Creates an external of the specified length -CordRepExternal* External(int length = 512) { +CordRepExternal* External(size_t length = 512) { return static_cast( NewExternalRep(absl::string_view("", length), [](absl::string_view) {})); } @@ -75,16 +76,6 @@ CordRepSubstring* Substring(CordRep* rep) { return substring; } -// Creates a concat on the provided reps -CordRepConcat* Concat(CordRep* left, CordRep* right) { - auto* concat = new CordRepConcat; - concat->length = left->length + right->length; - concat->tag = CONCAT; - concat->left = left; - concat->right = right; - return concat; -} - // Reference count helper struct RefHelper { std::vector refs; @@ -157,10 +148,6 @@ double FairShareImpl(CordRep* rep, size_t ref) { rep->ring()->ForEach([&](CordRepRing::index_type i) { self += FairShareImpl(rep->ring()->entry_child(i), 1); }); - } else if (rep->tag == CONCAT) { - self = SizeOf(rep->concat()); - children = FairShareImpl(rep->concat()->left, ref) + - FairShareImpl(rep->concat()->right, ref); } else { assert(false); } @@ -306,80 +293,6 @@ TEST(CordzInfoStatisticsTest, SharedSubstring) { EXPECT_THAT(SampleCord(substring), EqStatistics(expected)); } -TEST(CordzInfoStatisticsTest, Concat) { - RefHelper ref; - auto* flat1 = Flat(300); - auto* flat2 = Flat(2000); - auto* concat = ref.NeedsUnref(Concat(flat1, flat2)); - - CordzStatistics expected; - expected.size = concat->length; - expected.estimated_memory_usage = - SizeOf(concat) + SizeOf(flat1) + SizeOf(flat2); - expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage; - expected.node_count = 3; - expected.node_counts.flat = 2; - expected.node_counts.flat_512 = 1; - expected.node_counts.concat = 1; - - EXPECT_THAT(SampleCord(concat), EqStatistics(expected)); -} - -TEST(CordzInfoStatisticsTest, DeepConcat) { - RefHelper ref; - auto* flat1 = Flat(300); - auto* flat2 = Flat(2000); - auto* flat3 = 
Flat(400); - auto* external = External(3000); - auto* substring = Substring(external); - auto* concat1 = Concat(flat1, flat2); - auto* concat2 = Concat(flat3, substring); - auto* concat = ref.NeedsUnref(Concat(concat1, concat2)); - - CordzStatistics expected; - expected.size = concat->length; - expected.estimated_memory_usage = SizeOf(concat) * 3 + SizeOf(flat1) + - SizeOf(flat2) + SizeOf(flat3) + - SizeOf(external) + SizeOf(substring); - expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage; - - expected.node_count = 8; - expected.node_counts.flat = 3; - expected.node_counts.flat_512 = 2; - expected.node_counts.external = 1; - expected.node_counts.concat = 3; - expected.node_counts.substring = 1; - - EXPECT_THAT(SampleCord(concat), EqStatistics(expected)); -} - -TEST(CordzInfoStatisticsTest, DeepSharedConcat) { - RefHelper ref; - auto* flat1 = Flat(40); - auto* flat2 = ref.Ref(Flat(2000), 4); - auto* flat3 = Flat(70); - auto* external = ref.Ref(External(3000)); - auto* substring = ref.Ref(Substring(external), 3); - auto* concat1 = Concat(flat1, flat2); - auto* concat2 = Concat(flat3, substring); - auto* concat = ref.Ref(ref.NeedsUnref(Concat(concat1, concat2))); - - CordzStatistics expected; - expected.size = concat->length; - expected.estimated_memory_usage = SizeOf(concat) * 3 + SizeOf(flat1) + - SizeOf(flat2) + SizeOf(flat3) + - SizeOf(external) + SizeOf(substring); - expected.estimated_fair_share_memory_usage = FairShare(concat); - expected.node_count = 8; - expected.node_counts.flat = 3; - expected.node_counts.flat_64 = 1; - expected.node_counts.flat_128 = 1; - expected.node_counts.external = 1; - expected.node_counts.concat = 3; - expected.node_counts.substring = 1; - - EXPECT_THAT(SampleCord(concat), EqStatistics(expected)); -} TEST(CordzInfoStatisticsTest, Ring) { RefHelper ref; @@ -439,7 +352,7 @@ TEST(CordzInfoStatisticsTest, SharedSubstringRing) { } TEST(CordzInfoStatisticsTest, BtreeLeaf) { - ASSERT_THAT(CordRepBtree::kMaxCapacity, Ge(3)); + ASSERT_THAT(CordRepBtree::kMaxCapacity, Ge(3u)); RefHelper ref; auto* flat1 = Flat(2000); auto* flat2 = Flat(200); @@ -479,7 +392,7 @@ TEST(CordzInfoStatisticsTest, BtreeNodeShared) { RefHelper ref; static constexpr int leaf_count = 3; const size_t flat3_count = CordRepBtree::kMaxCapacity - 3; - ASSERT_THAT(flat3_count, Ge(0)); + ASSERT_THAT(flat3_count, Ge(0u)); CordRepBtree* tree = nullptr; size_t mem_size = 0; @@ -535,6 +448,23 @@ TEST(CordzInfoStatisticsTest, BtreeNodeShared) { EXPECT_THAT(SampleCord(tree), EqStatistics(expected)); } +TEST(CordzInfoStatisticsTest, Crc) { + RefHelper ref; + auto* left = Flat(1000); + auto* crc = ref.NeedsUnref(CordRepCrc::New(left, 12345)); + + CordzStatistics expected; + expected.size = left->length; + expected.estimated_memory_usage = SizeOf(crc) + SizeOf(left); + expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage; + expected.node_count = 2; + expected.node_counts.flat = 1; + expected.node_counts.flat_1k = 1; + expected.node_counts.crc = 1; + + EXPECT_THAT(SampleCord(crc), EqStatistics(expected)); +} + TEST(CordzInfoStatisticsTest, ThreadSafety) { Notification stop; static constexpr int kNumThreads = 8; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_test.cc index b98343ae79..cd226c3ed5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_test.cc +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_info_test.cc @@ -124,7 +124,7 @@ TEST(CordzInfoTest, UntrackCord) { CordzInfo* info = data.data.cordz_info(); info->Untrack(); - EXPECT_THAT(DeleteQueue(), SizeIs(0)); + EXPECT_THAT(DeleteQueue(), SizeIs(0u)); } TEST(CordzInfoTest, UntrackCordWithSnapshot) { @@ -263,8 +263,9 @@ TEST(CordzInfoTest, StackV2) { // resultant formatted stack will be "", but that still equals the stack // recorded in CordzInfo, which is also empty. The skip_count is 1 so that the // line number of the current stack isn't included in the HasSubstr check. - local_stack.resize(absl::GetStackTrace(local_stack.data(), kMaxStackDepth, - /*skip_count=*/1)); + local_stack.resize(static_cast( + absl::GetStackTrace(local_stack.data(), kMaxStackDepth, + /*skip_count=*/1))); std::string got_stack = FormatStack(info->GetStack()); std::string expected_stack = FormatStack(local_stack); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token.h index 28a1d70ccc..b58022c3f9 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token.h @@ -16,8 +16,8 @@ #include "absl/strings/internal/cordz_handle.h" #include "absl/strings/internal/cordz_info.h" -#ifndef ABSL_STRINGS_CORDZ_SAMPLE_TOKEN_H_ -#define ABSL_STRINGS_CORDZ_SAMPLE_TOKEN_H_ +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_ namespace absl { ABSL_NAMESPACE_BEGIN @@ -94,4 +94,4 @@ class CordzSampleToken : public CordzSnapshot { ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_STRINGS_CORDZ_SAMPLE_TOKEN_H_ +#endif // ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc index 9f54301d68..6be1770d59 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc @@ -167,7 +167,7 @@ TEST(CordzSampleTokenTest, MultiThreaded) { if (cord.data.is_profiled()) { // 1) Untrack cord.data.cordz_info()->Untrack(); - cord.data.clear_cordz_info();; + cord.data.clear_cordz_info(); } else { // 2) Track CordzInfo::TrackCord(cord.data, kTrackCordMethod); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_statistics.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_statistics.h index da4c7dbb8c..9f558df494 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_statistics.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_statistics.h @@ -41,15 +41,16 @@ struct CordzStatistics { size_t concat = 0; // #concat reps size_t ring = 0; // #ring buffer reps size_t btree = 0; // #btree reps + size_t crc = 0; // #crc reps }; // The size of the cord in bytes. This matches the result of Cord::size(). - int64_t size = 0; + size_t size = 0; // The estimated memory used by the sampled cord. 
This value matches the // value as reported by Cord::EstimatedMemoryUsage(). // A value of 0 implies the property has not been recorded. - int64_t estimated_memory_usage = 0; + size_t estimated_memory_usage = 0; // The effective memory used by the sampled cord, inversely weighted by the // effective indegree of each allocated node. This is a representation of the @@ -58,14 +59,14 @@ struct CordzStatistics { // by multiple Cord instances, and for cases where a Cord includes the same // node multiple times (either directly or indirectly). // A value of 0 implies the property has not been recorded. - int64_t estimated_fair_share_memory_usage = 0; + size_t estimated_fair_share_memory_usage = 0; // The total number of nodes referenced by this cord. // For ring buffer Cords, this includes the 'ring buffer' node. // For btree Cords, this includes all 'CordRepBtree' tree nodes as well as all // the substring, flat and external nodes referenced by the tree. // A value of 0 implies the property has not been recorded. - int64_t node_count = 0; + size_t node_count = 0; // Detailed node counts per type NodeCounts node_counts; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker.h index 02efcc3a2d..c5170662bf 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker.h @@ -40,6 +40,7 @@ class CordzUpdateTracker { enum MethodIdentifier { kUnknown, kAppendCord, + kAppendCordBuffer, kAppendExternalMemory, kAppendString, kAssignCord, @@ -49,15 +50,18 @@ class CordzUpdateTracker { kConstructorString, kCordReader, kFlatten, + kGetAppendBuffer, kGetAppendRegion, kMakeCordFromExternal, kMoveAppendCord, kMoveAssignCord, kMovePrependCord, kPrependCord, + kPrependCordBuffer, kPrependString, kRemovePrefix, kRemoveSuffix, + kSetExpectedChecksum, kSubCord, // kNumMethods defines the number of entries: must be the last entry. 
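The kSetExpectedChecksum entries above hook the update tracker into the CordRepCrc node type added earlier in this patch (cord_rep_crc.h/.cc). Below is a hedged sketch of the intended ownership behaviour, written against CordRepCrc::New and RemoveCrcNode as declared in that hunk; MakeTestFlat is a hypothetical helper for this sketch only, standing in for the MakeFlat test utility.

#include <cassert>
#include <cstring>

#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_flat.h"

namespace ci = absl::cord_internal;

// Hypothetical helper: builds a flat node holding `text`.
ci::CordRepFlat* MakeTestFlat(const char* text) {
  const size_t len = std::strlen(text);
  ci::CordRepFlat* flat = ci::CordRepFlat::New(len);
  std::memcpy(flat->Data(), text, len);
  flat->length = len;
  return flat;
}

int main() {
  // New() consumes the child reference and wraps it in a CRC node.
  ci::CordRep* flat = MakeTestFlat("Hello world");
  ci::CordRepCrc* crc = ci::CordRepCrc::New(flat, 12345);
  assert(crc->child == flat && crc->crc == 12345u);

  // Wrapping an unshared CRC node again updates the crc value in place,
  // so no new node is allocated.
  ci::CordRepCrc* updated = ci::CordRepCrc::New(crc, 54321);
  assert(updated == crc && updated->crc == 54321u);

  // RemoveCrcNode() deletes the unshared wrapper and hands the caller the
  // bare child, transferring the wrapper's reference.
  ci::CordRep* unwrapped = ci::RemoveCrcNode(updated);
  assert(unwrapped == flat);
  ci::CordRep::Unref(unwrapped);
  return 0;
}

The shared cases (CordRep::Ref on the CRC node before New or RemoveCrcNode) are covered by the NewExistingCrcShared and RemoveCrcShared tests in the cord_rep_crc_test.cc hunk above.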
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc index fcd17df7a0..9b1f798659 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc @@ -38,6 +38,7 @@ using Methods = std::array; Methods AllMethods() { return Methods{Method::kUnknown, Method::kAppendCord, + Method::kAppendCordBuffer, Method::kAppendExternalMemory, Method::kAppendString, Method::kAssignCord, @@ -47,15 +48,18 @@ Methods AllMethods() { Method::kConstructorString, Method::kCordReader, Method::kFlatten, + Method::kGetAppendBuffer, Method::kGetAppendRegion, Method::kMakeCordFromExternal, Method::kMoveAppendCord, Method::kMoveAssignCord, Method::kMovePrependCord, Method::kPrependCord, + Method::kPrependCordBuffer, Method::kPrependString, Method::kRemovePrefix, Method::kRemoveSuffix, + Method::kSetExpectedChecksum, Method::kSubCord}; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc new file mode 100644 index 0000000000..a084568fa8 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc @@ -0,0 +1,93 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/internal/damerau_levenshtein_distance.h" + +#include +#include +#include + +#include "absl/strings/string_view.h" +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace strings_internal { +// Calculate DamerauLevenshtein (adjacent transpositions) distance +// between two strings, +// https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance. The +// algorithm follows the condition that no substring is edited more than once. +// While this can reduce is larger distance, it's a) a much simpler algorithm +// and b) more realistic for the case that typographic mistakes should be +// detected. +// When the distance is larger than cutoff, or one of the strings has more +// than MAX_SIZE=100 characters, the code returns min(MAX_SIZE, cutoff) + 1. 
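For readers unfamiliar with the recurrence that the capped, banded implementation below optimizes: each cell takes the minimum of a deletion, an insertion, a substitution, and, when the last two characters are swapped, an adjacent transposition (the restricted, "no substring edited more than once" variant described in the comment). A minimal uncapped sketch under that assumption, independent of the Abseil code:

#include <algorithm>
#include <string>
#include <vector>

// Plain O(n*m) restricted Damerau-Levenshtein distance (no cutoff, no band).
size_t RestrictedDamerauLevenshtein(const std::string& a, const std::string& b) {
  std::vector<std::vector<size_t>> d(a.size() + 1,
                                     std::vector<size_t>(b.size() + 1, 0));
  for (size_t i = 0; i <= a.size(); ++i) d[i][0] = i;
  for (size_t j = 0; j <= b.size(); ++j) d[0][j] = j;
  for (size_t i = 1; i <= a.size(); ++i) {
    for (size_t j = 1; j <= b.size(); ++j) {
      size_t cost = (a[i - 1] == b[j - 1]) ? 0 : 1;
      d[i][j] = std::min({d[i - 1][j] + 1,         // deletion
                          d[i][j - 1] + 1,         // insertion
                          d[i - 1][j - 1] + cost});  // match / substitution
      if (i > 1 && j > 1 && a[i - 1] == b[j - 2] && a[i - 2] == b[j - 1]) {
        d[i][j] = std::min(d[i][j], d[i - 2][j - 2] + 1);  // transposition
      }
    }
  }
  return d[a.size()][b.size()];
}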
+uint8_t CappedDamerauLevenshteinDistance(absl::string_view s1, + absl::string_view s2, uint8_t cutoff) { + const uint8_t MAX_SIZE = 100; + const uint8_t _cutoff = std::min(MAX_SIZE, cutoff); + const uint8_t cutoff_plus_1 = static_cast(_cutoff + 1); + + if (s1.size() > s2.size()) std::swap(s1, s2); + if (s1.size() + _cutoff < s2.size() || s2.size() > MAX_SIZE) + return cutoff_plus_1; + + if (s1.empty()) + return static_cast(s2.size()); + + // Lower diagonal bound: y = x - lower_diag + const uint8_t lower_diag = + _cutoff - static_cast(s2.size() - s1.size()); + // Upper diagonal bound: y = x + upper_diag + const uint8_t upper_diag = _cutoff; + + // d[i][j] is the number of edits required to convert s1[0, i] to s2[0, j] + std::array, MAX_SIZE + 2> d; + std::iota(d[0].begin(), d[0].begin() + upper_diag + 1, 0); + d[0][cutoff_plus_1] = cutoff_plus_1; + for (size_t i = 1; i <= s1.size(); ++i) { + // Deduce begin of relevant window. + size_t j_begin = 1; + if (i > lower_diag) { + j_begin = i - lower_diag; + d[i][j_begin - 1] = cutoff_plus_1; + } else { + d[i][0] = static_cast(i); + } + + // Deduce end of relevant window. + size_t j_end = i + upper_diag; + if (j_end > s2.size()) { + j_end = s2.size(); + } else { + d[i][j_end + 1] = cutoff_plus_1; + } + + for (size_t j = j_begin; j <= j_end; ++j) { + const uint8_t deletion_distance = d[i - 1][j] + 1; + const uint8_t insertion_distance = d[i][j - 1] + 1; + const uint8_t mismatched_tail_cost = s1[i - 1] == s2[j - 1] ? 0 : 1; + const uint8_t mismatch_distance = d[i - 1][j - 1] + mismatched_tail_cost; + uint8_t transposition_distance = _cutoff + 1; + if (i > 1 && j > 1 && s1[i - 1] == s2[j - 2] && s1[i - 2] == s2[j - 1]) + transposition_distance = d[i - 2][j - 2] + 1; + d[i][j] = std::min({cutoff_plus_1, deletion_distance, insertion_distance, + mismatch_distance, transposition_distance}); + } + } + return d[s1.size()][s2.size()]; +} + +} // namespace strings_internal + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h new file mode 100644 index 0000000000..1a9684254a --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h @@ -0,0 +1,35 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_ +#define ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_ + +#include +#include + +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace strings_internal { +// Calculate DamerauLevenshtein distance between two strings. +// When the distance is larger than cutoff, the code just returns cutoff + 1. 
+uint8_t CappedDamerauLevenshteinDistance(absl::string_view s1, + absl::string_view s2, uint8_t cutoff); + +} // namespace strings_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc new file mode 100644 index 0000000000..a342b7db54 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc @@ -0,0 +1,99 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/internal/damerau_levenshtein_distance.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace { + +using absl::strings_internal::CappedDamerauLevenshteinDistance; + +TEST(Distance, TestDistances) { + EXPECT_THAT(CappedDamerauLevenshteinDistance("ab", "ab", 6), uint8_t{0}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("a", "b", 6), uint8_t{1}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("ca", "abc", 6), uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "ad", 6), uint8_t{2}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "cadb", 6), uint8_t{4}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "bdac", 6), uint8_t{4}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("ab", "ab", 0), uint8_t{0}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("", "", 0), uint8_t{0}); + // combinations for 3-character strings: + // 1, 2, 3 removals, insertions or replacements and transpositions + EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", "abc", 6), uint8_t{0}); + for (auto res : + {"", "ca", "efg", "ea", "ce", "ceb", "eca", "cae", "cea", "bea"}) { + EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{3}); + } + for (auto res : + {"a", "b", "c", "ba", "cb", "bca", "cab", "cba", "ace", + "efc", "ebf", "aef", "ae", "be", "eb", "ec", "ecb", "bec", + "bce", "cbe", "ace", "eac", "aeb", "bae", "eab", "eba"}) { + EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{2}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{2}); + } + for (auto res : {"ab", "ac", "bc", "acb", "bac", "ebc", "aec", "abe"}) { + EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{1}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{1}); + } +} + +TEST(Distance, TestCutoff) { + // Returing cutoff + 1 if the value is larger than cutoff or string longer + // than MAX_SIZE. 
+ EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 3), uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 2), uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 1), uint8_t{2}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("abcdefg", "a", 2), uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance("a", "abcde", 2), uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(102, 'a'), + std::string(102, 'a'), 105), + uint8_t{101}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'), + std::string(100, 'a'), 100), + uint8_t{0}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'), + std::string(100, 'b'), 100), + uint8_t{100}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'), + std::string(99, 'a'), 2), + uint8_t{1}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'), + std::string(101, 'a'), 2), + uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'), + std::string(101, 'a'), 2), + uint8_t{3}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX + 1, 'a'), + std::string(UINT8_MAX + 1, 'b'), + UINT8_MAX), + uint8_t{101}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX - 1, 'a'), + std::string(UINT8_MAX - 1, 'b'), + UINT8_MAX), + uint8_t{101}); + EXPECT_THAT( + CappedDamerauLevenshteinDistance(std::string(UINT8_MAX, 'a'), + std::string(UINT8_MAX, 'b'), UINT8_MAX), + uint8_t{101}); + EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX - 1, 'a'), + std::string(UINT8_MAX - 1, 'a'), + UINT8_MAX), + uint8_t{101}); +} +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/escaping.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/escaping.cc index c5271286ad..cfea096111 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/escaping.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/escaping.cc @@ -21,7 +21,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace strings_internal { -const char kBase64Chars[] = +ABSL_CONST_INIT const char kBase64Chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) { @@ -102,8 +102,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest, } } // To save time, we didn't update szdest or szsrc in the loop. So do it now. - szdest = limit_dest - cur_dest; - szsrc = limit_src - cur_src; + szdest = static_cast(limit_dest - cur_dest); + szsrc = static_cast(limit_src - cur_src); /* now deal with the tail (<=3 bytes) */ switch (szsrc) { @@ -154,7 +154,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest, // the loop because the loop above always reads 4 bytes, and the fourth // byte is past the end of the input. if (szdest < 4) return 0; - uint32_t in = (cur_src[0] << 16) + absl::big_endian::Load16(cur_src + 1); + uint32_t in = + (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1); cur_dest[0] = base64[in >> 18]; in &= 0x3FFFF; cur_dest[1] = base64[in >> 12]; @@ -172,7 +173,7 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest, ABSL_RAW_LOG(FATAL, "Logic problem? 
szsrc = %zu", szsrc); break; } - return (cur_dest - dest); + return static_cast(cur_dest - dest); } } // namespace strings_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/has_absl_stringify.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/has_absl_stringify.h new file mode 100644 index 0000000000..55a0850829 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/has_absl_stringify.h @@ -0,0 +1,55 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_ +#define ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_ +#include +#include +#include + +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace strings_internal { + +// This is an empty class not intended to be used. It exists so that +// `HasAbslStringify` can reference a universal class rather than needing to be +// copied for each new sink. +class UnimplementedSink { + public: + void Append(size_t count, char ch); + + void Append(string_view v); + + // Support `absl::Format(&sink, format, args...)`. + friend void AbslFormatFlush(UnimplementedSink* sink, absl::string_view v); +}; + +template +struct HasAbslStringify : std::false_type {}; + +template +struct HasAbslStringify< + T, std::enable_if_t(), + std::declval()))>::value>> : std::true_type {}; + +} // namespace strings_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/memutil.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/memutil.cc index 2519c6881e..44996a7549 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/memutil.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/memutil.cc @@ -54,10 +54,11 @@ size_t memspn(const char* s, size_t slen, const char* accept) { cont: c = *p++; - if (slen-- == 0) return p - 1 - s; + if (slen-- == 0) + return static_cast(p - 1 - s); for (spanp = accept; (sc = *spanp++) != '\0';) if (sc == c) goto cont; - return p - 1 - s; + return static_cast(p - 1 - s); } size_t memcspn(const char* s, size_t slen, const char* reject) { @@ -68,9 +69,10 @@ size_t memcspn(const char* s, size_t slen, const char* reject) { while (slen-- != 0) { c = *p++; for (spanp = reject; (sc = *spanp++) != '\0';) - if (sc == c) return p - 1 - s; + if (sc == c) + return static_cast(p - 1 - s); } - return p - s; + return static_cast(p - s); } char* mempbrk(const char* s, size_t slen, const char* accept) { @@ -97,8 +99,9 @@ const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle, const char* hayend = phaystack + haylen - neelen + 1; // A static cast is used here to work around the fact that memchr returns // a void* on Posix-compliant systems and const void* on 
Windows. - while ((match = static_cast( - memchr(phaystack, pneedle[0], hayend - phaystack)))) { + while ( + (match = static_cast(memchr( + phaystack, pneedle[0], static_cast(hayend - phaystack))))) { if (memcmp(match, pneedle, neelen) == 0) return match; else diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.cc index 05324c780c..a0e5ec08c2 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.cc @@ -14,20 +14,27 @@ #include "absl/strings/internal/ostringstream.h" +#include +#include +#include +#include + namespace absl { ABSL_NAMESPACE_BEGIN namespace strings_internal { -OStringStream::Buf::int_type OStringStream::overflow(int c) { - assert(s_); - if (!Buf::traits_type::eq_int_type(c, Buf::traits_type::eof())) - s_->push_back(static_cast(c)); +OStringStream::Streambuf::int_type OStringStream::Streambuf::overflow(int c) { + assert(str_); + if (!std::streambuf::traits_type::eq_int_type( + c, std::streambuf::traits_type::eof())) + str_->push_back(static_cast(c)); return 1; } -std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) { - assert(s_); - s_->append(s, n); +std::streamsize OStringStream::Streambuf::xsputn(const char* s, + std::streamsize n) { + assert(str_); + str_->append(s, static_cast(n)); return n; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.h index d25d60473f..c0e237dbe8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream.h @@ -16,11 +16,13 @@ #define ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_ #include +#include #include #include #include +#include -#include "absl/base/port.h" +#include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -60,26 +62,49 @@ namespace strings_internal { // strm << 3.14; // // Note: flush() has no effect. No reason to call it. -class OStringStream : private std::basic_streambuf, public std::ostream { +class OStringStream final : public std::ostream { public: // The argument can be null, in which case you'll need to call str(p) with a // non-null argument before you can write to the stream. // // The destructor of OStringStream doesn't use the std::string. It's OK to // destroy the std::string before the stream. 
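The ostringstream.h rework in this hunk moves the buffering logic into a nested Streambuf member, so the stream no longer privately inherits std::streambuf and can be made movable. A stripped-down sketch of the underlying technique, an ostream whose streambuf appends straight into a caller-owned std::string, with hypothetical names rather than the Abseil class:

#include <ostream>
#include <streambuf>
#include <string>

// Minimal append-to-string streambuf: every character written to the stream
// lands in *target_ with no intermediate buffering.
class StringAppendBuf : public std::streambuf {
 public:
  explicit StringAppendBuf(std::string* target) : target_(target) {}

 protected:
  int_type overflow(int_type c) override {
    if (!traits_type::eq_int_type(c, traits_type::eof()))
      target_->push_back(static_cast<char>(c));
    return 1;
  }
  std::streamsize xsputn(const char* s, std::streamsize n) override {
    target_->append(s, static_cast<size_t>(n));
    return n;
  }

 private:
  std::string* target_;
};

// Usage: formatted output goes directly into `out`.
//   std::string out;
//   StringAppendBuf buf(&out);
//   std::ostream os(&buf);
//   os << "value=" << 42;   // out == "value=42"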
- explicit OStringStream(std::string* s) : std::ostream(this), s_(s) {} + explicit OStringStream(std::string* str) + : std::ostream(&buf_), buf_(str) {} + OStringStream(OStringStream&& that) + : std::ostream(std::move(static_cast(that))), + buf_(that.buf_) { + rdbuf(&buf_); + } + OStringStream& operator=(OStringStream&& that) { + std::ostream::operator=(std::move(static_cast(that))); + buf_ = that.buf_; + rdbuf(&buf_); + return *this; + } - std::string* str() { return s_; } - const std::string* str() const { return s_; } - void str(std::string* s) { s_ = s; } + std::string* str() { return buf_.str(); } + const std::string* str() const { return buf_.str(); } + void str(std::string* str) { buf_.str(str); } private: - using Buf = std::basic_streambuf; + class Streambuf final : public std::streambuf { + public: + explicit Streambuf(std::string* str) : str_(str) {} + Streambuf(const Streambuf&) = default; + Streambuf& operator=(const Streambuf&) = default; - Buf::int_type overflow(int c) override; - std::streamsize xsputn(const char* s, std::streamsize n) override; + std::string* str() { return str_; } + const std::string* str() const { return str_; } + void str(std::string* str) { str_ = str; } - std::string* s_; + protected: + int_type overflow(int c) override; + std::streamsize xsputn(const char* s, std::streamsize n) override; + + private: + std::string* str_; + } buf_; }; } // namespace strings_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream_test.cc index 2879e50eb3..ef3ad573e9 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/ostringstream_test.cc @@ -14,10 +14,12 @@ #include "absl/strings/internal/ostringstream.h" +#include #include #include #include #include +#include #include "gtest/gtest.h" @@ -29,24 +31,51 @@ TEST(OStringStream, IsOStream) { ""); } -TEST(OStringStream, ConstructDestroy) { +TEST(OStringStream, ConstructNullptr) { + absl::strings_internal::OStringStream strm(nullptr); + EXPECT_EQ(nullptr, strm.str()); +} + +TEST(OStringStream, ConstructStr) { + std::string s = "abc"; { - absl::strings_internal::OStringStream strm(nullptr); - EXPECT_EQ(nullptr, strm.str()); + absl::strings_internal::OStringStream strm(&s); + EXPECT_EQ(&s, strm.str()); } + EXPECT_EQ("abc", s); +} + +TEST(OStringStream, Destroy) { + std::unique_ptr s(new std::string); + absl::strings_internal::OStringStream strm(s.get()); + s.reset(); +} + +TEST(OStringStream, MoveConstruct) { + std::string s = "abc"; { - std::string s = "abc"; - { - absl::strings_internal::OStringStream strm(&s); - EXPECT_EQ(&s, strm.str()); - } - EXPECT_EQ("abc", s); + absl::strings_internal::OStringStream strm1(&s); + strm1 << std::hex << 16; + EXPECT_EQ(&s, strm1.str()); + absl::strings_internal::OStringStream strm2(std::move(strm1)); + strm2 << 16; // We should still be in base 16. 
+ EXPECT_EQ(&s, strm2.str()); } + EXPECT_EQ("abc1010", s); +} + +TEST(OStringStream, MoveAssign) { + std::string s = "abc"; { - std::unique_ptr s(new std::string); - absl::strings_internal::OStringStream strm(s.get()); - s.reset(); + absl::strings_internal::OStringStream strm1(&s); + strm1 << std::hex << 16; + EXPECT_EQ(&s, strm1.str()); + absl::strings_internal::OStringStream strm2(nullptr); + strm2 = std::move(strm1); + strm2 << 16; // We should still be in base 16. + EXPECT_EQ(&s, strm2.str()); } + EXPECT_EQ("abc1010", s); } TEST(OStringStream, Str) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized.h index 749c66e78e..49859dcc7d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized.h @@ -29,8 +29,9 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace strings_internal { -// Is a subclass of true_type or false_type, depending on whether or not -// T has a __resize_default_init member. +// In this type trait, we look for a __resize_default_init member function, and +// we use it if available, otherwise, we use resize. We provide HasMember to +// indicate whether __resize_default_init is present. template struct ResizeUninitializedTraits { using HasMember = std::false_type; @@ -79,14 +80,36 @@ void STLStringReserveAmortized(string_type* s, size_t new_size) { } } +// In this type trait, we look for an __append_default_init member function, and +// we use it if available, otherwise, we use append. +template +struct AppendUninitializedTraits { + static void Append(string_type* s, size_t n) { + s->append(n, typename string_type::value_type()); + } +}; + +template +struct AppendUninitializedTraits< + string_type, absl::void_t() + .__append_default_init(237))> > { + static void Append(string_type* s, size_t n) { + s->__append_default_init(n); + } +}; + // Like STLStringResizeUninitialized(str, new_size), except guaranteed to use // exponential growth so that the amortized complexity of increasing the string // size by a small amount is O(1), in contrast to O(str->size()) in the case of // precise growth. template void STLStringResizeUninitializedAmortized(string_type* s, size_t new_size) { - STLStringReserveAmortized(s, new_size); - STLStringResizeUninitialized(s, new_size); + const size_t size = s->size(); + if (new_size > size) { + AppendUninitializedTraits::Append(s, new_size - size); + } else { + s->erase(new_size); + } } } // namespace strings_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc index 01ee476b6c..ad1b9c58f3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc @@ -19,10 +19,12 @@ namespace { int resize_call_count = 0; +int append_call_count = 0; // A mock string class whose only purpose is to track how many times its -// resize() method has been called. +// resize()/append() methods have been called. 
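The AppendUninitializedTraits added in the resize_uninitialized.h hunk uses the same detection idiom as the existing __resize_default_init trait: a primary template with a portable fallback, plus a partial specialization that only becomes viable when the expression naming the member function compiles inside absl::void_t. A generic sketch of the idiom with made-up names (shrink_to_fit stands in for __append_default_init):

#include <string>
#include <type_traits>
#include <utility>

// Detection idiom: call s->shrink_to_fit() if the type has it, otherwise do
// nothing. The partial specialization is only selected when the decltype
// expression is well-formed.
template <typename T, typename = void>
struct ShrinkTraits {
  static void Shrink(T*) {}  // fallback: no-op
};

template <typename T>
struct ShrinkTraits<T, std::void_t<decltype(std::declval<T&>().shrink_to_fit())>> {
  static void Shrink(T* s) { s->shrink_to_fit(); }
};

struct NoShrink {};  // has no shrink_to_fit()

int main() {
  std::string s = "hello";
  NoShrink n;
  ShrinkTraits<std::string>::Shrink(&s);  // calls the member
  ShrinkTraits<NoShrink>::Shrink(&n);     // compiles, does nothing
}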
struct resizable_string { + using value_type = char; size_t size() const { return 0; } size_t capacity() const { return 0; } char& operator[](size_t) { @@ -30,14 +32,18 @@ struct resizable_string { return c; } void resize(size_t) { resize_call_count += 1; } + void append(size_t, value_type) { append_call_count += 1; } void reserve(size_t) {} + resizable_string& erase(size_t = 0, size_t = 0) { return *this; } }; int resize_default_init_call_count = 0; +int append_default_init_call_count = 0; // A mock string class whose only purpose is to track how many times its -// resize() and __resize_default_init() methods have been called. -struct resize_default_init_string { +// resize()/__resize_default_init()/append()/__append_default_init() methods +// have been called. +struct default_init_string { size_t size() const { return 0; } size_t capacity() const { return 0; } char& operator[](size_t) { @@ -46,46 +52,68 @@ struct resize_default_init_string { } void resize(size_t) { resize_call_count += 1; } void __resize_default_init(size_t) { resize_default_init_call_count += 1; } + void __append_default_init(size_t) { append_default_init_call_count += 1; } void reserve(size_t) {} + default_init_string& erase(size_t = 0, size_t = 0) { return *this; } }; TEST(ResizeUninit, WithAndWithout) { resize_call_count = 0; + append_call_count = 0; resize_default_init_call_count = 0; + append_default_init_call_count = 0; { resizable_string rs; EXPECT_EQ(resize_call_count, 0); + EXPECT_EQ(append_call_count, 0); EXPECT_EQ(resize_default_init_call_count, 0); + EXPECT_EQ(append_default_init_call_count, 0); EXPECT_FALSE( absl::strings_internal::STLStringSupportsNontrashingResize(&rs)); EXPECT_EQ(resize_call_count, 0); + EXPECT_EQ(append_call_count, 0); EXPECT_EQ(resize_default_init_call_count, 0); + EXPECT_EQ(append_default_init_call_count, 0); absl::strings_internal::STLStringResizeUninitialized(&rs, 237); EXPECT_EQ(resize_call_count, 1); + EXPECT_EQ(append_call_count, 0); EXPECT_EQ(resize_default_init_call_count, 0); + EXPECT_EQ(append_default_init_call_count, 0); absl::strings_internal::STLStringResizeUninitializedAmortized(&rs, 1000); - EXPECT_EQ(resize_call_count, 2); + EXPECT_EQ(resize_call_count, 1); + EXPECT_EQ(append_call_count, 1); EXPECT_EQ(resize_default_init_call_count, 0); + EXPECT_EQ(append_default_init_call_count, 0); } resize_call_count = 0; + append_call_count = 0; resize_default_init_call_count = 0; + append_default_init_call_count = 0; { - resize_default_init_string rus; + default_init_string rus; EXPECT_EQ(resize_call_count, 0); + EXPECT_EQ(append_call_count, 0); EXPECT_EQ(resize_default_init_call_count, 0); + EXPECT_EQ(append_default_init_call_count, 0); EXPECT_TRUE( absl::strings_internal::STLStringSupportsNontrashingResize(&rus)); EXPECT_EQ(resize_call_count, 0); + EXPECT_EQ(append_call_count, 0); EXPECT_EQ(resize_default_init_call_count, 0); + EXPECT_EQ(append_default_init_call_count, 0); absl::strings_internal::STLStringResizeUninitialized(&rus, 237); EXPECT_EQ(resize_call_count, 0); + EXPECT_EQ(append_call_count, 0); EXPECT_EQ(resize_default_init_call_count, 1); + EXPECT_EQ(append_default_init_call_count, 0); absl::strings_internal::STLStringResizeUninitializedAmortized(&rus, 1000); EXPECT_EQ(resize_call_count, 0); - EXPECT_EQ(resize_default_init_call_count, 2); + EXPECT_EQ(append_call_count, 0); + EXPECT_EQ(resize_default_init_call_count, 1); + EXPECT_EQ(append_default_init_call_count, 1); } } diff --git 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc index e28a29b171..967fe9ca26 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc @@ -77,7 +77,7 @@ class IntDigits { v >>= 3; } while (v); start_ = p; - size_ = storage_ + sizeof(storage_) - p; + size_ = static_cast(storage_ + sizeof(storage_) - p); } // Print the signed or unsigned integer as decimal. @@ -86,7 +86,8 @@ class IntDigits { void PrintAsDec(T v) { static_assert(std::is_integral::value, ""); start_ = storage_; - size_ = numbers_internal::FastIntToBuffer(v, storage_) - storage_; + size_ = static_cast(numbers_internal::FastIntToBuffer(v, storage_) - + storage_); } void PrintAsDec(int128 v) { @@ -115,7 +116,7 @@ class IntDigits { if (add_neg) { *--p = '-'; } - size_ = storage_ + sizeof(storage_) - p; + size_ = static_cast(storage_ + sizeof(storage_) - p); start_ = p; } @@ -138,7 +139,7 @@ class IntDigits { ++p; } start_ = p; - size_ = storage_ + sizeof(storage_) - p; + size_ = static_cast(storage_ + sizeof(storage_) - p); } // Print the unsigned integer as hex using uppercase. @@ -154,7 +155,7 @@ class IntDigits { v >>= 4; } while (v); start_ = p; - size_ = storage_ + sizeof(storage_) - p; + size_ = static_cast(storage_ + sizeof(storage_) - p); } // The printed value including the '-' sign if available. @@ -208,10 +209,12 @@ string_view SignColumn(bool neg, const FormatConversionSpecImpl conv) { return {}; } -bool ConvertCharImpl(unsigned char v, const FormatConversionSpecImpl conv, - FormatSinkImpl *sink) { +bool ConvertCharImpl(char v, + const FormatConversionSpecImpl conv, + FormatSinkImpl* sink) { size_t fill = 0; - if (conv.width() >= 0) fill = conv.width(); + if (conv.width() >= 0) + fill = static_cast(conv.width()); ReducePadding(1, &fill); if (!conv.has_left_flag()) sink->Append(fill, ' '); sink->Append(1, v); @@ -225,7 +228,8 @@ bool ConvertIntImplInnerSlow(const IntDigits &as_digits, // Print as a sequence of Substrings: // [left_spaces][sign][base_indicator][zeroes][formatted][right_spaces] size_t fill = 0; - if (conv.width() >= 0) fill = conv.width(); + if (conv.width() >= 0) + fill = static_cast(conv.width()); string_view formatted = as_digits.without_neg_or_zero(); ReducePadding(formatted, &fill); @@ -236,10 +240,9 @@ bool ConvertIntImplInnerSlow(const IntDigits &as_digits, string_view base_indicator = BaseIndicator(as_digits, conv); ReducePadding(base_indicator, &fill); - int precision = conv.precision(); - bool precision_specified = precision >= 0; - if (!precision_specified) - precision = 1; + bool precision_specified = conv.precision() >= 0; + size_t precision = + precision_specified ? static_cast(conv.precision()) : size_t{1}; if (conv.has_alt_flag() && conv.conversion_char() == FormatConversionCharInternal::o) { @@ -247,7 +250,7 @@ bool ConvertIntImplInnerSlow(const IntDigits &as_digits, // "For o conversion, it increases the precision (if necessary) to // force the first digit of the result to be zero." 
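As a quick illustration of the C standard wording quoted in that comment, the alternate form for octal only raises the precision when the leading digit is not already zero; plain printf shows the same behavior the hunk implements for %#o:

#include <cstdio>

int main() {
  std::printf("%#o\n", 8);    // "010": precision bumped to force a leading 0
  std::printf("%#.3o\n", 8);  // "010": precision already covers it
  std::printf("%#o\n", 0);    // "0": already starts with zero, nothing added
}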
if (formatted.empty() || *formatted.begin() != '0') { - int needed = static_cast(formatted.size()) + 1; + size_t needed = formatted.size() + 1; precision = std::max(precision, needed); } } @@ -275,19 +278,40 @@ bool ConvertIntImplInnerSlow(const IntDigits &as_digits, return true; } +template ::value && + std::is_signed::value) || + std::is_same::value, + int>::type = 0> +constexpr auto ConvertV(T) { + return FormatConversionCharInternal::d; +} + +template ::value && + std::is_unsigned::value) || + std::is_same::value, + int>::type = 0> +constexpr auto ConvertV(T) { + return FormatConversionCharInternal::u; +} + template -bool ConvertIntArg(T v, const FormatConversionSpecImpl conv, - FormatSinkImpl *sink) { +bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) { using U = typename MakeUnsigned::type; IntDigits as_digits; + if (conv.conversion_char() == FormatConversionCharInternal::v) { + conv.set_conversion_char(ConvertV(T{})); + } + // This odd casting is due to a bug in -Wswitch behavior in gcc49 which causes // it to complain about a switch/case type mismatch, even though both are // FormatConverionChar. Likely this is because at this point // FormatConversionChar is declared, but not defined. switch (static_cast(conv.conversion_char())) { case static_cast(FormatConversionCharInternal::c): - return ConvertCharImpl(static_cast(v), conv, sink); + return ConvertCharImpl(static_cast(v), conv, sink); case static_cast(FormatConversionCharInternal::o): as_digits.PrintAsOct(static_cast(v)); @@ -320,7 +344,7 @@ bool ConvertIntArg(T v, const FormatConversionSpecImpl conv, return ConvertFloatImpl(static_cast(v), conv, sink); default: - ABSL_INTERNAL_ASSUME(false); + ABSL_ASSUME(false); } if (conv.is_basic()) { @@ -331,8 +355,11 @@ bool ConvertIntArg(T v, const FormatConversionSpecImpl conv, } template -bool ConvertFloatArg(T v, const FormatConversionSpecImpl conv, - FormatSinkImpl *sink) { +bool ConvertFloatArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) { + if (conv.conversion_char() == FormatConversionCharInternal::v) { + conv.set_conversion_char(FormatConversionCharInternal::g); + } + return FormatConversionCharIsFloat(conv.conversion_char()) && ConvertFloatImpl(v, conv, sink); } @@ -349,6 +376,15 @@ inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv, } // namespace +bool ConvertBoolArg(bool v, FormatSinkImpl *sink) { + if (v) { + sink->Append("true"); + } else { + sink->Append("false"); + } + return true; +} + // ==================== Strings ==================== StringConvertResult FormatConvertImpl(const std::string &v, const FormatConversionSpecImpl conv, @@ -375,7 +411,7 @@ FormatConvertImpl(const char *v, const FormatConversionSpecImpl conv, len = std::strlen(v); } else { // If precision is set, we look for the NUL-terminator on the valid range. 
- len = std::find(v, v + conv.precision(), '\0') - v; + len = static_cast(std::find(v, v + conv.precision(), '\0') - v); } return {ConvertStringArg(string_view(v, len), conv, sink)}; } @@ -410,19 +446,18 @@ FloatingConvertResult FormatConvertImpl(long double v, } // ==================== Chars ==================== -IntegralConvertResult FormatConvertImpl(char v, - const FormatConversionSpecImpl conv, - FormatSinkImpl *sink) { +CharConvertResult FormatConvertImpl(char v, const FormatConversionSpecImpl conv, + FormatSinkImpl *sink) { return {ConvertIntArg(v, conv, sink)}; } -IntegralConvertResult FormatConvertImpl(signed char v, - const FormatConversionSpecImpl conv, - FormatSinkImpl *sink) { +CharConvertResult FormatConvertImpl(signed char v, + const FormatConversionSpecImpl conv, + FormatSinkImpl *sink) { return {ConvertIntArg(v, conv, sink)}; } -IntegralConvertResult FormatConvertImpl(unsigned char v, - const FormatConversionSpecImpl conv, - FormatSinkImpl *sink) { +CharConvertResult FormatConvertImpl(unsigned char v, + const FormatConversionSpecImpl conv, + FormatSinkImpl *sink) { return {ConvertIntArg(v, conv, sink)}; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h index 3c91be701f..bc4cde9677 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -25,10 +26,12 @@ #include #include #include +#include #include "absl/base/port.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" +#include "absl/strings/internal/has_absl_stringify.h" #include "absl/strings/internal/str_format/extension.h" #include "absl/strings/string_view.h" @@ -45,6 +48,11 @@ class FormatConversionSpec; namespace str_format_internal { +template +struct ArgConvertResult { + bool value; +}; + template struct HasUserDefinedConvert : std::false_type {}; @@ -55,7 +63,12 @@ struct HasUserDefinedConvert()))>> : std::true_type {}; -void AbslFormatConvert(); // Stops the lexical name lookup +// These declarations prevent ADL lookup from continuing in absl namespaces, +// we are deliberately using these as ADL hooks and want them to consider +// non-absl namespaces only. 
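The AbslStringify hook declared above, together with the HasAbslStringify trait and the FormatConvertImpl overload that follows, is the extension point that lets user-defined types participate in formatting: the type provides a friend function template taking a sink, and it is found by ADL. A short usage sketch (Point and its fields are invented for illustration):

#include <string>
#include "absl/strings/str_format.h"

struct Point {
  int x = 0;
  int y = 0;

  // Found by ADL; Sink is whatever sink type the formatting library passes in.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    absl::Format(&sink, "(%d, %d)", p.x, p.y);
  }
};

// Usage: "%v" picks up the AbslStringify overload.
//   std::string s = absl::StrFormat("point = %v", Point{2, 3});
//   // s == "point = (2, 3)"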
+void AbslFormatConvert(); +void AbslStringify(); + template auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv, FormatSinkImpl* sink) @@ -71,6 +84,19 @@ auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv, return AbslFormatConvert(v, fcs, &fs); } +template +auto FormatConvertImpl(const T& v, FormatConversionSpecImpl, + FormatSinkImpl* sink) + -> std::enable_if_t(), v))>::value, + ArgConvertResult> { + using FormatSinkT = + absl::enable_if_t; + auto fs = sink->Wrap(); + AbslStringify(fs, v); + return {true}; +} + template class StreamedWrapper; @@ -95,11 +121,6 @@ struct VoidPtr { uintptr_t value; }; -template -struct ArgConvertResult { - bool value; -}; - template constexpr FormatConversionCharSet ExtractCharSet(FormatConvertResult) { return C; @@ -110,8 +131,8 @@ constexpr FormatConversionCharSet ExtractCharSet(ArgConvertResult) { return C; } -using StringConvertResult = - ArgConvertResult; +using StringConvertResult = ArgConvertResult; ArgConvertResult FormatConvertImpl( VoidPtr v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); @@ -144,7 +165,7 @@ StringConvertResult FormatConvertImpl(const AbslCord& value, size_t space_remaining = 0; int width = conv.width(); - if (width >= 0) space_remaining = width; + if (width >= 0) space_remaining = static_cast(width); size_t to_write = value.size(); @@ -174,11 +195,19 @@ StringConvertResult FormatConvertImpl(const AbslCord& value, } using IntegralConvertResult = ArgConvertResult; +using FloatingConvertResult = ArgConvertResult; +using CharConvertResult = ArgConvertResult; -using FloatingConvertResult = - ArgConvertResult; + +bool ConvertBoolArg(bool v, FormatSinkImpl* sink); // Floats. FloatingConvertResult FormatConvertImpl(float v, FormatConversionSpecImpl conv, @@ -190,14 +219,14 @@ FloatingConvertResult FormatConvertImpl(long double v, FormatSinkImpl* sink); // Chars. -IntegralConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv, - FormatSinkImpl* sink); -IntegralConvertResult FormatConvertImpl(signed char v, - FormatConversionSpecImpl conv, - FormatSinkImpl* sink); -IntegralConvertResult FormatConvertImpl(unsigned char v, - FormatConversionSpecImpl conv, - FormatSinkImpl* sink); +CharConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv, + FormatSinkImpl* sink); +CharConvertResult FormatConvertImpl(signed char v, + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); +CharConvertResult FormatConvertImpl(unsigned char v, + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); // Ints. IntegralConvertResult FormatConvertImpl(short v, // NOLINT @@ -228,9 +257,16 @@ IntegralConvertResult FormatConvertImpl(int128 v, FormatConversionSpecImpl conv, IntegralConvertResult FormatConvertImpl(uint128 v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + +// This function needs to be a template due to ambiguity regarding type +// conversions. template ::value, int> = 0> IntegralConvertResult FormatConvertImpl(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink) { + if (conv.conversion_char() == FormatConversionCharInternal::v) { + return {ConvertBoolArg(v, sink)}; + } + return FormatConvertImpl(static_cast(v), conv, sink); } @@ -238,7 +274,8 @@ IntegralConvertResult FormatConvertImpl(T v, FormatConversionSpecImpl conv, // FormatArgImpl will use the underlying Convert functions instead. 
template typename std::enable_if::value && - !HasUserDefinedConvert::value, + !HasUserDefinedConvert::value && + !strings_internal::HasAbslStringify::value, IntegralConvertResult>::type FormatConvertImpl(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); @@ -301,11 +338,11 @@ struct FormatArgImplFriend { template constexpr FormatConversionCharSet ArgumentToConv() { - return absl::str_format_internal::ExtractCharSet( - decltype(str_format_internal::FormatConvertImpl( - std::declval(), - std::declval(), - std::declval())){}); + using ConvResult = decltype(str_format_internal::FormatConvertImpl( + std::declval(), + std::declval(), + std::declval())); + return absl::str_format_internal::ExtractCharSet(ConvResult{}); } // A type-erased handle to a format argument. @@ -351,7 +388,8 @@ class FormatArgImpl { template struct DecayType { static constexpr bool kHasUserDefined = - str_format_internal::HasUserDefinedConvert::value; + str_format_internal::HasUserDefinedConvert::value || + strings_internal::HasAbslStringify::value; using type = typename std::conditional< !kHasUserDefined && std::is_convertible::value, const char*, @@ -363,6 +401,7 @@ class FormatArgImpl { struct DecayType::value && + !strings_internal::HasAbslStringify::value && std::is_enum::value>::type> { using type = typename std::underlying_type::type; }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc index c988ba8fd2..77a4222337 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc @@ -32,7 +32,8 @@ inline bool BindFromPosition(int position, int* value, return false; } // -1 because positions are 1-based - return FormatArgImplFriend::ToInt(pack[position - 1], value); + return FormatArgImplFriend::ToInt(pack[static_cast(position) - 1], + value); } class ArgContext { @@ -56,7 +57,7 @@ inline bool ArgContext::Bind(const UnboundConversion* unbound, const FormatArgImpl* arg = nullptr; int arg_position = unbound->arg_position; if (static_cast(arg_position - 1) >= pack_.size()) return false; - arg = &pack_[arg_position - 1]; // 1-based + arg = &pack_[static_cast(arg_position - 1)]; // 1-based if (unbound->flags != Flags::kBasic) { int width = unbound->width.value(); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h index b26cff6648..b73c50287c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h @@ -25,6 +25,7 @@ #include "absl/strings/internal/str_format/checker.h" #include "absl/strings/internal/str_format/parser.h" #include "absl/types/span.h" +#include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -87,6 +88,36 @@ class FormatSpecTemplate : public MakeDependent::type { using Base = typename MakeDependent::type; + template + struct ErrorMaker { + constexpr bool operator()(int) const { return res; } + }; + + template + static constexpr bool CheckArity(ErrorMaker SpecifierCount = {}, + ErrorMaker ParametersPassed = {}) { + static_assert(SpecifierCount(i) == ParametersPassed(j), + "Number of 
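Taken together with ConvertBoolArg and the ConvertV defaulting earlier in arg.cc, these changes implement the %v conversion: the argument type chooses the underlying conversion (d for signed integers, u for unsigned, g for floating point, and literal true/false for bool). A brief usage sketch of the intended behavior, assuming the public absl::StrFormat entry point:

#include <string>
#include "absl/strings/str_format.h"

int main() {
  // %v picks a sensible conversion from the argument type:
  std::string a = absl::StrFormat("%v", 42);                   // "42"   (as %d)
  std::string b = absl::StrFormat("%v", 42u);                  // "42"   (as %u)
  std::string c = absl::StrFormat("%v", 1.5);                  // "1.5"  (as %g)
  std::string d = absl::StrFormat("%v", true);                 // "true"
  std::string e = absl::StrFormat("%v", std::string("text"));  // "text" (as %s)
}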
arguments passed must match the number of " + "conversion specifiers."); + return true; + } + + template + static constexpr bool CheckMatch( + ErrorMaker MismatchedArgumentNumber = {}) { + static_assert(MismatchedArgumentNumber(arg), + "Passed argument must match specified format."); + return true; + } + + template + static bool CheckMatches(absl::index_sequence) { + bool res[] = {true, CheckMatch()...}; + (void)res; + return true; + } + public: #ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER @@ -112,7 +143,8 @@ class FormatSpecTemplate template FormatSpecTemplate(string_view s) // NOLINT __attribute__((enable_if(str_format_internal::EnsureConstexpr(s), - "constexpr trap"))) { + "constexpr trap"))) + : Base("to avoid noise in the compiler error") { static_assert(sizeof(T*) == 0, "Format specified does not match the arguments passed."); } @@ -133,13 +165,12 @@ class FormatSpecTemplate #endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER - template < - FormatConversionCharSet... C, - typename = typename std::enable_if::type, - typename = typename std::enable_if::type> + template FormatSpecTemplate(const ExtendedParsedFormat& pc) // NOLINT - : Base(&pc) {} + : Base(&pc) { + CheckArity(); + CheckMatches(absl::make_index_sequence{}); + } }; class Streamable { @@ -204,9 +235,10 @@ class StreamedWrapper { private: template - friend ArgConvertResult FormatConvertImpl( - const StreamedWrapper& v, FormatConversionSpecImpl conv, - FormatSinkImpl* out); + friend ArgConvertResult + FormatConvertImpl(const StreamedWrapper& v, FormatConversionSpecImpl conv, + FormatSinkImpl* out); const T& v_; }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker.h index 2a2601eccf..aeb9d48d39 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker.h @@ -22,9 +22,14 @@ // Compile time check support for entry points. #ifndef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER -#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) +// We disable format checker under vscode intellisense compilation. +// See https://github.com/microsoft/vscode-cpptools/issues/3683 for +// more details. +#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && \ + !defined(__INTELLISENSE__) #define ABSL_INTERNAL_ENABLE_FORMAT_CHECKER 1 -#endif // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) +#endif // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && + // !defined(__INTELLISENSE__) #endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER namespace absl { @@ -77,9 +82,10 @@ constexpr string_view ConsumeFront(string_view str, size_t len = 1) { } constexpr string_view ConsumeAnyOf(string_view format, const char* chars) { - return ContainsChar(chars, GetChar(format, 0)) - ? ConsumeAnyOf(ConsumeFront(format), chars) - : format; + while (ContainsChar(chars, GetChar(format, 0))) { + format = ConsumeFront(format); + } + return format; } constexpr bool IsDigit(char c) { return c >= '0' && c <= '9'; } @@ -93,16 +99,22 @@ struct Integer { // If the next character is a '$', consume it. // Otherwise, make `this` an invalid positional argument. constexpr Integer ConsumePositionalDollar() const { - return GetChar(format, 0) == '$' ? 
Integer{ConsumeFront(format), value} - : Integer{format, 0}; + if (GetChar(format, 0) == '$') { + return Integer{ConsumeFront(format), value}; + } else { + return Integer{format, 0}; + } } }; -constexpr Integer ParseDigits(string_view format, int value = 0) { - return IsDigit(GetChar(format, 0)) - ? ParseDigits(ConsumeFront(format), - 10 * value + GetChar(format, 0) - '0') - : Integer{format, value}; +constexpr Integer ParseDigits(string_view format) { + int value = 0; + while (IsDigit(GetChar(format, 0))) { + value = 10 * value + GetChar(format, 0) - '0'; + format = ConsumeFront(format); + } + + return Integer{format, value}; } // Parse digits for a positional argument. @@ -158,30 +170,36 @@ class ConvParser { // If it is '*', we verify that it matches `args_`. `error_` is set if it // doesn't match. constexpr ConvParser ParseWidth() const { - return IsDigit(GetChar(format_, 0)) - ? SetFormat(ParseDigits(format_).format) - : GetChar(format_, 0) == '*' - ? is_positional_ - ? VerifyPositional( - ParsePositional(ConsumeFront(format_)), '*') - : SetFormat(ConsumeFront(format_)) - .ConsumeNextArg('*') - : *this; + char first_char = GetChar(format_, 0); + + if (IsDigit(first_char)) { + return SetFormat(ParseDigits(format_).format); + } else if (first_char == '*') { + if (is_positional_) { + return VerifyPositional(ParsePositional(ConsumeFront(format_)), '*'); + } else { + return SetFormat(ConsumeFront(format_)).ConsumeNextArg('*'); + } + } else { + return *this; + } } // Consume the precision. // If it is '*', we verify that it matches `args_`. `error_` is set if it // doesn't match. constexpr ConvParser ParsePrecision() const { - return GetChar(format_, 0) != '.' - ? *this - : GetChar(format_, 1) == '*' - ? is_positional_ - ? VerifyPositional( - ParsePositional(ConsumeFront(format_, 2)), '*') - : SetFormat(ConsumeFront(format_, 2)) - .ConsumeNextArg('*') - : SetFormat(ParseDigits(ConsumeFront(format_)).format); + if (GetChar(format_, 0) != '.') { + return *this; + } else if (GetChar(format_, 1) == '*') { + if (is_positional_) { + return VerifyPositional(ParsePositional(ConsumeFront(format_, 2)), '*'); + } else { + return SetFormat(ConsumeFront(format_, 2)).ConsumeNextArg('*'); + } + } else { + return SetFormat(ParseDigits(ConsumeFront(format_)).format); + } } // Consume the length characters. @@ -192,11 +210,18 @@ class ConvParser { // Consume the conversion character and verify that it matches `args_`. // `error_` is set if it doesn't match. constexpr ConvParser ParseConversion() const { - return is_positional_ - ? VerifyPositional({ConsumeFront(format_), arg_position_}, - GetChar(format_, 0)) - : ConsumeNextArg(GetChar(format_, 0)) - .SetFormat(ConsumeFront(format_)); + char first_char = GetChar(format_, 0); + + if (first_char == 'v' && *(format_.data() - 1) != '%') { + return SetError(true); + } + + if (is_positional_) { + return VerifyPositional({ConsumeFront(format_), arg_position_}, + first_char); + } else { + return ConsumeNextArg(first_char).SetFormat(ConsumeFront(format_)); + } } constexpr ConvParser(string_view format, ConvList args, bool error, @@ -219,8 +244,13 @@ class ConvParser { // `format()` will be set to the character after the conversion character. // `error()` will be set if any of the arguments do not match. constexpr ConvParser Run() const { - return (is_positional_ ? 
ParseArgPosition(ParsePositional(format_)) : *this) - .ParseFlags() + ConvParser parser = *this; + + if (is_positional_) { + parser = ParseArgPosition(ParsePositional(format_)); + } + + return parser.ParseFlags() .ParseWidth() .ParsePrecision() .ParseLength() @@ -257,29 +287,40 @@ class FormatParser { // We use an inner function to increase the recursion limit. // The inner function consumes up to `limit` characters on every run. // This increases the limit from 512 to ~512*limit. - static constexpr string_view ConsumeNonPercentInner(string_view format, - int limit = 20) { - return FoundPercent(format) || !limit - ? format - : ConsumeNonPercentInner( - ConsumeFront(format, GetChar(format, 0) == '%' && - GetChar(format, 1) == '%' - ? 2 - : 1), - limit - 1); + static constexpr string_view ConsumeNonPercentInner(string_view format) { + int limit = 20; + while (!FoundPercent(format) && limit != 0) { + size_t len = 0; + + if (GetChar(format, 0) == '%' && GetChar(format, 1) == '%') { + len = 2; + } else { + len = 1; + } + + format = ConsumeFront(format, len); + --limit; + } + + return format; } // Consume characters until the next conversion spec %. // It skips %%. static constexpr string_view ConsumeNonPercent(string_view format) { - return FoundPercent(format) - ? format - : ConsumeNonPercent(ConsumeNonPercentInner(format)); + while (!FoundPercent(format)) { + format = ConsumeNonPercentInner(format); + } + + return format; } static constexpr bool IsPositional(string_view format) { - return IsDigit(GetChar(format, 0)) ? IsPositional(ConsumeFront(format)) - : GetChar(format, 0) == '$'; + while (IsDigit(GetChar(format, 0))) { + format = ConsumeFront(format); + } + + return GetChar(format, 0) == '$'; } constexpr bool RunImpl(bool is_positional) const { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker_test.cc index 7c70f47d68..680517f7fc 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/checker_test.cc @@ -39,16 +39,16 @@ std::string ConvToString(FormatConversionCharSet conv) { TEST(StrFormatChecker, ArgumentToConv) { FormatConversionCharSet conv = ArgumentToConv(); - EXPECT_EQ(ConvToString(conv), "s"); + EXPECT_EQ(ConvToString(conv), "sv"); conv = ArgumentToConv(); EXPECT_EQ(ConvToString(conv), "sp"); conv = ArgumentToConv(); - EXPECT_EQ(ConvToString(conv), "fFeEgGaA"); + EXPECT_EQ(ConvToString(conv), "fFeEgGaAv"); conv = ArgumentToConv(); - EXPECT_EQ(ConvToString(conv), "cdiouxXfFeEgGaA*"); + EXPECT_EQ(ConvToString(conv), "cdiouxXfFeEgGaAv*"); conv = ArgumentToConv(); EXPECT_EQ(ConvToString(conv), "p"); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/convert_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/convert_test.cc index 91e0360901..300612b7ba 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/convert_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/convert_test.cc @@ -24,6 +24,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/base/attributes.h" #include "absl/base/internal/raw_logging.h" #include "absl/strings/internal/str_format/bind.h" #include 
"absl/strings/match.h" @@ -124,6 +125,7 @@ void StrAppendV(std::string *dst, const char *format, va_list ap) { delete[] buf; } +void StrAppend(std::string *, const char *, ...) ABSL_PRINTF_ATTRIBUTE(2, 3); void StrAppend(std::string *out, const char *format, ...) { va_list ap; va_start(ap, format); @@ -131,6 +133,7 @@ void StrAppend(std::string *out, const char *format, ...) { va_end(ap); } +std::string StrPrint(const char *, ...) ABSL_PRINTF_ATTRIBUTE(1, 2); std::string StrPrint(const char *format, ...) { va_list ap; va_start(ap, format); @@ -455,25 +458,36 @@ TYPED_TEST_P(TypedFormatConvertTest, AllIntsWithFlags) { } TYPED_TEST_P(TypedFormatConvertTest, Char) { + // Pass a bunch of values of type TypeParam to both FormatPack and libc's + // vsnprintf("%c", ...) (wrapped in StrPrint) to make sure we get the same + // value. typedef TypeParam T; using remove_volatile_t = typename std::remove_volatile::type; - static const T kMin = std::numeric_limits::min(); - static const T kMax = std::numeric_limits::max(); - T kVals[] = { - remove_volatile_t(1), remove_volatile_t(2), remove_volatile_t(10), - remove_volatile_t(-1), remove_volatile_t(-2), remove_volatile_t(-10), - remove_volatile_t(0), - kMin + remove_volatile_t(1), kMin, - kMax - remove_volatile_t(1), kMax + std::vector vals = { + remove_volatile_t(1), remove_volatile_t(2), remove_volatile_t(10), // + remove_volatile_t(-1), remove_volatile_t(-2), remove_volatile_t(-10), // + remove_volatile_t(0), }; - for (const T &c : kVals) { + + // We'd like to test values near std::numeric_limits::min() and + // std::numeric_limits::max(), too, but vsnprintf("%c", ...) can't handle + // anything larger than an int. Add in the most extreme values we can without + // exceeding that range. + static const T kMin = + static_cast(std::numeric_limits::min()); + static const T kMax = + static_cast(std::numeric_limits::max()); + vals.insert(vals.end(), {kMin + 1, kMin, kMax - 1, kMax}); + + for (const T c : vals) { const FormatArgImpl args[] = {FormatArgImpl(c)}; UntypedFormatSpecImpl format("%c"); - EXPECT_EQ(StrPrint("%c", c), FormatPack(format, absl::MakeSpan(args))); + EXPECT_EQ(StrPrint("%c", static_cast(c)), + FormatPack(format, absl::MakeSpan(args))); } } -REGISTER_TYPED_TEST_CASE_P(TypedFormatConvertTest, AllIntsWithFlags, Char); +REGISTER_TYPED_TEST_SUITE_P(TypedFormatConvertTest, AllIntsWithFlags, Char); typedef ::testing::Types< int, unsigned, volatile int, @@ -482,8 +496,8 @@ typedef ::testing::Types< long long, unsigned long long, signed char, unsigned char, char> AllIntTypes; -INSTANTIATE_TYPED_TEST_CASE_P(TypedFormatConvertTestWithAllIntTypes, - TypedFormatConvertTest, AllIntTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(TypedFormatConvertTestWithAllIntTypes, + TypedFormatConvertTest, AllIntTypes); TEST_F(FormatConvertTest, VectorBool) { // Make sure vector's values behave as bools. 
std::vector v = {true, false}; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc index 484f6ebfc1..2a0ceb13d7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc @@ -33,6 +33,8 @@ std::string FlagsToString(Flags v) { return s; } +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL + #define ABSL_INTERNAL_X_VAL(id) \ constexpr absl::FormatConversionChar FormatConversionCharInternal::id; ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, ) @@ -45,21 +47,19 @@ constexpr absl::FormatConversionChar FormatConversionCharInternal::kNone; ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, ) #undef ABSL_INTERNAL_CHAR_SET_CASE -// NOLINTNEXTLINE(readability-redundant-declaration) constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar; -// NOLINTNEXTLINE(readability-redundant-declaration) constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral; -// NOLINTNEXTLINE(readability-redundant-declaration) constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating; -// NOLINTNEXTLINE(readability-redundant-declaration) constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric; -// NOLINTNEXTLINE(readability-redundant-declaration) constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer; +#endif // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL + bool FormatSinkImpl::PutPaddedString(string_view value, int width, int precision, bool left) { size_t space_remaining = 0; - if (width >= 0) space_remaining = width; + if (width >= 0) + space_remaining = static_cast(width); size_t n = value.size(); if (precision >= 0) n = std::min(n, static_cast(precision)); string_view shown(value.data(), n); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h index 55cbb56d0a..603bd49d18 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -70,7 +71,7 @@ class FormatSinkImpl { ~FormatSinkImpl() { Flush(); } void Flush() { - raw_.Write(string_view(buf_, pos_ - buf_)); + raw_.Write(string_view(buf_, static_cast(pos_ - buf_))); pos_ = buf_; } @@ -120,7 +121,9 @@ class FormatSinkImpl { } private: - size_t Avail() const { return buf_ + sizeof(buf_) - pos_; } + size_t Avail() const { + return static_cast(buf_ + sizeof(buf_) - pos_); + } FormatRawSinkImpl raw_; size_t size_ = 0; @@ -166,7 +169,7 @@ inline std::ostream& operator<<(std::ostream& os, Flags v) { X_VAL(f) X_SEP X_VAL(F) X_SEP X_VAL(e) X_SEP X_VAL(E) X_SEP \ X_VAL(g) X_SEP X_VAL(G) X_SEP X_VAL(a) X_SEP X_VAL(A) X_SEP \ /* misc */ \ - X_VAL(n) X_SEP X_VAL(p) + X_VAL(n) X_SEP X_VAL(p) X_SEP X_VAL(v) // clang-format on // This type should not be referenced, it exists only to provide labels @@ -188,7 +191,7 @@ struct FormatConversionCharInternal { c, s, // text d, i, o, u, x, X, // int f, F, e, E, g, G, a, A, // float - n, p, // misc + n, p, v, // misc 
kNone }; // clang-format on @@ -289,6 +292,8 @@ class FormatConversionSpecImpl { return conv_; } + void set_conversion_char(FormatConversionChar c) { conv_ = c; } + // Returns the specified width. If width is unspecfied, it returns a negative // value. int width() const { return width_; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension_test.cc index 1c93fdb1c7..694c126406 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/extension_test.cc @@ -19,6 +19,7 @@ #include #include +#include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" @@ -95,4 +96,14 @@ TEST(FormatExtensionTest, VerifyEnumEquality) { #undef X_VAL } +TEST(FormatExtensionTest, SetConversionChar) { + absl::str_format_internal::FormatConversionSpecImpl spec; + EXPECT_EQ(spec.conversion_char(), + absl::str_format_internal::FormatConversionCharInternal::kNone); + spec.set_conversion_char( + absl::str_format_internal::FormatConversionCharInternal::d); + EXPECT_EQ(spec.conversion_char(), + absl::str_format_internal::FormatConversionCharInternal::d); +} + } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc index b1c4068475..8e497852bb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc @@ -92,27 +92,30 @@ class StackArray { // Calculates `10 * (*v) + carry` and stores the result in `*v` and returns // the carry. +// Requires: `0 <= carry <= 9` template -inline Int MultiplyBy10WithCarry(Int *v, Int carry) { +inline char MultiplyBy10WithCarry(Int* v, char carry) { using BiggerInt = absl::conditional_t; - BiggerInt tmp = 10 * static_cast(*v) + carry; + BiggerInt tmp = + 10 * static_cast(*v) + static_cast(carry); *v = static_cast(tmp); - return static_cast(tmp >> (sizeof(Int) * 8)); + return static_cast(tmp >> (sizeof(Int) * 8)); } // Calculates `(2^64 * carry + *v) / 10`. // Stores the quotient in `*v` and returns the remainder. 
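MultiplyBy10WithCarry above does one step of the long multiplication that drives the decimal conversion: the product is formed in a type twice as wide, the low half stays in *v, and the high half comes back as the next carry. A standalone sketch for a single 32-bit chunk, assuming the carry is a digit in [0, 9]:

#include <cassert>
#include <cstdint>

// Compute 10 * (*v) + carry in 64 bits, keep the low 32 bits in *v, and
// return the bits that overflowed as the next carry (always <= 9 here).
inline char MultiplyUint32By10WithCarry(uint32_t* v, char carry) {
  uint64_t tmp = 10 * static_cast<uint64_t>(*v) + static_cast<uint64_t>(carry);
  *v = static_cast<uint32_t>(tmp);
  return static_cast<char>(tmp >> 32);
}

// Example: 0xFFFFFFFF * 10 + 7 == 0x9FFFFFFFD, so the carry out is 9.
inline void MultiplyBy10CarryDemo() {
  uint32_t v = 0xFFFFFFFFu;
  char carry = MultiplyUint32By10WithCarry(&v, 7);
  assert(carry == 9);
  assert(v == 0xFFFFFFFDu);
}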
// Requires: `0 <= carry <= 9` -inline uint64_t DivideBy10WithCarry(uint64_t *v, uint64_t carry) { +inline char DivideBy10WithCarry(uint64_t* v, char carry) { constexpr uint64_t divisor = 10; // 2^64 / divisor = chunk_quotient + chunk_remainder / divisor constexpr uint64_t chunk_quotient = (uint64_t{1} << 63) / (divisor / 2); constexpr uint64_t chunk_remainder = uint64_t{} - chunk_quotient * divisor; + const uint64_t carry_u64 = static_cast(carry); const uint64_t mod = *v % divisor; - const uint64_t next_carry = chunk_remainder * carry + mod; - *v = *v / divisor + carry * chunk_quotient + next_carry / divisor; - return next_carry % divisor; + const uint64_t next_carry = chunk_remainder * carry_u64 + mod; + *v = *v / divisor + carry_u64 * chunk_quotient + next_carry / divisor; + return static_cast(next_carry % divisor); } using MaxFloatType = @@ -125,11 +128,11 @@ using MaxFloatType = // // Requires `0 <= exp` and `exp <= numeric_limits::max_exponent`. class BinaryToDecimal { - static constexpr int ChunksNeeded(int exp) { + static constexpr size_t ChunksNeeded(int exp) { // We will left shift a uint128 by `exp` bits, so we need `128+exp` total // bits. Round up to 32. // See constructor for details about adding `10%` to the value. - return (128 + exp + 31) / 32 * 11 / 10; + return static_cast((128 + exp + 31) / 32 * 11 / 10); } public: @@ -140,7 +143,7 @@ class BinaryToDecimal { assert(exp > 0); assert(exp <= std::numeric_limits::max_exponent); static_assert( - static_cast(StackArray::kMaxCapacity) >= + StackArray::kMaxCapacity >= ChunksNeeded(std::numeric_limits::max_exponent), ""); @@ -149,9 +152,9 @@ class BinaryToDecimal { [=](absl::Span input) { f(BinaryToDecimal(input, v, exp)); }); } - int TotalDigits() const { - return static_cast((decimal_end_ - decimal_start_) * kDigitsPerChunk + - CurrentDigits().size()); + size_t TotalDigits() const { + return (decimal_end_ - decimal_start_) * kDigitsPerChunk + + CurrentDigits().size(); } // See the current block of digits. @@ -190,30 +193,31 @@ class BinaryToDecimal { // the decimal representation is around 7% less efficient in space than the // binary one. We allocate an extra 10% memory to account for this. See // ChunksNeeded for this calculation. - int chunk_index = exp / 32; + size_t after_chunk_index = static_cast(exp / 32 + 1); decimal_start_ = decimal_end_ = ChunksNeeded(exp); const int offset = exp % 32; // Left shift v by exp bits. - data_[chunk_index] = static_cast(v << offset); + data_[after_chunk_index - 1] = static_cast(v << offset); for (v >>= (32 - offset); v; v >>= 32) - data_[++chunk_index] = static_cast(v); + data_[++after_chunk_index - 1] = static_cast(v); - while (chunk_index >= 0) { + while (after_chunk_index > 0) { // While we have more than one chunk available, go in steps of 1e9. - // `data_[chunk_index]` holds the highest non-zero binary chunk, so keep - // the variable updated. + // `data_[after_chunk_index - 1]` holds the highest non-zero binary chunk, + // so keep the variable updated. uint32_t carry = 0; - for (int i = chunk_index; i >= 0; --i) { - uint64_t tmp = uint64_t{data_[i]} + (uint64_t{carry} << 32); - data_[i] = static_cast(tmp / uint64_t{1000000000}); + for (size_t i = after_chunk_index; i > 0; --i) { + uint64_t tmp = uint64_t{data_[i - 1]} + (uint64_t{carry} << 32); + data_[i - 1] = static_cast(tmp / uint64_t{1000000000}); carry = static_cast(tmp % uint64_t{1000000000}); } // If the highest chunk is now empty, remove it from view. 
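The constructor loop above divides the whole chunk array by 1e9 on every pass, peeling off nine decimal digits at a time. A hedged sketch of that long-division step over a most-significant-first vector of 32-bit chunks (names are illustrative, not the class's own):

#include <cstdint>
#include <vector>

// Treat `chunks` as one big base-2^32 integer (most significant chunk
// first), divide it by 1e9 in place, and return the remainder, i.e. the
// lowest nine decimal digits of the original value.
inline uint32_t DivideChunksBy1e9(std::vector<uint32_t>& chunks) {
  uint32_t carry = 0;
  for (uint32_t& chunk : chunks) {
    uint64_t tmp = (static_cast<uint64_t>(carry) << 32) | chunk;
    chunk = static_cast<uint32_t>(tmp / 1000000000u);
    carry = static_cast<uint32_t>(tmp % 1000000000u);
  }
  return carry;
}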
- if (data_[chunk_index] == 0) --chunk_index; + if (data_[after_chunk_index - 1] == 0) + --after_chunk_index; --decimal_start_; - assert(decimal_start_ != chunk_index); + assert(decimal_start_ != after_chunk_index - 1); data_[decimal_start_] = carry; } @@ -225,13 +229,13 @@ class BinaryToDecimal { } private: - static constexpr int kDigitsPerChunk = 9; + static constexpr size_t kDigitsPerChunk = 9; - int decimal_start_; - int decimal_end_; + size_t decimal_start_; + size_t decimal_end_; char digits_[kDigitsPerChunk]; - int size_ = 0; + size_t size_ = 0; absl::Span data_; }; @@ -251,25 +255,26 @@ class FractionalDigitGenerator { static_assert(StackArray::kMaxCapacity >= (Limits::digits + 128 - Limits::min_exponent + 31) / 32, ""); - StackArray::RunWithCapacity((Limits::digits + exp + 31) / 32, - [=](absl::Span input) { - f(FractionalDigitGenerator(input, v, exp)); - }); + StackArray::RunWithCapacity( + static_cast((Limits::digits + exp + 31) / 32), + [=](absl::Span input) { + f(FractionalDigitGenerator(input, v, exp)); + }); } // Returns true if there are any more non-zero digits left. - bool HasMoreDigits() const { return next_digit_ != 0 || chunk_index_ >= 0; } + bool HasMoreDigits() const { return next_digit_ != 0 || after_chunk_index_; } // Returns true if the remainder digits are greater than 5000... bool IsGreaterThanHalf() const { - return next_digit_ > 5 || (next_digit_ == 5 && chunk_index_ >= 0); + return next_digit_ > 5 || (next_digit_ == 5 && after_chunk_index_); } // Returns true if the remainder digits are exactly 5000... - bool IsExactlyHalf() const { return next_digit_ == 5 && chunk_index_ < 0; } + bool IsExactlyHalf() const { return next_digit_ == 5 && !after_chunk_index_; } struct Digits { - int digit_before_nine; - int num_nines; + char digit_before_nine; + size_t num_nines; }; // Get the next set of digits. @@ -288,35 +293,37 @@ class FractionalDigitGenerator { private: // Return the next digit. - int GetOneDigit() { - if (chunk_index_ < 0) return 0; + char GetOneDigit() { + if (!after_chunk_index_) + return 0; - uint32_t carry = 0; - for (int i = chunk_index_; i >= 0; --i) { - carry = MultiplyBy10WithCarry(&data_[i], carry); + char carry = 0; + for (size_t i = after_chunk_index_; i > 0; --i) { + carry = MultiplyBy10WithCarry(&data_[i - 1], carry); } // If the lowest chunk is now empty, remove it from view. - if (data_[chunk_index_] == 0) --chunk_index_; + if (data_[after_chunk_index_ - 1] == 0) + --after_chunk_index_; return carry; } FractionalDigitGenerator(absl::Span data, uint128 v, int exp) - : chunk_index_(exp / 32), data_(data) { + : after_chunk_index_(static_cast(exp / 32 + 1)), data_(data) { const int offset = exp % 32; // Right shift `v` by `exp` bits. - data_[chunk_index_] = static_cast(v << (32 - offset)); + data_[after_chunk_index_ - 1] = static_cast(v << (32 - offset)); v >>= offset; // Make sure we don't overflow the data. We already calculated that // non-zero bits fit, so we might not have space for leading zero bits. - for (int pos = chunk_index_; v; v >>= 32) + for (size_t pos = after_chunk_index_ - 1; v; v >>= 32) data_[--pos] = static_cast(v); // Fill next_digit_, as GetDigits expects it to be populated always. 
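GetOneDigit above produces one decimal digit per call by multiplying every chunk by 10 and returning the carry that falls out of the top. The same idea collapsed to a single word, assuming the fractional bits are left-aligned in a uint32_t (so the value represented is frac / 2^32):

#include <cstdint>
#include <string>

// Each multiply by 10 pushes the next decimal digit of the fraction out of
// the top of the word; stop early once the fraction becomes exactly zero.
inline std::string FractionDigitsSketch(uint32_t frac, int num_digits) {
  std::string digits;
  for (int i = 0; i < num_digits && frac != 0; ++i) {
    uint64_t tmp = static_cast<uint64_t>(frac) * 10;
    digits.push_back(static_cast<char>('0' + static_cast<int>(tmp >> 32)));
    frac = static_cast<uint32_t>(tmp);
  }
  return digits;
}

// Example: 0.5 is frac = 1u << 31, which yields "5".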
next_digit_ = GetOneDigit(); } - int next_digit_; - int chunk_index_; + char next_digit_; + size_t after_chunk_index_; absl::Span data_; }; @@ -362,7 +369,7 @@ char *PrintIntegralDigitsFromRightFast(uint128 v, char *p) { auto low = static_cast(v); while (high != 0) { - uint64_t carry = DivideBy10WithCarry(&high, 0); + char carry = DivideBy10WithCarry(&high, 0); carry = DivideBy10WithCarry(&low, carry); *--p = carry + '0'; } @@ -373,13 +380,15 @@ char *PrintIntegralDigitsFromRightFast(uint128 v, char *p) { // shifting. // Performs rounding if necessary to fit within `precision`. // Returns the pointer to one after the last character written. -char *PrintFractionalDigitsFast(uint64_t v, char *start, int exp, - int precision) { +char* PrintFractionalDigitsFast(uint64_t v, + char* start, + int exp, + size_t precision) { char *p = start; v <<= (64 - exp); while (precision > 0) { if (!v) return p; - *p++ = MultiplyBy10WithCarry(&v, uint64_t{0}) + '0'; + *p++ = MultiplyBy10WithCarry(&v, 0) + '0'; --precision; } @@ -393,8 +402,6 @@ char *PrintFractionalDigitsFast(uint64_t v, char *start, int exp, RoundToEven(p - 1); } - assert(precision == 0); - // Precision can only be zero here. return p; } @@ -402,8 +409,10 @@ char *PrintFractionalDigitsFast(uint64_t v, char *start, int exp, // after shifting. // Performs rounding if necessary to fit within `precision`. // Returns the pointer to one after the last character written. -char *PrintFractionalDigitsFast(uint128 v, char *start, int exp, - int precision) { +char* PrintFractionalDigitsFast(uint128 v, + char* start, + int exp, + size_t precision) { char *p = start; v <<= (128 - exp); auto high = static_cast(v >> 64); @@ -412,7 +421,7 @@ char *PrintFractionalDigitsFast(uint128 v, char *start, int exp, // While we have digits to print and `low` is not empty, do the long // multiplication. while (precision > 0 && low != 0) { - uint64_t carry = MultiplyBy10WithCarry(&low, uint64_t{0}); + char carry = MultiplyBy10WithCarry(&low, 0); carry = MultiplyBy10WithCarry(&high, carry); *p++ = carry + '0'; @@ -424,7 +433,7 @@ char *PrintFractionalDigitsFast(uint128 v, char *start, int exp, // above. while (precision > 0) { if (!high) return p; - *p++ = MultiplyBy10WithCarry(&high, uint64_t{0}) + '0'; + *p++ = MultiplyBy10WithCarry(&high, 0) + '0'; --precision; } @@ -438,14 +447,12 @@ char *PrintFractionalDigitsFast(uint128 v, char *start, int exp, RoundToEven(p - 1); } - assert(precision == 0); - // Precision can only be zero here. 
return p; } struct FormatState { char sign_char; - int precision; + size_t precision; const FormatConversionSpecImpl &conv; FormatSinkImpl *sink; @@ -455,9 +462,9 @@ struct FormatState { }; struct Padding { - int left_spaces; - int zeros; - int right_spaces; + size_t left_spaces; + size_t zeros; + size_t right_spaces; }; Padding ExtraWidthToPadding(size_t total_size, const FormatState &state) { @@ -465,7 +472,7 @@ Padding ExtraWidthToPadding(size_t total_size, const FormatState &state) { static_cast(state.conv.width()) <= total_size) { return {0, 0, 0}; } - int missing_chars = state.conv.width() - total_size; + size_t missing_chars = static_cast(state.conv.width()) - total_size; if (state.conv.has_left_flag()) { return {0, 0, missing_chars}; } else if (state.conv.has_zero_flag()) { @@ -475,8 +482,10 @@ Padding ExtraWidthToPadding(size_t total_size, const FormatState &state) { } } -void FinalPrint(const FormatState &state, absl::string_view data, - int padding_offset, int trailing_zeros, +void FinalPrint(const FormatState& state, + absl::string_view data, + size_t padding_offset, + size_t trailing_zeros, absl::string_view data_postfix) { if (state.conv.width() < 0) { // No width specified. Fast-path. @@ -487,10 +496,10 @@ void FinalPrint(const FormatState &state, absl::string_view data, return; } - auto padding = ExtraWidthToPadding((state.sign_char != '\0' ? 1 : 0) + - data.size() + data_postfix.size() + - static_cast(trailing_zeros), - state); + auto padding = + ExtraWidthToPadding((state.sign_char != '\0' ? 1 : 0) + data.size() + + data_postfix.size() + trailing_zeros, + state); state.sink->Append(padding.left_spaces, ' '); if (state.sign_char != '\0') state.sink->Append(1, state.sign_char); @@ -547,15 +556,16 @@ void FormatFFast(Int v, int exp, const FormatState &state) { if (integral_digits_start[-1] != '0') --integral_digits_start; } - size_t size = fractional_digits_end - integral_digits_start; + size_t size = + static_cast(fractional_digits_end - integral_digits_start); // In `alt` mode (flag #) we keep the `.` even if there are no fractional // digits. In non-alt mode, we strip it. if (!state.ShouldPrintDot()) --size; FinalPrint(state, absl::string_view(integral_digits_start, size), /*padding_offset=*/0, - static_cast(state.precision - (fractional_digits_end - - fractional_digits_start)), + state.precision - static_cast(fractional_digits_end - + fractional_digits_start), /*data_postfix=*/""); } @@ -567,21 +577,22 @@ void FormatFFast(Int v, int exp, const FormatState &state) { void FormatFPositiveExpSlow(uint128 v, int exp, const FormatState &state) { BinaryToDecimal::RunConversion(v, exp, [&](BinaryToDecimal btd) { const size_t total_digits = - btd.TotalDigits() + - (state.ShouldPrintDot() ? static_cast(state.precision) + 1 : 0); + btd.TotalDigits() + (state.ShouldPrintDot() ? state.precision + 1 : 0); const auto padding = ExtraWidthToPadding( total_digits + (state.sign_char != '\0' ? 
1 : 0), state); state.sink->Append(padding.left_spaces, ' '); - if (state.sign_char != '\0') state.sink->Append(1, state.sign_char); + if (state.sign_char != '\0') + state.sink->Append(1, state.sign_char); state.sink->Append(padding.zeros, '0'); do { state.sink->Append(btd.CurrentDigits()); } while (btd.AdvanceDigits()); - if (state.ShouldPrintDot()) state.sink->Append(1, '.'); + if (state.ShouldPrintDot()) + state.sink->Append(1, '.'); state.sink->Append(state.precision, '0'); state.sink->Append(padding.right_spaces, ' '); }); @@ -594,8 +605,7 @@ void FormatFPositiveExpSlow(uint128 v, int exp, const FormatState &state) { // digits. void FormatFNegativeExpSlow(uint128 v, int exp, const FormatState &state) { const size_t total_digits = - /* 0 */ 1 + - (state.ShouldPrintDot() ? static_cast(state.precision) + 1 : 0); + /* 0 */ 1 + (state.ShouldPrintDot() ? state.precision + 1 : 0); auto padding = ExtraWidthToPadding(total_digits + (state.sign_char ? 1 : 0), state); padding.zeros += 1; @@ -606,7 +616,7 @@ void FormatFNegativeExpSlow(uint128 v, int exp, const FormatState &state) { if (state.ShouldPrintDot()) state.sink->Append(1, '.'); // Print digits - int digits_to_go = state.precision; + size_t digits_to_go = state.precision; FractionalDigitGenerator::RunConversion( v, exp, [&](FractionalDigitGenerator digit_gen) { @@ -666,7 +676,8 @@ void FormatFNegativeExpSlow(uint128 v, int exp, const FormatState &state) { template void FormatF(Int mantissa, int exp, const FormatState &state) { if (exp >= 0) { - const int total_bits = sizeof(Int) * 8 - LeadingZeros(mantissa) + exp; + const int total_bits = + static_cast(sizeof(Int) * 8) - LeadingZeros(mantissa) + exp; // Fallback to the slow stack-based approach if we can't do it in a 64 or // 128 bit state. @@ -686,9 +697,9 @@ void FormatF(Int mantissa, int exp, const FormatState &state) { // Grab the group of four bits (nibble) from `n`. E.g., nibble 1 corresponds to // bits 4-7. template -uint8_t GetNibble(Int n, int nibble_index) { +uint8_t GetNibble(Int n, size_t nibble_index) { constexpr Int mask_low_nibble = Int{0xf}; - int shift = nibble_index * 4; + int shift = static_cast(nibble_index * 4); n &= mask_low_nibble << shift; return static_cast((n >> shift) & 0xf); } @@ -696,9 +707,9 @@ uint8_t GetNibble(Int n, int nibble_index) { // Add one to the given nibble, applying carry to higher nibbles. Returns true // if overflow, false otherwise. template -bool IncrementNibble(int nibble_index, Int *n) { - constexpr int kShift = sizeof(Int) * 8 - 1; - constexpr int kNumNibbles = sizeof(Int) * 8 / 4; +bool IncrementNibble(size_t nibble_index, Int* n) { + constexpr size_t kShift = sizeof(Int) * 8 - 1; + constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4; Int before = *n >> kShift; // Here we essentially want to take the number 1 and move it into the requsted // nibble, then add it to *n to effectively increment the nibble. However, @@ -706,28 +717,32 @@ bool IncrementNibble(int nibble_index, Int *n) { // i.e., if the nibble_index is out of range. So therefore we check for this // and if we are out of range we just add 0 which leaves *n unchanged, which // seems like the reasonable thing to do in that case. - *n += ((nibble_index >= kNumNibbles) ? 0 : (Int{1} << (nibble_index * 4))); + *n += ((nibble_index >= kNumNibbles) + ? 0 + : (Int{1} << static_cast(nibble_index * 4))); Int after = *n >> kShift; return (before && !after) || (nibble_index >= kNumNibbles); } // Return a mask with 1's in the given nibble and all lower nibbles. 
template -Int MaskUpToNibbleInclusive(int nibble_index) { - constexpr int kNumNibbles = sizeof(Int) * 8 / 4; +Int MaskUpToNibbleInclusive(size_t nibble_index) { + constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4; static const Int ones = ~Int{0}; - return ones >> std::max(0, 4 * (kNumNibbles - nibble_index - 1)); + ++nibble_index; + return ones >> static_cast( + 4 * (std::max(kNumNibbles, nibble_index) - nibble_index)); } // Return a mask with 1's below the given nibble. template -Int MaskUpToNibbleExclusive(int nibble_index) { - return nibble_index <= 0 ? 0 : MaskUpToNibbleInclusive(nibble_index - 1); +Int MaskUpToNibbleExclusive(size_t nibble_index) { + return nibble_index == 0 ? 0 : MaskUpToNibbleInclusive(nibble_index - 1); } template -Int MoveToNibble(uint8_t nibble, int nibble_index) { - return Int{nibble} << (4 * nibble_index); +Int MoveToNibble(uint8_t nibble, size_t nibble_index) { + return Int{nibble} << static_cast(4 * nibble_index); } // Given mantissa size, find optimal # of mantissa bits to put in initial digit. @@ -744,10 +759,10 @@ Int MoveToNibble(uint8_t nibble, int nibble_index) { // a multiple of four. Once again, the goal is to have all fractional digits // represent real precision. template -constexpr int HexFloatLeadingDigitSizeInBits() { +constexpr size_t HexFloatLeadingDigitSizeInBits() { return std::numeric_limits::digits % 4 > 0 - ? std::numeric_limits::digits % 4 - : 4; + ? static_cast(std::numeric_limits::digits % 4) + : size_t{4}; } // This function captures the rounding behavior of glibc for hex float @@ -757,16 +772,17 @@ constexpr int HexFloatLeadingDigitSizeInBits() { // point that is not followed by 800000..., it disregards the parity and rounds // up if > 8 and rounds down if < 8. template -bool HexFloatNeedsRoundUp(Int mantissa, int final_nibble_displayed, +bool HexFloatNeedsRoundUp(Int mantissa, + size_t final_nibble_displayed, uint8_t leading) { // If the last nibble (hex digit) to be displayed is the lowest on in the // mantissa then that means that we don't have any further nibbles to inform // rounding, so don't round. - if (final_nibble_displayed <= 0) { + if (final_nibble_displayed == 0) { return false; } - int rounding_nibble_idx = final_nibble_displayed - 1; - constexpr int kTotalNibbles = sizeof(Int) * 8 / 4; + size_t rounding_nibble_idx = final_nibble_displayed - 1; + constexpr size_t kTotalNibbles = sizeof(Int) * 8 / 4; assert(final_nibble_displayed <= kTotalNibbles); Int mantissa_up_to_rounding_nibble_inclusive = mantissa & MaskUpToNibbleInclusive(rounding_nibble_idx); @@ -793,7 +809,7 @@ struct HexFloatTypeParams { } int min_exponent; - int leading_digit_size_bits; + size_t leading_digit_size_bits; }; // Hex Float Rounding. First check if we need to round; if so, then we do that @@ -803,10 +819,12 @@ struct HexFloatTypeParams { template void FormatARound(bool precision_specified, const FormatState &state, uint8_t *leading, Int *mantissa, int *exp) { - constexpr int kTotalNibbles = sizeof(Int) * 8 / 4; + constexpr size_t kTotalNibbles = sizeof(Int) * 8 / 4; // Index of the last nibble that we could display given precision. - int final_nibble_displayed = - precision_specified ? std::max(0, (kTotalNibbles - state.precision)) : 0; + size_t final_nibble_displayed = + precision_specified + ? (std::max(kTotalNibbles, state.precision) - state.precision) + : 0; if (HexFloatNeedsRoundUp(*mantissa, final_nibble_displayed, *leading)) { // Need to round up. 
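HexFloatNeedsRoundUp above decides whether the part of the mantissa that will not be displayed forces the shown hex digits upward. A simplified sketch of that decision for a uint64_t mantissa, assuming 1 <= final_nibble <= 15 is the index of the lowest nibble still displayed (the real function also handles the case where only the leading digit remains): round up past the halfway point, and break an exact tie to even, per the glibc-style behaviour described above.

#include <cassert>
#include <cstdint>

inline bool NeedsRoundUpSketch(uint64_t mantissa, int final_nibble) {
  assert(final_nibble >= 1 && final_nibble <= 15);
  const int tail_bits = 4 * final_nibble;  // Bits that will be discarded.
  const uint64_t tail = mantissa & ((uint64_t{1} << tail_bits) - 1);
  const uint64_t half = uint64_t{1} << (tail_bits - 1);
  if (tail != half) return tail > half;    // Clearly above or below half.
  // Exact "...8000" tie: round so the last displayed nibble ends up even.
  const uint64_t last_shown = (mantissa >> tail_bits) & 0xF;
  return (last_shown & 1) != 0;
}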
bool overflow = IncrementNibble(final_nibble_displayed, mantissa); @@ -830,9 +848,9 @@ void FormatARound(bool precision_specified, const FormatState &state, template void FormatANormalize(const HexFloatTypeParams float_traits, uint8_t *leading, Int *mantissa, int *exp) { - constexpr int kIntBits = sizeof(Int) * 8; + constexpr size_t kIntBits = sizeof(Int) * 8; static const Int kHighIntBit = Int{1} << (kIntBits - 1); - const int kLeadDigitBitsCount = float_traits.leading_digit_size_bits; + const size_t kLeadDigitBitsCount = float_traits.leading_digit_size_bits; // Normalize mantissa so that highest bit set is in MSB position, unless we // get interrupted by the exponent threshold. while (*mantissa && !(*mantissa & kHighIntBit)) { @@ -846,18 +864,18 @@ void FormatANormalize(const HexFloatTypeParams float_traits, uint8_t *leading, } // Extract bits for leading digit then shift them away leaving the // fractional part. - *leading = - static_cast(*mantissa >> (kIntBits - kLeadDigitBitsCount)); - *exp -= (*mantissa != 0) ? kLeadDigitBitsCount : *exp; - *mantissa <<= kLeadDigitBitsCount; + *leading = static_cast( + *mantissa >> static_cast(kIntBits - kLeadDigitBitsCount)); + *exp -= (*mantissa != 0) ? static_cast(kLeadDigitBitsCount) : *exp; + *mantissa <<= static_cast(kLeadDigitBitsCount); } template void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp, bool uppercase, const FormatState &state) { // Int properties. - constexpr int kIntBits = sizeof(Int) * 8; - constexpr int kTotalNibbles = sizeof(Int) * 8 / 4; + constexpr size_t kIntBits = sizeof(Int) * 8; + constexpr size_t kTotalNibbles = sizeof(Int) * 8 / 4; // Did the user specify a precision explicitly? const bool precision_specified = state.conv.precision() >= 0; @@ -903,16 +921,19 @@ void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp, } // ============ Fractional Digits ============ - int digits_emitted = 0; + size_t digits_emitted = 0; while (mantissa > 0) { *digits_iter++ = digits[GetNibble(mantissa, kTotalNibbles - 1)]; mantissa <<= 4; ++digits_emitted; } - int trailing_zeros = - precision_specified ? state.precision - digits_emitted : 0; - assert(trailing_zeros >= 0); - auto digits_result = string_view(digits_buffer, digits_iter - digits_buffer); + size_t trailing_zeros = 0; + if (precision_specified) { + assert(state.precision >= digits_emitted); + trailing_zeros = state.precision - digits_emitted; + } + auto digits_result = string_view( + digits_buffer, static_cast(digits_iter - digits_buffer)); // =============== Exponent ================== constexpr size_t kBufSizeForExpDecRepr = @@ -925,11 +946,11 @@ void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp, numbers_internal::FastIntToBuffer(exp < 0 ? -exp : exp, exp_buffer + 2); // ============ Assemble Result ============== - FinalPrint(state, // - digits_result, // 0xN.NNN... - 2, // offset in `data` to start padding if needed. - trailing_zeros, // num remaining mantissa padding zeros - exp_buffer); // exponent + FinalPrint(state, + digits_result, // 0xN.NNN... 
+ 2, // offset of any padding + static_cast(trailing_zeros), // remaining mantissa padding + exp_buffer); // exponent } char *CopyStringTo(absl::string_view v, char *out) { @@ -961,10 +982,10 @@ bool FallbackToSnprintf(const Float v, const FormatConversionSpecImpl &conv, int n = snprintf(&space[0], space.size(), fmt, w, p, v); if (n < 0) return false; if (static_cast(n) < space.size()) { - result = absl::string_view(space.data(), n); + result = absl::string_view(space.data(), static_cast(n)); break; } - space.resize(n + 1); + space.resize(static_cast(n) + 1); } sink->Append(result); return true; @@ -972,13 +993,13 @@ bool FallbackToSnprintf(const Float v, const FormatConversionSpecImpl &conv, // 128-bits in decimal: ceil(128*log(2)/log(10)) // or std::numeric_limits<__uint128_t>::digits10 -constexpr int kMaxFixedPrecision = 39; +constexpr size_t kMaxFixedPrecision = 39; -constexpr int kBufferLength = /*sign*/ 1 + - /*integer*/ kMaxFixedPrecision + - /*point*/ 1 + - /*fraction*/ kMaxFixedPrecision + - /*exponent e+123*/ 5; +constexpr size_t kBufferLength = /*sign*/ 1 + + /*integer*/ kMaxFixedPrecision + + /*point*/ 1 + + /*fraction*/ kMaxFixedPrecision + + /*exponent e+123*/ 5; struct Buffer { void push_front(char c) { @@ -1001,7 +1022,7 @@ struct Buffer { char last_digit() const { return end[-1] == '.' ? end[-2] : end[-1]; } - int size() const { return static_cast(end - begin); } + size_t size() const { return static_cast(end - begin); } char data[kBufferLength]; char *begin; @@ -1030,8 +1051,9 @@ bool ConvertNonNumericFloats(char sign_char, Float v, return false; } - return sink->PutPaddedString(string_view(text, ptr - text), conv.width(), -1, - conv.has_left_flag()); + return sink->PutPaddedString( + string_view(text, static_cast(ptr - text)), conv.width(), -1, + conv.has_left_flag()); } // Round up the last digit of the value. @@ -1068,12 +1090,12 @@ void PrintExponent(int exp, char e, Buffer *out) { } // Exponent digits. if (exp > 99) { - out->push_back(exp / 100 + '0'); - out->push_back(exp / 10 % 10 + '0'); - out->push_back(exp % 10 + '0'); + out->push_back(static_cast(exp / 100 + '0')); + out->push_back(static_cast(exp / 10 % 10 + '0')); + out->push_back(static_cast(exp % 10 + '0')); } else { - out->push_back(exp / 10 + '0'); - out->push_back(exp % 10 + '0'); + out->push_back(static_cast(exp / 10 + '0')); + out->push_back(static_cast(exp % 10 + '0')); } } @@ -1115,8 +1137,8 @@ Decomposed Decompose(Float v) { // In Fixed mode, we add a '.' at the end. // In Precision mode, we add a '.' after the first digit. template -int PrintIntegralDigits(Int digits, Buffer *out) { - int printed = 0; +size_t PrintIntegralDigits(Int digits, Buffer* out) { + size_t printed = 0; if (digits) { for (; digits; digits /= 10) out->push_front(digits % 10 + '0'); printed = out->size(); @@ -1135,10 +1157,10 @@ int PrintIntegralDigits(Int digits, Buffer *out) { } // Back out 'extra_digits' digits and round up if necessary. -bool RemoveExtraPrecision(int extra_digits, bool has_leftover_value, - Buffer *out, int *exp_out) { - if (extra_digits <= 0) return false; - +void RemoveExtraPrecision(size_t extra_digits, + bool has_leftover_value, + Buffer* out, + int* exp_out) { // Back out the extra digits out->end -= extra_digits; @@ -1158,15 +1180,17 @@ bool RemoveExtraPrecision(int extra_digits, bool has_leftover_value, if (needs_to_round_up) { RoundUp(out, exp_out); } - return true; } // Print the value into the buffer. 
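PrintExponent above appends the trailing exponent field of scientific output. A small sketch with the same digit logic, assuming |exp| < 1000 and writing into a std::string instead of the internal Buffer:

#include <string>

// Emit 'e' (or 'E'), an explicit sign, and two or three exponent digits.
inline void AppendExponentSketch(int exp, char e, std::string* out) {
  out->push_back(e);
  if (exp < 0) {
    out->push_back('-');
    exp = -exp;
  } else {
    out->push_back('+');
  }
  if (exp > 99) out->push_back(static_cast<char>('0' + exp / 100));
  out->push_back(static_cast<char>('0' + (exp / 10) % 10));
  out->push_back(static_cast<char>('0' + exp % 10));
}

// AppendExponentSketch(3, 'e', &s) appends "e+03"; exp = -120 appends "e-120".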
// This will not include the exponent, which will be returned in 'exp_out' for // Precision mode. template -bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out, - int *exp_out) { +bool FloatToBufferImpl(Int int_mantissa, + int exp, + size_t precision, + Buffer* out, + int* exp_out) { assert((CanFitMantissa())); const int int_bits = std::numeric_limits::digits; @@ -1182,14 +1206,16 @@ bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out, // The value will overflow the Int return false; } - int digits_printed = PrintIntegralDigits(int_mantissa << exp, out); - int digits_to_zero_pad = precision; + size_t digits_printed = PrintIntegralDigits(int_mantissa << exp, out); + size_t digits_to_zero_pad = precision; if (mode == FormatStyle::Precision) { - *exp_out = digits_printed - 1; - digits_to_zero_pad -= digits_printed - 1; - if (RemoveExtraPrecision(-digits_to_zero_pad, false, out, exp_out)) { + *exp_out = static_cast(digits_printed - 1); + if (digits_to_zero_pad < digits_printed - 1) { + RemoveExtraPrecision(digits_printed - 1 - digits_to_zero_pad, false, + out, exp_out); return true; } + digits_to_zero_pad -= digits_printed - 1; } for (; digits_to_zero_pad-- > 0;) out->push_back('0'); return true; @@ -1203,10 +1229,10 @@ bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out, const Int mask = (Int{1} << exp) - 1; // Print the integral part first. - int digits_printed = PrintIntegralDigits(int_mantissa >> exp, out); + size_t digits_printed = PrintIntegralDigits(int_mantissa >> exp, out); int_mantissa &= mask; - int fractional_count = precision; + size_t fractional_count = precision; if (mode == FormatStyle::Precision) { if (digits_printed == 0) { // Find the first non-zero digit, when in Precision mode. @@ -1222,20 +1248,21 @@ bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out, int_mantissa &= mask; } else { // We already have a digit, and a '.' - *exp_out = digits_printed - 1; - fractional_count -= *exp_out; - if (RemoveExtraPrecision(-fractional_count, int_mantissa != 0, out, - exp_out)) { + *exp_out = static_cast(digits_printed - 1); + if (fractional_count < digits_printed - 1) { // If we had enough digits, return right away. // The code below will try to round again otherwise. + RemoveExtraPrecision(digits_printed - 1 - fractional_count, + int_mantissa != 0, out, exp_out); return true; } + fractional_count -= digits_printed - 1; } } auto get_next_digit = [&] { int_mantissa *= 10; - int digit = static_cast(int_mantissa >> exp); + char digit = static_cast(int_mantissa >> exp); int_mantissa &= mask; return digit; }; @@ -1245,7 +1272,7 @@ bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out, out->push_back(get_next_digit() + '0'); } - int next_digit = get_next_digit(); + char next_digit = get_next_digit(); if (next_digit > 5 || (next_digit == 5 && (int_mantissa || out->last_digit() % 2 == 1))) { RoundUp(out, exp_out); @@ -1255,24 +1282,25 @@ bool FloatToBufferImpl(Int int_mantissa, int exp, int precision, Buffer *out, } template -bool FloatToBuffer(Decomposed decomposed, int precision, Buffer *out, - int *exp) { +bool FloatToBuffer(Decomposed decomposed, + size_t precision, + Buffer* out, + int* exp) { if (precision > kMaxFixedPrecision) return false; // Try with uint64_t. 
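For a binary fraction, FloatToBufferImpl above keeps the fractional bits under a mask and multiplies by 10 to pull out one digit at a time (the get_next_digit lambda). A toy version of that loop, assuming the value is mantissa * 2^-exp with 0 < exp < 60 and ignoring rounding and the Precision-mode bookkeeping:

#include <cstdint>
#include <string>

inline std::string FixedFromMantissaSketch(uint64_t mantissa, int exp,
                                           int precision) {
  const uint64_t mask = (uint64_t{1} << exp) - 1;     // Fractional-bit mask.
  std::string out = std::to_string(mantissa >> exp);  // Integral part.
  out.push_back('.');
  uint64_t frac = mantissa & mask;
  for (int i = 0; i < precision; ++i) {
    frac *= 10;  // The next digit moves above bit `exp`.
    out.push_back(static_cast<char>('0' + (frac >> exp)));
    frac &= mask;
  }
  return out;
}

// Example: 0.75 == 3 * 2^-2, so FixedFromMantissaSketch(3, 2, 4) == "0.7500".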
if (CanFitMantissa() && FloatToBufferImpl( - static_cast(decomposed.mantissa), - static_cast(decomposed.exponent), precision, out, exp)) + static_cast(decomposed.mantissa), decomposed.exponent, + precision, out, exp)) return true; #if defined(ABSL_HAVE_INTRINSIC_INT128) // If that is not enough, try with __uint128_t. return CanFitMantissa() && FloatToBufferImpl<__uint128_t, Float, mode>( - static_cast<__uint128_t>(decomposed.mantissa), - static_cast<__uint128_t>(decomposed.exponent), precision, out, - exp); + static_cast<__uint128_t>(decomposed.mantissa), decomposed.exponent, + precision, out, exp); #endif return false; } @@ -1280,12 +1308,15 @@ bool FloatToBuffer(Decomposed decomposed, int precision, Buffer *out, void WriteBufferToSink(char sign_char, absl::string_view str, const FormatConversionSpecImpl &conv, FormatSinkImpl *sink) { - int left_spaces = 0, zeros = 0, right_spaces = 0; - int missing_chars = - conv.width() >= 0 ? std::max(conv.width() - static_cast(str.size()) - - static_cast(sign_char != 0), - 0) - : 0; + size_t left_spaces = 0, zeros = 0, right_spaces = 0; + size_t missing_chars = 0; + if (conv.width() >= 0) { + const size_t conv_width_size_t = static_cast(conv.width()); + const size_t existing_chars = + str.size() + static_cast(sign_char != 0); + if (conv_width_size_t > existing_chars) + missing_chars = conv_width_size_t - existing_chars; + } if (conv.has_left_flag()) { right_spaces = missing_chars; } else if (conv.has_zero_flag()) { @@ -1321,7 +1352,8 @@ bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv, return true; } - int precision = conv.precision() < 0 ? 6 : conv.precision(); + size_t precision = + conv.precision() < 0 ? 6 : static_cast(conv.precision()); int exp = 0; @@ -1348,12 +1380,12 @@ bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv, &buffer); } else if (c == FormatConversionCharInternal::g || c == FormatConversionCharInternal::G) { - precision = std::max(0, precision - 1); + precision = std::max(precision, size_t{1}) - 1; if (!FloatToBuffer(decomposed, precision, &buffer, &exp)) { return FallbackToSnprintf(v, conv, sink); } - if (precision + 1 > exp && exp >= -4) { + if ((exp < 0 || precision + 1 > static_cast(exp)) && exp >= -4) { if (exp < 0) { // Have 1.23456, needs 0.00123456 // Move the first digit @@ -1388,9 +1420,11 @@ bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv, return false; } - WriteBufferToSink(sign_char, - absl::string_view(buffer.begin, buffer.end - buffer.begin), - conv, sink); + WriteBufferToSink( + sign_char, + absl::string_view(buffer.begin, + static_cast(buffer.end - buffer.begin)), + conv, sink); return true; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/output.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/output.h index 8030dae00f..15e751ab6f 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/output.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/output.h @@ -22,6 +22,7 @@ #define ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_ #include +#include #include #include @@ -71,7 +72,7 @@ inline void AbslFormatFlush(std::string* out, string_view s) { out->append(s.data(), s.size()); } inline void AbslFormatFlush(std::ostream* out, string_view s) { - out->write(s.data(), s.size()); + out->write(s.data(), static_cast(s.size())); } inline void AbslFormatFlush(FILERawSink* sink, string_view 
v) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc index 2c9c07dacc..13731ee247 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc @@ -56,7 +56,7 @@ ABSL_CONST_INIT const ConvTag kTags[256] = { CC::X, {}, {}, {}, {}, {}, {}, {}, // XYZ[\]^_ {}, CC::a, {}, CC::c, CC::d, CC::e, CC::f, CC::g, // `abcdefg LM::h, CC::i, LM::j, {}, LM::l, {}, CC::n, CC::o, // hijklmno - CC::p, LM::q, {}, CC::s, LM::t, CC::u, {}, {}, // pqrstuvw + CC::p, LM::q, {}, CC::s, LM::t, CC::u, CC::v, {}, // pqrstuvw CC::x, {}, LM::z, {}, {}, {}, {}, {}, // xyz{|}! {}, {}, {}, {}, {}, {}, {}, {}, // 80-87 {}, {}, {}, {}, {}, {}, {}, {}, // 88-8f @@ -202,6 +202,8 @@ const char *ConsumeConversion(const char *pos, const char *const end, auto tag = GetTagForChar(c); + if (ABSL_PREDICT_FALSE(c == 'v' && (pos - original_pos) != 1)) return nullptr; + if (ABSL_PREDICT_FALSE(!tag.is_conv())) { if (ABSL_PREDICT_FALSE(!tag.is_length())) return nullptr; @@ -219,6 +221,8 @@ const char *ConsumeConversion(const char *pos, const char *const end, conv->length_mod = length_mod; } tag = GetTagForChar(c); + + if (ABSL_PREDICT_FALSE(c == 'v')) return nullptr; if (ABSL_PREDICT_FALSE(!tag.is_conv())) return nullptr; } @@ -312,11 +316,11 @@ bool ParsedFormatBase::MatchesConversions( std::initializer_list convs) const { std::unordered_set used; auto add_if_valid_conv = [&](int pos, char c) { - if (static_cast(pos) > convs.size() || - !Contains(convs.begin()[pos - 1], c)) - return false; - used.insert(pos); - return true; + if (static_cast(pos) > convs.size() || + !Contains(convs.begin()[pos - 1], c)) + return false; + used.insert(pos); + return true; }; for (const ConversionItem &item : items_) { if (!item.is_conversion) continue; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h index ad8646edff..a81bac8333 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h @@ -151,13 +151,15 @@ bool ParseFormatString(string_view src, Consumer consumer) { const char* p = src.data(); const char* const end = p + src.size(); while (p != end) { - const char* percent = static_cast(memchr(p, '%', end - p)); + const char* percent = + static_cast(memchr(p, '%', static_cast(end - p))); if (!percent) { // We found the last substring. - return consumer.Append(string_view(p, end - p)); + return consumer.Append(string_view(p, static_cast(end - p))); } // We found a percent, so push the text run then process the percent. 
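The loop above finds the next '%' with memchr, appends the literal run, then hands the conversion text to ConsumeUnboundConversion. A rough standalone sketch of just the scanning part, assuming every conversion is a single character such as "%d" (flag, width, and precision parsing elided) and that the callback returns false to stop:

#include <cstring>
#include <string>

template <typename Callback>
bool ForEachTextRunSketch(const char* p, const char* end, Callback on_text) {
  while (p != end) {
    const char* percent = static_cast<const char*>(
        std::memchr(p, '%', static_cast<size_t>(end - p)));
    if (percent == nullptr) return on_text(std::string(p, end));  // Tail run.
    if (!on_text(std::string(p, percent))) return false;          // Literal run.
    if (percent + 1 == end) return false;          // Dangling '%' is malformed.
    if (percent[1] == '%' && !on_text(std::string("%"))) return false;
    p = percent + 2;  // Skip "%%" or the one-character conversion.
  }
  return true;
}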
- if (ABSL_PREDICT_FALSE(!consumer.Append(string_view(p, percent - p)))) { + if (ABSL_PREDICT_FALSE(!consumer.Append( + string_view(p, static_cast(percent - p))))) { return false; } if (ABSL_PREDICT_FALSE(percent + 1 >= end)) return false; @@ -188,7 +190,8 @@ bool ParseFormatString(string_view src, Consumer consumer) { p = ConsumeUnboundConversion(percent + 1, end, &conv, &next_arg); if (ABSL_PREDICT_FALSE(p == nullptr)) return false; if (ABSL_PREDICT_FALSE(!consumer.ConvertOne( - conv, string_view(percent + 1, p - (percent + 1))))) { + conv, string_view(percent + 1, + static_cast(p - (percent + 1)))))) { return false; } } else { @@ -242,7 +245,8 @@ class ParsedFormatBase { string_view text(base, 0); for (const auto& item : items_) { const char* const end = text.data() + text.size(); - text = string_view(end, (base + item.text_end) - end); + text = + string_view(end, static_cast((base + item.text_end) - end)); if (item.is_conversion) { if (!consumer.ConvertOne(item.conv, text)) return false; } else { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser_test.cc index fe0d296360..c3e825fe02 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_format/parser_test.cc @@ -110,10 +110,13 @@ TEST_F(ConsumeUnboundConversionTest, ConsumeSpecification) { {__LINE__, "ba", "", "ba"}, // 'b' is invalid {__LINE__, "l", "", "l" }, // just length mod isn't okay {__LINE__, "d", "d", "" }, // basic + {__LINE__, "v", "v", "" }, // basic {__LINE__, "d ", "d", " " }, // leave suffix {__LINE__, "dd", "d", "d" }, // don't be greedy {__LINE__, "d9", "d", "9" }, // leave non-space suffix {__LINE__, "dzz", "d", "zz"}, // length mod as suffix + {__LINE__, "3v", "", "3v"}, // 'v' cannot have modifiers + {__LINE__, "hv", "", "hv"}, // 'v' cannot have modifiers {__LINE__, "1$*2$d", "1$*2$d", "" }, // arg indexing and * allowed. 
{__LINE__, "0-14.3hhd", "0-14.3hhd", ""}, // precision, width {__LINE__, " 0-+#14.3hhd", " 0-+#14.3hhd", ""}, // flags diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_join_internal.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_join_internal.h index 31dbf672f0..d97d5033d8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_join_internal.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_join_internal.h @@ -229,10 +229,11 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s, std::string result; if (start != end) { // Sums size - size_t result_size = start->size(); + auto&& start_value = *start; + size_t result_size = start_value.size(); for (Iterator it = start; ++it != end;) { result_size += s.size(); - result_size += it->size(); + result_size += (*it).size(); } if (result_size > 0) { @@ -240,13 +241,15 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s, // Joins strings char* result_buf = &*result.begin(); - memcpy(result_buf, start->data(), start->size()); - result_buf += start->size(); + + memcpy(result_buf, start_value.data(), start_value.size()); + result_buf += start_value.size(); for (Iterator it = start; ++it != end;) { memcpy(result_buf, s.data(), s.size()); result_buf += s.size(); - memcpy(result_buf, it->data(), it->size()); - result_buf += it->size(); + auto&& value = *it; + memcpy(result_buf, value.data(), value.size()); + result_buf += value.size(); } } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h index e766421617..35edf3aa43 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h @@ -132,7 +132,8 @@ class SplitIterator { const absl::string_view text = splitter_->text(); const absl::string_view d = delimiter_.Find(text, pos_); if (d.data() == text.data() + text.size()) state_ = kLastState; - curr_ = text.substr(pos_, d.data() - (text.data() + pos_)); + curr_ = text.substr(pos_, + static_cast(d.data() - (text.data() + pos_))); pos_ += curr_.size() + d.size(); } while (!predicate_(curr_)); return *this; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/string_constant.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/string_constant.h index a11336b7f0..f68b17d75e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/string_constant.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/string_constant.h @@ -35,17 +35,25 @@ namespace strings_internal { // below. template struct StringConstant { + private: + static constexpr bool TryConstexprEval(absl::string_view view) { + return view.empty() || 2 * view[0] != 1; + } + + public: static constexpr absl::string_view value = T{}(); constexpr absl::string_view operator()() const { return value; } // Check to be sure `view` points to constant data. // Otherwise, it can't be constant evaluated. 
- static_assert(value.empty() || 2 * value[0] != 1, + static_assert(TryConstexprEval(value), "The input string_view must point to constant data."); }; +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template -constexpr absl::string_view StringConstant::value; // NOLINT +constexpr absl::string_view StringConstant::value; +#endif // Factory function for `StringConstant` instances. // It supports callables that have a constexpr default constructor and a diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/stringify_sink.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/stringify_sink.cc new file mode 100644 index 0000000000..7c6995abb1 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/stringify_sink.cc @@ -0,0 +1,28 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/strings/internal/stringify_sink.h" +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace strings_internal { + +void StringifySink::Append(size_t count, char ch) { buffer_.append(count, ch); } + +void StringifySink::Append(string_view v) { + buffer_.append(v.data(), v.size()); +} + +} // namespace strings_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/stringify_sink.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/stringify_sink.h new file mode 100644 index 0000000000..fc3747bb72 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/stringify_sink.h @@ -0,0 +1,57 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_ +#define ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace strings_internal { +class StringifySink { + public: + void Append(size_t count, char ch); + + void Append(string_view v); + + // Support `absl::Format(&sink, format, args...)`. 
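The new StringifySink is the collector behind the AbslStringify extension point that the %v support in this patch targets. A hedged usage sketch of that extension point (the Point type is illustrative, and the exact %v behaviour is defined by the rest of the patch, not by this example):

#include "absl/strings/str_format.h"

struct Point {
  int x = 0;
  int y = 0;

  // Found by ADL; the library passes a sink (such as the StringifySink above)
  // and the type describes itself through absl::Format.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    absl::Format(&sink, "(%d, %d)", p.x, p.y);
  }
};

// Intended use once %v is wired through: absl::StrFormat("%v", Point{2, 3}).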
+ friend void AbslFormatFlush(StringifySink* sink, absl::string_view v) { + sink->Append(v); + } + + private: + template + friend string_view ExtractStringification(StringifySink& sink, const T& v); + + std::string buffer_; +}; + +template +string_view ExtractStringification(StringifySink& sink, const T& v) { + AbslStringify(sink, v); + return sink.buffer_; +} + +} // namespace strings_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/utf8.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/utf8.cc index 8fd8edc1ec..7ecb93dfbe 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/utf8.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/internal/utf8.cc @@ -25,25 +25,25 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) { *buffer = static_cast(utf8_char); return 1; } else if (utf8_char <= 0x7FF) { - buffer[1] = 0x80 | (utf8_char & 0x3F); + buffer[1] = static_cast(0x80 | (utf8_char & 0x3F)); utf8_char >>= 6; - buffer[0] = 0xC0 | utf8_char; + buffer[0] = static_cast(0xC0 | utf8_char); return 2; } else if (utf8_char <= 0xFFFF) { - buffer[2] = 0x80 | (utf8_char & 0x3F); + buffer[2] = static_cast(0x80 | (utf8_char & 0x3F)); utf8_char >>= 6; - buffer[1] = 0x80 | (utf8_char & 0x3F); + buffer[1] = static_cast(0x80 | (utf8_char & 0x3F)); utf8_char >>= 6; - buffer[0] = 0xE0 | utf8_char; + buffer[0] = static_cast(0xE0 | utf8_char); return 3; } else { - buffer[3] = 0x80 | (utf8_char & 0x3F); + buffer[3] = static_cast(0x80 | (utf8_char & 0x3F)); utf8_char >>= 6; - buffer[2] = 0x80 | (utf8_char & 0x3F); + buffer[2] = static_cast(0x80 | (utf8_char & 0x3F)); utf8_char >>= 6; - buffer[1] = 0x80 | (utf8_char & 0x3F); + buffer[1] = static_cast(0x80 | (utf8_char & 0x3F)); utf8_char >>= 6; - buffer[0] = 0xF0 | utf8_char; + buffer[0] = static_cast(0xF0 | utf8_char); return 4; } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.cc index 966d94bd70..2987158e07 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.cc @@ -190,32 +190,32 @@ char* numbers_internal::FastIntToBuffer(uint32_t i, char* buffer) { if (i >= 1000) goto lt10_000; digits = i / 100; i -= digits * 100; - *buffer++ = '0' + digits; + *buffer++ = '0' + static_cast(digits); goto lt100; } if (i < 1000000) { // 1,000,000 if (i >= 100000) goto lt1_000_000; digits = i / 10000; // 10,000 i -= digits * 10000; - *buffer++ = '0' + digits; + *buffer++ = '0' + static_cast(digits); goto lt10_000; } if (i < 100000000) { // 100,000,000 if (i >= 10000000) goto lt100_000_000; digits = i / 1000000; // 1,000,000 i -= digits * 1000000; - *buffer++ = '0' + digits; + *buffer++ = '0' + static_cast(digits); goto lt1_000_000; } // we already know that i < 1,000,000,000 digits = i / 100000000; // 100,000,000 i -= digits * 100000000; - *buffer++ = '0' + digits; + *buffer++ = '0' + static_cast(digits); goto lt100_000_000; } char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) { - uint32_t u = i; + uint32_t u = static_cast(i); if (i < 0) { *buffer++ = '-'; // We need to do the negation in modular (i.e., "unsigned") @@ -268,7 +268,7 @@ char* numbers_internal::FastIntToBuffer(uint64_t i, 
char* buffer) { } char* numbers_internal::FastIntToBuffer(int64_t i, char* buffer) { - uint64_t u = i; + uint64_t u = static_cast(i); if (i < 0) { *buffer++ = '-'; u = 0 - u; @@ -329,7 +329,7 @@ static std::pair PowFive(uint64_t num, int expfive) { result = Mul32(result, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5); expfive -= 13; } - constexpr int powers_of_five[13] = { + constexpr uint32_t powers_of_five[13] = { 1, 5, 5 * 5, @@ -404,14 +404,14 @@ static ExpDigits SplitToSix(const double value) { // we multiply it by 65536 and see if the fractional part is close to 32768. // (The number doesn't have to be a power of two,but powers of two are faster) uint64_t d64k = d * 65536; - int dddddd; // A 6-digit decimal integer. + uint32_t dddddd; // A 6-digit decimal integer. if ((d64k % 65536) == 32767 || (d64k % 65536) == 32768) { // OK, it's fairly likely that precision was lost above, which is // not a surprise given only 52 mantissa bits are available. Therefore // redo the calculation using 128-bit numbers. (64 bits are not enough). // Start out with digits rounded down; maybe add one below. - dddddd = static_cast(d64k / 65536); + dddddd = static_cast(d64k / 65536); // mantissa is a 64-bit integer representing M.mmm... * 2^63. The actual // value we're representing, of course, is M.mmm... * 2^exp2. @@ -461,7 +461,7 @@ static ExpDigits SplitToSix(const double value) { } } else { // Here, we are not close to the edge. - dddddd = static_cast((d64k + 32768) / 65536); + dddddd = static_cast((d64k + 32768) / 65536); } if (dddddd == 1000000) { dddddd = 100000; @@ -469,7 +469,7 @@ static ExpDigits SplitToSix(const double value) { } exp_dig.exponent = exp; - int two_digits = dddddd / 10000; + uint32_t two_digits = dddddd / 10000; dddddd -= two_digits * 10000; numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[0]); @@ -499,15 +499,15 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { if (std::signbit(d)) *out++ = '-'; *out++ = '0'; *out = 0; - return out - buffer; + return static_cast(out - buffer); } if (d < 0) { *out++ = '-'; d = -d; } - if (std::isinf(d)) { + if (d > std::numeric_limits::max()) { strcpy(out, "inf"); // NOLINT(runtime/printf) - return out + 3 - buffer; + return static_cast(out + 3 - buffer); } auto exp_dig = SplitToSix(d); @@ -519,7 +519,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { case 5: memcpy(out, &digits[0], 6), out += 6; *out = 0; - return out - buffer; + return static_cast(out - buffer); case 4: memcpy(out, &digits[0], 5), out += 5; if (digits[5] != '0') { @@ -527,7 +527,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { *out++ = digits[5]; } *out = 0; - return out - buffer; + return static_cast(out - buffer); case 3: memcpy(out, &digits[0], 4), out += 4; if ((digits[5] | digits[4]) != '0') { @@ -536,7 +536,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { if (digits[5] != '0') *out++ = digits[5]; } *out = 0; - return out - buffer; + return static_cast(out - buffer); case 2: memcpy(out, &digits[0], 3), out += 3; *out++ = '.'; @@ -545,7 +545,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { while (out[-1] == '0') --out; if (out[-1] == '.') --out; *out = 0; - return out - buffer; + return static_cast(out - buffer); case 1: memcpy(out, &digits[0], 2), out += 2; *out++ = '.'; @@ -554,7 +554,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { while (out[-1] == '0') --out; if (out[-1] == '.') 
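The kVmaxOverBase / kVminOverBase tables cache numeric_limits max() (or min()) divided by the base, so the parse loop further down can reject overflow before multiplying. The check those tables enable, shown for uint64_t with the division done inline instead of through the table, assuming 2 <= base <= 36 and digit < base:

#include <cstdint>
#include <limits>

// Accumulate one digit into *value, returning false instead of overflowing.
inline bool AccumulateDigitSketch(uint64_t* value, uint64_t base,
                                  uint64_t digit) {
  constexpr uint64_t vmax = std::numeric_limits<uint64_t>::max();
  const uint64_t vmax_over_base = vmax / base;  // Cached by the real tables.
  if (*value > vmax_over_base) return false;    // *value * base would overflow.
  *value *= base;
  if (*value > vmax - digit) return false;      // *value + digit would overflow.
  *value += digit;
  return true;
}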
--out; *out = 0; - return out - buffer; + return static_cast(out - buffer); case 0: memcpy(out, &digits[0], 1), out += 1; *out++ = '.'; @@ -563,7 +563,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { while (out[-1] == '0') --out; if (out[-1] == '.') --out; *out = 0; - return out - buffer; + return static_cast(out - buffer); case -4: out[2] = '0'; ++out; @@ -582,7 +582,7 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { out += 6; while (out[-1] == '0') --out; *out = 0; - return out - buffer; + return static_cast(out - buffer); } assert(exp < -4 || exp >= 6); out[0] = digits[0]; @@ -601,12 +601,12 @@ size_t numbers_internal::SixDigitsToBuffer(double d, char* const buffer) { if (exp > 99) { int dig1 = exp / 100; exp -= dig1 * 100; - *out++ = '0' + dig1; + *out++ = '0' + static_cast(dig1); } - PutTwoDigits(exp, out); + PutTwoDigits(static_cast(exp), out); out += 2; *out = 0; - return out - buffer; + return static_cast(out - buffer); } namespace { @@ -642,10 +642,12 @@ inline bool safe_parse_sign_and_base(absl::string_view* text /*inout*/, int base = *base_ptr; // Consume whitespace. - while (start < end && absl::ascii_isspace(start[0])) { + while (start < end && + absl::ascii_isspace(static_cast(start[0]))) { ++start; } - while (start < end && absl::ascii_isspace(end[-1])) { + while (start < end && + absl::ascii_isspace(static_cast(end[-1]))) { --end; } if (start >= end) { @@ -694,7 +696,7 @@ inline bool safe_parse_sign_and_base(absl::string_view* text /*inout*/, } else { return false; } - *text = absl::string_view(start, end - start); + *text = absl::string_view(start, static_cast(end - start)); *base_ptr = base; return true; } @@ -757,8 +759,8 @@ struct LookupTables { // // uint128& operator/=(uint128) is not constexpr, so hardcode the resulting // array to avoid a static initializer. -template<> -const uint128 LookupTables::kVmaxOverBase[] = { +template <> +ABSL_CONST_INIT const uint128 LookupTables::kVmaxOverBase[] = { 0, 0, MakeUint128(9223372036854775807u, 18446744073709551615u), @@ -809,8 +811,8 @@ const uint128 LookupTables::kVmaxOverBase[] = { // // int128& operator/=(int128) is not constexpr, so hardcode the resulting array // to avoid a static initializer. -template<> -const int128 LookupTables::kVmaxOverBase[] = { +template <> +ABSL_CONST_INIT const int128 LookupTables::kVmaxOverBase[] = { 0, 0, MakeInt128(4611686018427387903, 18446744073709551615u), @@ -862,8 +864,8 @@ const int128 LookupTables::kVmaxOverBase[] = { // // int128& operator/=(int128) is not constexpr, so hardcode the resulting array // to avoid a static initializer. 
-template<> -const int128 LookupTables::kVminOverBase[] = { +template <> +ABSL_CONST_INIT const int128 LookupTables::kVminOverBase[] = { 0, 0, MakeInt128(-4611686018427387904, 0u), @@ -904,11 +906,11 @@ const int128 LookupTables::kVminOverBase[] = { }; template -const IntType LookupTables::kVmaxOverBase[] = +ABSL_CONST_INIT const IntType LookupTables::kVmaxOverBase[] = X_OVER_BASE_INITIALIZER(std::numeric_limits::max()); template -const IntType LookupTables::kVminOverBase[] = +ABSL_CONST_INIT const IntType LookupTables::kVminOverBase[] = X_OVER_BASE_INITIALIZER(std::numeric_limits::min()); #undef X_OVER_BASE_INITIALIZER @@ -920,17 +922,18 @@ inline bool safe_parse_positive_int(absl::string_view text, int base, const IntType vmax = std::numeric_limits::max(); assert(vmax > 0); assert(base >= 0); - assert(vmax >= static_cast(base)); + const IntType base_inttype = static_cast(base); + assert(vmax >= base_inttype); const IntType vmax_over_base = LookupTables::kVmaxOverBase[base]; assert(base < 2 || - std::numeric_limits::max() / base == vmax_over_base); + std::numeric_limits::max() / base_inttype == vmax_over_base); const char* start = text.data(); const char* end = start + text.size(); // loop over digits for (; start < end; ++start) { unsigned char c = static_cast(start[0]); - int digit = kAsciiToInt[c]; - if (digit >= base) { + IntType digit = static_cast(kAsciiToInt[c]); + if (digit >= base_inttype) { *value_p = value; return false; } @@ -938,7 +941,7 @@ inline bool safe_parse_positive_int(absl::string_view text, int base, *value_p = vmax; return false; } - value *= base; + value *= base_inttype; if (value > vmax - digit) { *value_p = vmax; return false; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.h index 1780bb44bd..86c84ed39b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers.h @@ -23,8 +23,12 @@ #ifndef ABSL_STRINGS_NUMBERS_H_ #define ABSL_STRINGS_NUMBERS_H_ -#ifdef __SSE4_2__ -#include +#ifdef __SSSE3__ +#include +#endif + +#ifdef _MSC_VER +#include #endif #include @@ -36,14 +40,7 @@ #include #include "absl/base/config.h" -#ifdef __SSE4_2__ -// TODO(jorg): Remove this when we figure out the right way -// to swap bytes on SSE 4.2 that works with the compilers -// we claim to support. Also, add tests for the compiler -// that doesn't support the Intel _bswap64 intrinsic but -// does support all the SSE 4.2 intrinsics #include "absl/base/internal/endian.h" -#endif #include "absl/base/macros.h" #include "absl/base/port.h" #include "absl/numeric/bits.h" @@ -96,6 +93,25 @@ ABSL_MUST_USE_RESULT bool SimpleAtod(absl::string_view str, double* out); // unspecified state. ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str, bool* out); +// SimpleHexAtoi() +// +// Converts a hexadecimal string (optionally followed or preceded by ASCII +// whitespace) to an integer, returning `true` if successful. Only valid base-16 +// hexadecimal integers whose value falls within the range of the integer type +// (optionally preceded by a `+` or `-`) can be converted. A valid hexadecimal +// value may include both upper and lowercase character symbols, and may +// optionally include a leading "0x" (or "0X") number prefix, which is ignored +// by this function. If any errors are encountered, this function returns +// `false`, leaving `out` in an unspecified state. 
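For illustration only (not part of the patch): a minimal usage sketch of the SimpleHexAtoi() behavior documented above. The function and its 128-bit overloads are declared immediately below; the variable names here are hypothetical.

    #include <cstdint>
    #include "absl/strings/numbers.h"

    void SimpleHexAtoiSketch() {
      uint32_t v = 0;
      // Mixed-case hex digits and an optional "0x"/"0X" prefix are accepted.
      bool ok = absl::SimpleHexAtoi("0x1A2b", &v);  // ok == true, v == 0x1A2B
      // Leading/trailing ASCII whitespace is ignored.
      ok = absl::SimpleHexAtoi("  ff  ", &v);       // ok == true, v == 0xFF
      // Invalid digits (or out-of-range values) fail; v is left unspecified.
      ok = absl::SimpleHexAtoi("zz", &v);           // ok == false
      (void)ok;
    }
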
+template +ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str, int_type* out); + +// Overloads of SimpleHexAtoi() for 128 bit integers. +ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, + absl::int128* out); +ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, + absl::uint128* out); + ABSL_NAMESPACE_END } // namespace absl @@ -162,16 +178,19 @@ char* FastIntToBuffer(int_type i, char* buffer) { // TODO(jorg): This signed-ness check is used because it works correctly // with enums, and it also serves to check that int_type is not a pointer. // If one day something like std::is_signed works, switch to it. - if (static_cast(1) - 2 < 0) { // Signed - if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit + // These conditions are constexpr bools to suppress MSVC warning C4127. + constexpr bool kIsSigned = static_cast(1) - 2 < 0; + constexpr bool kUse64Bit = sizeof(i) > 32 / 8; + if (kIsSigned) { + if (kUse64Bit) { return FastIntToBuffer(static_cast(i), buffer); - } else { // 32-bit or less + } else { return FastIntToBuffer(static_cast(i), buffer); } - } else { // Unsigned - if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit + } else { + if (kUse64Bit) { return FastIntToBuffer(static_cast(i), buffer); - } else { // 32-bit or less + } else { return FastIntToBuffer(static_cast(i), buffer); } } @@ -190,22 +209,25 @@ ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s, int_type* out, // TODO(jorg): This signed-ness check is used because it works correctly // with enums, and it also serves to check that int_type is not a pointer. // If one day something like std::is_signed works, switch to it. - if (static_cast(1) - 2 < 0) { // Signed - if (sizeof(*out) == 64 / 8) { // 64-bit + // These conditions are constexpr bools to suppress MSVC warning C4127. + constexpr bool kIsSigned = static_cast(1) - 2 < 0; + constexpr bool kUse64Bit = sizeof(*out) == 64 / 8; + if (kIsSigned) { + if (kUse64Bit) { int64_t val; parsed = numbers_internal::safe_strto64_base(s, &val, base); *out = static_cast(val); - } else { // 32-bit + } else { int32_t val; parsed = numbers_internal::safe_strto32_base(s, &val, base); *out = static_cast(val); } - } else { // Unsigned - if (sizeof(*out) == 64 / 8) { // 64-bit + } else { + if (kUse64Bit) { uint64_t val; parsed = numbers_internal::safe_strtou64_base(s, &val, base); *out = static_cast(val); - } else { // 32-bit + } else { uint32_t val; parsed = numbers_internal::safe_strtou32_base(s, &val, base); *out = static_cast(val); @@ -221,7 +243,7 @@ ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s, int_type* out, // Returns the number of non-pad digits of the output (it can never be zero // since 0 has one digit). inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) { -#ifdef __SSE4_2__ +#ifdef ABSL_INTERNAL_HAVE_SSSE3 uint64_t be = absl::big_endian::FromHost64(val); const auto kNibbleMask = _mm_set1_epi8(0xf); const auto kHexDigits = _mm_setr_epi8('0', '1', '2', '3', '4', '5', '6', '7', @@ -240,7 +262,7 @@ inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) { } #endif // | 0x1 so that even 0 has 1 digit. 
- return 16 - countl_zero(val | 0x1) / 4; + return 16 - static_cast(countl_zero(val | 0x1) / 4); } } // namespace numbers_internal @@ -260,6 +282,21 @@ ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str, return numbers_internal::safe_strtou128_base(str, out, 10); } +template +ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str, int_type* out) { + return numbers_internal::safe_strtoi_base(str, out, 16); +} + +ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, + absl::int128* out) { + return numbers_internal::safe_strto128_base(str, out, 16); +} + +ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, + absl::uint128* out) { + return numbers_internal::safe_strtou128_base(str, out, 16); +} + ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers_test.cc index f3103106b4..b3c098d1a8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/numbers_test.cc @@ -19,6 +19,7 @@ #include #include // NOLINT(build/c++11) +#include #include #include #include @@ -47,6 +48,7 @@ namespace { using absl::SimpleAtoi; +using absl::SimpleHexAtoi; using absl::numbers_internal::kSixDigitsToBufferSize; using absl::numbers_internal::safe_strto32_base; using absl::numbers_internal::safe_strto64_base; @@ -387,9 +389,209 @@ TEST(NumbersTest, Atoi) { } TEST(NumbersTest, Atod) { + // DBL_TRUE_MIN and FLT_TRUE_MIN were not mandated in before C++17. +#if !defined(DBL_TRUE_MIN) + static constexpr double DBL_TRUE_MIN = + 4.940656458412465441765687928682213723650598026143247644255856825e-324; +#endif +#if !defined(FLT_TRUE_MIN) + static constexpr float FLT_TRUE_MIN = + 1.401298464324817070923729583289916131280261941876515771757068284e-45f; +#endif + double d; - EXPECT_TRUE(absl::SimpleAtod("nan", &d)); + float f; + + // NaN can be spelled in multiple ways. + EXPECT_TRUE(absl::SimpleAtod("NaN", &d)); EXPECT_TRUE(std::isnan(d)); + EXPECT_TRUE(absl::SimpleAtod("nAN", &d)); + EXPECT_TRUE(std::isnan(d)); + EXPECT_TRUE(absl::SimpleAtod("-nan", &d)); + EXPECT_TRUE(std::isnan(d)); + + // Likewise for Infinity. + EXPECT_TRUE(absl::SimpleAtod("inf", &d)); + EXPECT_TRUE(std::isinf(d) && (d > 0)); + EXPECT_TRUE(absl::SimpleAtod("+Infinity", &d)); + EXPECT_TRUE(std::isinf(d) && (d > 0)); + EXPECT_TRUE(absl::SimpleAtod("-INF", &d)); + EXPECT_TRUE(std::isinf(d) && (d < 0)); + + // Parse DBL_MAX. Parsing something more than twice as big should also + // produce infinity. + EXPECT_TRUE(absl::SimpleAtod("1.7976931348623157e+308", &d)); + EXPECT_EQ(d, 1.7976931348623157e+308); + EXPECT_TRUE(absl::SimpleAtod("5e308", &d)); + EXPECT_TRUE(std::isinf(d) && (d > 0)); + // Ditto, but for FLT_MAX. + EXPECT_TRUE(absl::SimpleAtof("3.4028234663852886e+38", &f)); + EXPECT_EQ(f, 3.4028234663852886e+38f); + EXPECT_TRUE(absl::SimpleAtof("7e38", &f)); + EXPECT_TRUE(std::isinf(f) && (f > 0)); + + // Parse the largest N such that parsing 1eN produces a finite value and the + // smallest M = N + 1 such that parsing 1eM produces infinity. + // + // The 309 exponent (and 39) confirms the "definition of + // kEiselLemireMaxExclExp10" comment in charconv.cc. 
+ EXPECT_TRUE(absl::SimpleAtod("1e308", &d)); + EXPECT_EQ(d, 1e308); + EXPECT_FALSE(std::isinf(d)); + EXPECT_TRUE(absl::SimpleAtod("1e309", &d)); + EXPECT_TRUE(std::isinf(d)); + // Ditto, but for Atof instead of Atod. + EXPECT_TRUE(absl::SimpleAtof("1e38", &f)); + EXPECT_EQ(f, 1e38f); + EXPECT_FALSE(std::isinf(f)); + EXPECT_TRUE(absl::SimpleAtof("1e39", &f)); + EXPECT_TRUE(std::isinf(f)); + + // Parse the largest N such that parsing 9.999999999999999999eN, with 19 + // nines, produces a finite value. + // + // 9999999999999999999, with 19 nines but no decimal point, is the largest + // "repeated nines" integer that fits in a uint64_t. + EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e307", &d)); + EXPECT_EQ(d, 9.999999999999999999e307); + EXPECT_FALSE(std::isinf(d)); + EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e308", &d)); + EXPECT_TRUE(std::isinf(d)); + // Ditto, but for Atof instead of Atod. + EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e37", &f)); + EXPECT_EQ(f, 9.999999999999999999e37f); + EXPECT_FALSE(std::isinf(f)); + EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e38", &f)); + EXPECT_TRUE(std::isinf(f)); + + // Parse DBL_MIN (normal), DBL_TRUE_MIN (subnormal) and (DBL_TRUE_MIN / 10) + // (effectively zero). + EXPECT_TRUE(absl::SimpleAtod("2.2250738585072014e-308", &d)); + EXPECT_EQ(d, 2.2250738585072014e-308); + EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-324", &d)); + EXPECT_EQ(d, 4.9406564584124654e-324); + EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-325", &d)); + EXPECT_EQ(d, 0); + // Ditto, but for FLT_MIN, FLT_TRUE_MIN and (FLT_TRUE_MIN / 10). + EXPECT_TRUE(absl::SimpleAtof("1.1754943508222875e-38", &f)); + EXPECT_EQ(f, 1.1754943508222875e-38f); + EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-45", &f)); + EXPECT_EQ(f, 1.4012984643248171e-45f); + EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-46", &f)); + EXPECT_EQ(f, 0); + + // Parse the largest N (the most negative -N) such that parsing 1e-N produces + // a normal or subnormal (but still positive) or zero value. + EXPECT_TRUE(absl::SimpleAtod("1e-307", &d)); + EXPECT_EQ(d, 1e-307); + EXPECT_GE(d, DBL_MIN); + EXPECT_LT(d, DBL_MIN * 10); + EXPECT_TRUE(absl::SimpleAtod("1e-323", &d)); + EXPECT_EQ(d, 1e-323); + EXPECT_GE(d, DBL_TRUE_MIN); + EXPECT_LT(d, DBL_TRUE_MIN * 10); + EXPECT_TRUE(absl::SimpleAtod("1e-324", &d)); + EXPECT_EQ(d, 0); + // Ditto, but for Atof instead of Atod. + EXPECT_TRUE(absl::SimpleAtof("1e-37", &f)); + EXPECT_EQ(f, 1e-37f); + EXPECT_GE(f, FLT_MIN); + EXPECT_LT(f, FLT_MIN * 10); + EXPECT_TRUE(absl::SimpleAtof("1e-45", &f)); + EXPECT_EQ(f, 1e-45f); + EXPECT_GE(f, FLT_TRUE_MIN); + EXPECT_LT(f, FLT_TRUE_MIN * 10); + EXPECT_TRUE(absl::SimpleAtof("1e-46", &f)); + EXPECT_EQ(f, 0); + + // Parse the largest N (the most negative -N) such that parsing + // 9.999999999999999999e-N, with 19 nines, produces a normal or subnormal + // (but still positive) or zero value. + // + // 9999999999999999999, with 19 nines but no decimal point, is the largest + // "repeated nines" integer that fits in a uint64_t. + // + // The -324/-325 exponents (and -46/-47) confirms the "definition of + // kEiselLemireMinInclExp10" comment in charconv.cc. 
+ EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-308", &d)); + EXPECT_EQ(d, 9.999999999999999999e-308); + EXPECT_GE(d, DBL_MIN); + EXPECT_LT(d, DBL_MIN * 10); + EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-324", &d)); + EXPECT_EQ(d, 9.999999999999999999e-324); + EXPECT_GE(d, DBL_TRUE_MIN); + EXPECT_LT(d, DBL_TRUE_MIN * 10); + EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-325", &d)); + EXPECT_EQ(d, 0); + // Ditto, but for Atof instead of Atod. + EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-38", &f)); + EXPECT_EQ(f, 9.999999999999999999e-38f); + EXPECT_GE(f, FLT_MIN); + EXPECT_LT(f, FLT_MIN * 10); + EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-46", &f)); + EXPECT_EQ(f, 9.999999999999999999e-46f); + EXPECT_GE(f, FLT_TRUE_MIN); + EXPECT_LT(f, FLT_TRUE_MIN * 10); + EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-47", &f)); + EXPECT_EQ(f, 0); + + // Leading and/or trailing whitespace is OK. + EXPECT_TRUE(absl::SimpleAtod(" \t\r\n 2.718", &d)); + EXPECT_EQ(d, 2.718); + EXPECT_TRUE(absl::SimpleAtod(" 3.141 ", &d)); + EXPECT_EQ(d, 3.141); + + // Leading or trailing not-whitespace is not OK. + EXPECT_FALSE(absl::SimpleAtod("n 0", &d)); + EXPECT_FALSE(absl::SimpleAtod("0n ", &d)); + + // Multiple leading 0s are OK. + EXPECT_TRUE(absl::SimpleAtod("000123", &d)); + EXPECT_EQ(d, 123); + EXPECT_TRUE(absl::SimpleAtod("000.456", &d)); + EXPECT_EQ(d, 0.456); + + // An absent leading 0 (for a fraction < 1) is OK. + EXPECT_TRUE(absl::SimpleAtod(".5", &d)); + EXPECT_EQ(d, 0.5); + EXPECT_TRUE(absl::SimpleAtod("-.707", &d)); + EXPECT_EQ(d, -0.707); + + // Unary + is OK. + EXPECT_TRUE(absl::SimpleAtod("+6.0221408e+23", &d)); + EXPECT_EQ(d, 6.0221408e+23); + + // Underscores are not OK. + EXPECT_FALSE(absl::SimpleAtod("123_456", &d)); + + // The decimal separator must be '.' and is never ','. + EXPECT_TRUE(absl::SimpleAtod("8.9", &d)); + EXPECT_FALSE(absl::SimpleAtod("8,9", &d)); + + // These examples are called out in the EiselLemire function's comments. + EXPECT_TRUE(absl::SimpleAtod("4503599627370497.5", &d)); + EXPECT_EQ(d, 4503599627370497.5); + EXPECT_TRUE(absl::SimpleAtod("1e+23", &d)); + EXPECT_EQ(d, 1e+23); + EXPECT_TRUE(absl::SimpleAtod("9223372036854775807", &d)); + EXPECT_EQ(d, 9223372036854775807); + // Ditto, but for Atof instead of Atod. + EXPECT_TRUE(absl::SimpleAtof("0.0625", &f)); + EXPECT_EQ(f, 0.0625f); + EXPECT_TRUE(absl::SimpleAtof("20040229.0", &f)); + EXPECT_EQ(f, 20040229.0f); + EXPECT_TRUE(absl::SimpleAtof("2147483647.0", &f)); + EXPECT_EQ(f, 2147483647.0f); + + // Some parsing algorithms don't always round correctly (but absl::SimpleAtod + // should). This test case comes from + // https://github.com/serde-rs/json/issues/707 + // + // See also atod_manual_test.cc for running many more test cases. + EXPECT_TRUE(absl::SimpleAtod("122.416294033786585", &d)); + EXPECT_EQ(d, 122.416294033786585); + EXPECT_TRUE(absl::SimpleAtof("122.416294033786585", &f)); + EXPECT_EQ(f, 122.416294033786585f); } TEST(NumbersTest, Prefixes) { @@ -468,6 +670,148 @@ TEST(NumbersTest, Atoenum) { VerifySimpleAtoiGood(E_biguint_max32, E_biguint_max32); } +template +void VerifySimpleHexAtoiGood(in_val_type in_value, int_type exp_value) { + std::string s; + // uint128 can be streamed but not StrCat'd + absl::strings_internal::OStringStream strm(&s); + if (in_value >= 0) { + strm << std::hex << in_value; + } else { + // Inefficient for small integers, but works with all integral types. 
+ strm << "-" << std::hex << -absl::uint128(in_value); + } + int_type x = static_cast(~exp_value); + EXPECT_TRUE(SimpleHexAtoi(s, &x)) + << "in_value=" << std::hex << in_value << " s=" << s << " x=" << x; + EXPECT_EQ(exp_value, x); + x = static_cast(~exp_value); + EXPECT_TRUE(SimpleHexAtoi( + s.c_str(), &x)); // NOLINT: readability-redundant-string-conversions + EXPECT_EQ(exp_value, x); +} + +template +void VerifySimpleHexAtoiBad(in_val_type in_value) { + std::string s; + // uint128 can be streamed but not StrCat'd + absl::strings_internal::OStringStream strm(&s); + if (in_value >= 0) { + strm << std::hex << in_value; + } else { + // Inefficient for small integers, but works with all integral types. + strm << "-" << std::hex << -absl::uint128(in_value); + } + int_type x; + EXPECT_FALSE(SimpleHexAtoi(s, &x)); + EXPECT_FALSE(SimpleHexAtoi( + s.c_str(), &x)); // NOLINT: readability-redundant-string-conversions +} + +TEST(NumbersTest, HexAtoi) { + // SimpleHexAtoi(absl::string_view, int32_t) + VerifySimpleHexAtoiGood(0, 0); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiGood(-0x42, -0x42); + + VerifySimpleHexAtoiGood(std::numeric_limits::min(), + std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + + // SimpleHexAtoi(absl::string_view, uint32_t) + VerifySimpleHexAtoiGood(0, 0); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiBad(-0x42); + + VerifySimpleHexAtoiBad(std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiBad(std::numeric_limits::min()); + VerifySimpleHexAtoiBad(std::numeric_limits::max()); + VerifySimpleHexAtoiBad(std::numeric_limits::max()); + + // SimpleHexAtoi(absl::string_view, int64_t) + VerifySimpleHexAtoiGood(0, 0); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiGood(-0x42, -0x42); + + VerifySimpleHexAtoiGood(std::numeric_limits::min(), + std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::min(), + std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiBad(std::numeric_limits::max()); + + // SimpleHexAtoi(absl::string_view, uint64_t) + VerifySimpleHexAtoiGood(0, 0); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiBad(-0x42); + + VerifySimpleHexAtoiBad(std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiBad(std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + + // SimpleHexAtoi(absl::string_view, absl::uint128) + VerifySimpleHexAtoiGood(0, 0); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiBad(-0x42); + + VerifySimpleHexAtoiBad(std::numeric_limits::min()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiBad(std::numeric_limits::min()); + 
VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood(std::numeric_limits::max(), + std::numeric_limits::max()); + VerifySimpleHexAtoiGood( + std::numeric_limits::max(), + std::numeric_limits::max()); + + // Some other types + VerifySimpleHexAtoiGood(-0x42, -0x42); + VerifySimpleHexAtoiGood(-0x42, -0x42); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiGood(-0x42, -0x42); + VerifySimpleHexAtoiGood(-0x42, -0x42); // NOLINT: runtime-int + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiGood(0x42, 0x42); + VerifySimpleHexAtoiGood(0x42, 0x42); + + // Number prefix + int32_t value; + EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16)); + EXPECT_EQ(0x34234324, value); + + EXPECT_TRUE(safe_strto32_base("0X34234324", &value, 16)); + EXPECT_EQ(0x34234324, value); + + // ASCII whitespace + EXPECT_TRUE(safe_strto32_base(" \t\n 34234324", &value, 16)); + EXPECT_EQ(0x34234324, value); + + EXPECT_TRUE(safe_strto32_base("34234324 \t\n ", &value, 16)); + EXPECT_EQ(0x34234324, value); +} + TEST(stringtest, safe_strto32_base) { int32_t value; EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16)); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.cc index f4a77493a4..e5cb6d84e8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.cc @@ -17,12 +17,15 @@ #include #include +#include #include #include +#include #include "absl/strings/ascii.h" #include "absl/strings/internal/resize_uninitialized.h" #include "absl/strings/numbers.h" +#include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -56,7 +59,7 @@ AlphaNum::AlphaNum(Dec dec) { *--writer = '0' + (value % 10); value /= 10; } - *--writer = '0' + value; + *--writer = '0' + static_cast(value); if (neg) *--writer = '-'; ptrdiff_t fillers = writer - minfill; @@ -73,7 +76,7 @@ AlphaNum::AlphaNum(Dec dec) { if (add_sign_again) *--writer = '-'; } - piece_ = absl::string_view(writer, end - writer); + piece_ = absl::string_view(writer, static_cast(end - writer)); } // ---------------------------------------------------------------------- diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.h index a8a85c7322..5ee26db027 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat.h @@ -48,6 +48,40 @@ // `StrCat()` or `StrAppend()`. You may specify a minimum hex field width using // a `PadSpec` enum. // +// User-defined types can be formatted with the `AbslStringify()` customization +// point. The API relies on detecting an overload in the user-defined type's +// namespace of a free (non-member) `AbslStringify()` function as a definition +// (typically declared as a friend and implemented in-line. +// with the following signature: +// +// class MyClass { ... }; +// +// template +// void AbslStringify(Sink& sink, const MyClass& value); +// +// An `AbslStringify()` overload for a type should only be declared in the same +// file and namespace as said type. +// +// Note that `AbslStringify()` also supports use with `absl::StrFormat()` and +// `absl::Substitute()`. 
+// +// Example: +// +// struct Point { +// // To add formatting support to `Point`, we simply need to add a free +// // (non-member) function `AbslStringify()`. This method specifies how +// // Point should be printed when absl::StrCat() is called on it. You can add +// // such a free function using a friend declaration within the body of the +// // class. The sink parameter is a templated type to avoid requiring +// // dependencies. +// template friend void AbslStringify(Sink& +// sink, const Point& p) { +// absl::Format(&sink, "(%v, %v)", p.x, p.y); +// } +// +// int x; +// int y; +// }; // ----------------------------------------------------------------------------- #ifndef ABSL_STRINGS_STR_CAT_H_ @@ -57,9 +91,12 @@ #include #include #include +#include #include #include "absl/base/port.h" +#include "absl/strings/internal/has_absl_stringify.h" +#include "absl/strings/internal/stringify_sink.h" #include "absl/strings/numbers.h" #include "absl/strings/string_view.h" @@ -214,23 +251,29 @@ class AlphaNum { // A bool ctor would also convert incoming pointers (bletch). AlphaNum(int x) // NOLINT(runtime/explicit) - : piece_(digits_, - numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {} + : piece_(digits_, static_cast( + numbers_internal::FastIntToBuffer(x, digits_) - + &digits_[0])) {} AlphaNum(unsigned int x) // NOLINT(runtime/explicit) - : piece_(digits_, - numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {} + : piece_(digits_, static_cast( + numbers_internal::FastIntToBuffer(x, digits_) - + &digits_[0])) {} AlphaNum(long x) // NOLINT(*) - : piece_(digits_, - numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {} + : piece_(digits_, static_cast( + numbers_internal::FastIntToBuffer(x, digits_) - + &digits_[0])) {} AlphaNum(unsigned long x) // NOLINT(*) - : piece_(digits_, - numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {} + : piece_(digits_, static_cast( + numbers_internal::FastIntToBuffer(x, digits_) - + &digits_[0])) {} AlphaNum(long long x) // NOLINT(*) - : piece_(digits_, - numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {} + : piece_(digits_, static_cast( + numbers_internal::FastIntToBuffer(x, digits_) - + &digits_[0])) {} AlphaNum(unsigned long long x) // NOLINT(*) - : piece_(digits_, - numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {} + : piece_(digits_, static_cast( + numbers_internal::FastIntToBuffer(x, digits_) - + &digits_[0])) {} AlphaNum(float f) // NOLINT(runtime/explicit) : piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {} @@ -245,9 +288,17 @@ class AlphaNum { const strings_internal::AlphaNumBuffer& buf) : piece_(&buf.data[0], buf.size) {} - AlphaNum(const char* c_str) : piece_(c_str) {} // NOLINT(runtime/explicit) + AlphaNum(const char* c_str) // NOLINT(runtime/explicit) + : piece_(NullSafeStringView(c_str)) {} // NOLINT(runtime/explicit) AlphaNum(absl::string_view pc) : piece_(pc) {} // NOLINT(runtime/explicit) + template ::value>::type> + AlphaNum( // NOLINT(runtime/explicit) + const T& v, // NOLINT(runtime/explicit) + strings_internal::StringifySink&& sink = {}) // NOLINT(runtime/explicit) + : piece_(strings_internal::ExtractStringification(sink, v)) {} + template AlphaNum( // NOLINT(runtime/explicit) const std::basic_string, Allocator>& str) @@ -267,7 +318,8 @@ class AlphaNum { // This overload matches only scoped enums. 
template {} && !std::is_convertible{}>::type> + std::is_enum{} && !std::is_convertible{} && + !strings_internal::HasAbslStringify::value>::type> AlphaNum(T e) // NOLINT(runtime/explicit) : AlphaNum(static_cast::type>(e)) {} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat_test.cc index f3770dc076..c3fb317023 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_cat_test.cc @@ -21,6 +21,7 @@ #include #include "gtest/gtest.h" +#include "absl/strings/str_format.h" #include "absl/strings/substitute.h" #ifdef __ANDROID__ @@ -210,6 +211,11 @@ TEST(StrCat, CornerCases) { EXPECT_EQ(result, ""); } +TEST(StrCat, NullConstCharPtr) { + const char* null = nullptr; + EXPECT_EQ(absl::StrCat("mon", null, "key"), "monkey"); +} + // A minimal allocator that uses malloc(). template struct Mallocator { @@ -607,4 +613,53 @@ TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); } +struct PointStringify { + template + friend void AbslStringify(FormatSink& sink, const PointStringify& p) { + sink.Append("("); + sink.Append(absl::StrCat(p.x)); + sink.Append(", "); + sink.Append(absl::StrCat(p.y)); + sink.Append(")"); + } + + double x = 10.0; + double y = 20.0; +}; + +TEST(StrCat, AbslStringifyExample) { + PointStringify p; + EXPECT_EQ(absl::StrCat(p), "(10, 20)"); + EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z"); +} + +struct PointStringifyUsingFormat { + template + friend void AbslStringify(FormatSink& sink, + const PointStringifyUsingFormat& p) { + absl::Format(&sink, "(%g, %g)", p.x, p.y); + } + + double x = 10.0; + double y = 20.0; +}; + +TEST(StrCat, AbslStringifyExampleUsingFormat) { + PointStringifyUsingFormat p; + EXPECT_EQ(absl::StrCat(p), "(10, 20)"); + EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z"); +} + +enum class EnumWithStringify { Many = 0, Choices = 1 }; + +template +void AbslStringify(Sink& sink, EnumWithStringify e) { + absl::Format(&sink, "%s", e == EnumWithStringify::Many ? "Many" : "Choices"); +} + +TEST(StrCat, AbslStringifyWithEnum) { + const auto e = EnumWithStringify::Choices; + EXPECT_EQ(absl::StrCat(e), "Choices"); +} + } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format.h index 4b05c70c23..f4c98f415b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format.h @@ -191,7 +191,7 @@ class FormatCountCapture { // absl::StrFormat(formatString, "TheVillage", 6); // // A format string generally follows the POSIX syntax as used within the POSIX -// `printf` specification. +// `printf` specification. (Exceptions are noted below.) // // (See http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html.) // @@ -211,6 +211,10 @@ class FormatCountCapture { // * `n` for the special case of writing out the number of characters // written to this point. The resulting value must be captured within an // `absl::FormatCountCapture` type. +// * `v` for values using the default format for a deduced type. These deduced +// types include many of the primitive types denoted here as well as +// user-defined types containing the proper extensions. 
(See below for more +// information.) // // Implementation-defined behavior: // * A null pointer provided to "%s" or "%p" is output as "(nil)". @@ -239,6 +243,15 @@ class FormatCountCapture { // "%s%d%n", "hello", 123, absl::FormatCountCapture(&n)); // EXPECT_EQ(8, n); // +// NOTE: the `v` specifier (for "value") is a type specifier not present in the +// POSIX specification. %v will format values according to their deduced type. +// `v` uses `d` for signed integer values, `u` for unsigned integer values, `g` +// for floating point values, and formats boolean values as "true"/"false" +// (instead of 1 or 0 for booleans formatted using d). `const char*` is not +// supported; please use `std:string` and `string_view`. `char` is also not +// supported due to ambiguity of the type. This specifier does not support +// modifiers. +// // The `FormatSpec` intrinsically supports all of these fundamental C++ types: // // * Characters: `char`, `signed char`, `unsigned char` @@ -570,6 +583,41 @@ ABSL_MUST_USE_RESULT inline bool FormatUntyped( // StrFormat Extensions //------------------------------------------------------------------------------ // +// AbslStringify() +// +// A simpler customization API for formatting user-defined types using +// absl::StrFormat(). The API relies on detecting an overload in the +// user-defined type's namespace of a free (non-member) `AbslStringify()` +// function as a friend definition with the following signature: +// +// template +// void AbslStringify(Sink& sink, const X& value); +// +// An `AbslStringify()` overload for a type should only be declared in the same +// file and namespace as said type. +// +// Note that unlike with AbslFormatConvert(), AbslStringify() does not allow +// customization of allowed conversion characters. AbslStringify() uses `%v` as +// the underlying conversion specififer. Additionally, AbslStringify() supports +// use with absl::StrCat while AbslFormatConvert() does not. +// +// Example: +// +// struct Point { +// // To add formatting support to `Point`, we simply need to add a free +// // (non-member) function `AbslStringify()`. This method prints in the +// // request format using the underlying `%v` specifier. You can add such a +// // free function using a friend declaration within the body of the class. +// // The sink parameter is a templated type to avoid requiring dependencies. 
+// template +// friend void AbslStringify(Sink& sink, const Point& p) { +// absl::Format(&sink, "(%v, %v)", p.x, p.y); +// } +// +// int x; +// int y; +// }; +// // AbslFormatConvert() // // The StrFormat library provides a customization API for formatting @@ -616,9 +664,9 @@ ABSL_MUST_USE_RESULT inline bool FormatUntyped( // AbslFormatConvert(const Point& p, const absl::FormatConversionSpec& spec, // absl::FormatSink* s) { // if (spec.conversion_char() == absl::FormatConversionChar::s) { -// s->Append(absl::StrCat("x=", p.x, " y=", p.y)); +// absl::Format(s, "x=%vy=%v", p.x, p.y); // } else { -// s->Append(absl::StrCat(p.x, ",", p.y)); +// absl::Format(s, "%v,%v", p.x, p.y); // } // return {true}; // } @@ -637,7 +685,7 @@ enum class FormatConversionChar : uint8_t { c, s, // text d, i, o, u, x, X, // int f, F, e, E, g, G, a, A, // float - n, p // misc + n, p, v // misc }; // clang-format on @@ -757,6 +805,7 @@ enum class FormatConversionCharSet : uint64_t { // misc n = str_format_internal::FormatConversionCharToConvInt('n'), p = str_format_internal::FormatConversionCharToConvInt('p'), + v = str_format_internal::FormatConversionCharToConvInt('v'), // Used for width/precision '*' specification. kStar = static_cast( @@ -771,23 +820,36 @@ enum class FormatConversionCharSet : uint64_t { // FormatSink // -// An abstraction to which conversions write their string data. +// A format sink is a generic abstraction to which conversions may write their +// formatted string data. `absl::FormatConvert()` uses this sink to write its +// formatted string. // class FormatSink { public: - // Appends `count` copies of `ch`. + // FormatSink::Append() + // + // Appends `count` copies of `ch` to the format sink. void Append(size_t count, char ch) { sink_->Append(count, ch); } + // Overload of FormatSink::Append() for appending the characters of a string + // view to a format sink. void Append(string_view v) { sink_->Append(v); } - // Appends the first `precision` bytes of `v`. If this is less than - // `width`, spaces will be appended first (if `left` is false), or + // FormatSink::PutPaddedString() + // + // Appends `precision` number of bytes of `v` to the format sink. If this is + // less than `width`, spaces will be appended first (if `left` is false), or // after (if `left` is true) to ensure the total amount appended is // at least `width`. bool PutPaddedString(string_view v, int width, int precision, bool left) { return sink_->PutPaddedString(v, width, precision, left); } + // Support `absl::Format(&sink, format, args...)`. 
+ friend void AbslFormatFlush(FormatSink* sink, absl::string_view v) { + sink->Append(v); + } + private: friend str_format_internal::FormatSinkImpl; explicit FormatSink(str_format_internal::FormatSinkImpl* s) : sink_(s) {} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format_test.cc index c60027ad29..2aa22b0d06 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_format_test.cc @@ -42,6 +42,18 @@ TEST_F(FormatEntryPointTest, Format) { EXPECT_TRUE(Format(&sink, pc, 123)); EXPECT_EQ("A format 123", sink); } + +TEST_F(FormatEntryPointTest, FormatWithV) { + std::string sink; + EXPECT_TRUE(Format(&sink, "A format %v", 123)); + EXPECT_EQ("A format 123", sink); + sink.clear(); + + ParsedFormat<'v'> pc("A format %v"); + EXPECT_TRUE(Format(&sink, pc, 123)); + EXPECT_EQ("A format 123", sink); +} + TEST_F(FormatEntryPointTest, UntypedFormat) { constexpr const char* formats[] = { "", @@ -84,6 +96,14 @@ TEST_F(FormatEntryPointTest, StringFormat) { EXPECT_EQ("=123=", StrFormat(view, 123)); } +TEST_F(FormatEntryPointTest, StringFormatV) { + std::string hello = "hello"; + EXPECT_EQ("hello", StrFormat("%v", hello)); + EXPECT_EQ("123", StrFormat("%v", 123)); + constexpr absl::string_view view("=%v=", 4); + EXPECT_EQ("=123=", StrFormat(view, 123)); +} + TEST_F(FormatEntryPointTest, AppendFormat) { std::string s; std::string& r = StrAppendFormat(&s, "%d", 123); @@ -91,6 +111,13 @@ TEST_F(FormatEntryPointTest, AppendFormat) { EXPECT_EQ("123", r); } +TEST_F(FormatEntryPointTest, AppendFormatWithV) { + std::string s; + std::string& r = StrAppendFormat(&s, "%v", 123); + EXPECT_EQ(&s, &r); // should be same object + EXPECT_EQ("123", r); +} + TEST_F(FormatEntryPointTest, AppendFormatFail) { std::string s = "orig"; @@ -103,6 +130,17 @@ TEST_F(FormatEntryPointTest, AppendFormatFail) { {&arg, 1})); } +TEST_F(FormatEntryPointTest, AppendFormatFailWithV) { + std::string s = "orig"; + + UntypedFormatSpec format(" more %v"); + FormatArgImpl arg("not an int"); + + EXPECT_EQ("orig", + str_format_internal::AppendPack( + &s, str_format_internal::UntypedFormatSpecImpl::Extract(format), + {&arg, 1})); +} TEST_F(FormatEntryPointTest, ManyArgs) { EXPECT_EQ("24", StrFormat("%24$d", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, @@ -123,6 +161,15 @@ TEST_F(FormatEntryPointTest, Preparsed) { EXPECT_EQ("=123=", StrFormat(ParsedFormat<'d'>(view), 123)); } +TEST_F(FormatEntryPointTest, PreparsedWithV) { + ParsedFormat<'v'> pc("%v"); + EXPECT_EQ("123", StrFormat(pc, 123)); + // rvalue ok? + EXPECT_EQ("123", StrFormat(ParsedFormat<'v'>("%v"), 123)); + constexpr absl::string_view view("=%v=", 4); + EXPECT_EQ("=123=", StrFormat(ParsedFormat<'v'>(view), 123)); +} + TEST_F(FormatEntryPointTest, FormatCountCapture) { int n = 0; EXPECT_EQ("", StrFormat("%n", FormatCountCapture(&n))); @@ -131,6 +178,14 @@ TEST_F(FormatEntryPointTest, FormatCountCapture) { EXPECT_EQ(3, n); } +TEST_F(FormatEntryPointTest, FormatCountCaptureWithV) { + int n = 0; + EXPECT_EQ("", StrFormat("%n", FormatCountCapture(&n))); + EXPECT_EQ(0, n); + EXPECT_EQ("123", StrFormat("%v%n", 123, FormatCountCapture(&n))); + EXPECT_EQ(3, n); +} + TEST_F(FormatEntryPointTest, FormatCountCaptureWrongType) { // Should reject int*. 
int n = 0; @@ -143,6 +198,18 @@ TEST_F(FormatEntryPointTest, FormatCountCaptureWrongType) { absl::MakeSpan(args))); } +TEST_F(FormatEntryPointTest, FormatCountCaptureWrongTypeWithV) { + // Should reject int*. + int n = 0; + UntypedFormatSpec format("%v%n"); + int i = 123, *ip = &n; + FormatArgImpl args[2] = {FormatArgImpl(i), FormatArgImpl(ip)}; + + EXPECT_EQ("", str_format_internal::FormatPack( + str_format_internal::UntypedFormatSpecImpl::Extract(format), + absl::MakeSpan(args))); +} + TEST_F(FormatEntryPointTest, FormatCountCaptureMultiple) { int n1 = 0; int n2 = 0; @@ -165,6 +232,21 @@ TEST_F(FormatEntryPointTest, FormatCountCaptureExample) { s); } +TEST_F(FormatEntryPointTest, FormatCountCaptureExampleWithV) { + int n; + std::string s; + std::string a1 = "(1,1)"; + std::string a2 = "(1,2)"; + std::string a3 = "(2,2)"; + StrAppendFormat(&s, "%v: %n%v\n", a1, FormatCountCapture(&n), a2); + StrAppendFormat(&s, "%*s%v\n", n, "", a3); + EXPECT_EQ(7, n); + EXPECT_EQ( + "(1,1): (1,2)\n" + " (2,2)\n", + s); +} + TEST_F(FormatEntryPointTest, Stream) { const std::string formats[] = { "", @@ -183,7 +265,7 @@ TEST_F(FormatEntryPointTest, Stream) { std::ostringstream oss; oss << StreamFormat(*parsed, 123, 3, 49, "multistreaming!!!", 1.01, 1.01); int fmt_result = snprintf(&*buf.begin(), buf.size(), fmt.c_str(), // - 123, 3, 49, "multistreaming!!!", 1.01, 1.01); + 123, 3, 49, "multistreaming!!!", 1.01, 1.01); ASSERT_TRUE(oss) << fmt; ASSERT_TRUE(fmt_result >= 0 && static_cast(fmt_result) < buf.size()) << fmt_result; @@ -191,6 +273,36 @@ TEST_F(FormatEntryPointTest, Stream) { } } +TEST_F(FormatEntryPointTest, StreamWithV) { + const std::string formats[] = { + "", + "a", + "%v %u %c %v %f %v", + }; + + const std::string formats_for_buf[] = { + "", + "a", + "%d %u %c %s %f %g", + }; + + std::string buf(4096, '\0'); + for (auto i = 0; i < ABSL_ARRAYSIZE(formats); ++i) { + const auto parsed = + ParsedFormat<'v', 'u', 'c', 'v', 'f', 'v'>::NewAllowIgnored(formats[i]); + std::ostringstream oss; + oss << StreamFormat(*parsed, 123, 3, 49, + absl::string_view("multistreaming!!!"), 1.01, 1.01); + int fmt_result = + snprintf(&*buf.begin(), buf.size(), formats_for_buf[i].c_str(), // + 123, 3, 49, "multistreaming!!!", 1.01, 1.01); + ASSERT_TRUE(oss) << formats[i]; + ASSERT_TRUE(fmt_result >= 0 && static_cast(fmt_result) < buf.size()) + << fmt_result; + EXPECT_EQ(buf.c_str(), oss.str()); + } +} + TEST_F(FormatEntryPointTest, StreamOk) { std::ostringstream oss; oss << StreamFormat("hello %d", 123); @@ -198,6 +310,13 @@ TEST_F(FormatEntryPointTest, StreamOk) { EXPECT_TRUE(oss.good()); } +TEST_F(FormatEntryPointTest, StreamOkWithV) { + std::ostringstream oss; + oss << StreamFormat("hello %v", 123); + EXPECT_EQ("hello 123", oss.str()); + EXPECT_TRUE(oss.good()); +} + TEST_F(FormatEntryPointTest, StreamFail) { std::ostringstream oss; UntypedFormatSpec format("hello %d"); @@ -208,6 +327,16 @@ TEST_F(FormatEntryPointTest, StreamFail) { EXPECT_TRUE(oss.fail()); } +TEST_F(FormatEntryPointTest, StreamFailWithV) { + std::ostringstream oss; + UntypedFormatSpec format("hello %v"); + FormatArgImpl arg("non-numeric"); + oss << str_format_internal::Streamable( + str_format_internal::UntypedFormatSpecImpl::Extract(format), {&arg, 1}); + EXPECT_EQ("hello ", oss.str()); // partial write + EXPECT_TRUE(oss.fail()); +} + std::string WithSnprintf(const char* fmt, ...) 
{ std::string buf; buf.resize(128); @@ -249,6 +378,12 @@ TEST_F(FormatEntryPointTest, FormatStreamed) { EXPECT_EQ("123", StrFormat("%s", FormatStreamed(StreamFormat("%d", 123)))); } +TEST_F(FormatEntryPointTest, FormatStreamedWithV) { + EXPECT_EQ("123", StrFormat("%v", FormatStreamed(123))); + EXPECT_EQ("X", StrFormat("%v", FormatStreamed(streamed_test::X()))); + EXPECT_EQ("123", StrFormat("%v", FormatStreamed(StreamFormat("%d", 123)))); +} + // Helper class that creates a temporary file and exposes a FILE* to it. // It will close the file on destruction. class TempFile { @@ -284,6 +419,14 @@ TEST_F(FormatEntryPointTest, FPrintF) { EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019"); } +TEST_F(FormatEntryPointTest, FPrintFWithV) { + TempFile tmp; + int result = + FPrintF(tmp.file(), "STRING: %v NUMBER: %010d", std::string("ABC"), -19); + EXPECT_EQ(result, 30); + EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019"); +} + TEST_F(FormatEntryPointTest, FPrintFError) { errno = 0; int result = FPrintF(stdin, "ABC"); @@ -318,6 +461,23 @@ TEST_F(FormatEntryPointTest, PrintF) { EXPECT_EQ(result, 30); EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019"); } + +TEST_F(FormatEntryPointTest, PrintFWithV) { + int stdout_tmp = dup(STDOUT_FILENO); + + TempFile tmp; + std::fflush(stdout); + dup2(fileno(tmp.file()), STDOUT_FILENO); + + int result = PrintF("STRING: %v NUMBER: %010d", std::string("ABC"), -19); + + std::fflush(stdout); + dup2(stdout_tmp, STDOUT_FILENO); + close(stdout_tmp); + + EXPECT_EQ(result, 30); + EXPECT_EQ(tmp.ReadFile(), "STRING: ABC NUMBER: -000000019"); +} #endif // __GLIBC__ TEST_F(FormatEntryPointTest, SNPrintF) { @@ -347,9 +507,41 @@ TEST_F(FormatEntryPointTest, SNPrintF) { EXPECT_EQ(result, 37); } +TEST_F(FormatEntryPointTest, SNPrintFWithV) { + char buffer[16]; + int result = + SNPrintF(buffer, sizeof(buffer), "STRING: %v", std::string("ABC")); + EXPECT_EQ(result, 11); + EXPECT_EQ(std::string(buffer), "STRING: ABC"); + + result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 123456); + EXPECT_EQ(result, 14); + EXPECT_EQ(std::string(buffer), "NUMBER: 123456"); + + result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 1234567); + EXPECT_EQ(result, 15); + EXPECT_EQ(std::string(buffer), "NUMBER: 1234567"); + + result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 12345678); + EXPECT_EQ(result, 16); + EXPECT_EQ(std::string(buffer), "NUMBER: 1234567"); + + result = SNPrintF(buffer, sizeof(buffer), "NUMBER: %v", 123456789); + EXPECT_EQ(result, 17); + EXPECT_EQ(std::string(buffer), "NUMBER: 1234567"); + + std::string size = "size"; + + result = SNPrintF(nullptr, 0, "Just checking the %v of the output.", size); + EXPECT_EQ(result, 37); +} + TEST(StrFormat, BehavesAsDocumented) { std::string s = absl::StrFormat("%s, %d!", "Hello", 123); EXPECT_EQ("Hello, 123!", s); + std::string hello = "Hello"; + std::string s2 = absl::StrFormat("%v, %v!", hello, 123); + EXPECT_EQ("Hello, 123!", s2); // The format of a replacement is // '%'[position][flags][width['.'precision]][length_modifier][format] EXPECT_EQ(absl::StrFormat("%1$+3.2Lf", 1.1), "+1.10"); @@ -364,22 +556,31 @@ TEST(StrFormat, BehavesAsDocumented) { // "s" - string Eg: "C" -> "C", std::string("C++") -> "C++" // Formats std::string, char*, string_view, and Cord. 
EXPECT_EQ(StrFormat("%s", "C"), "C"); + EXPECT_EQ(StrFormat("%v", std::string("C")), "C"); EXPECT_EQ(StrFormat("%s", std::string("C++")), "C++"); + EXPECT_EQ(StrFormat("%v", std::string("C++")), "C++"); EXPECT_EQ(StrFormat("%s", string_view("view")), "view"); + EXPECT_EQ(StrFormat("%v", string_view("view")), "view"); EXPECT_EQ(StrFormat("%s", absl::Cord("cord")), "cord"); + EXPECT_EQ(StrFormat("%v", absl::Cord("cord")), "cord"); // Integral Conversion // These format integral types: char, int, long, uint64_t, etc. EXPECT_EQ(StrFormat("%d", char{10}), "10"); EXPECT_EQ(StrFormat("%d", int{10}), "10"); EXPECT_EQ(StrFormat("%d", long{10}), "10"); // NOLINT EXPECT_EQ(StrFormat("%d", uint64_t{10}), "10"); + EXPECT_EQ(StrFormat("%v", int{10}), "10"); + EXPECT_EQ(StrFormat("%v", long{10}), "10"); // NOLINT + EXPECT_EQ(StrFormat("%v", uint64_t{10}), "10"); // d,i - signed decimal Eg: -10 -> "-10" EXPECT_EQ(StrFormat("%d", -10), "-10"); EXPECT_EQ(StrFormat("%i", -10), "-10"); + EXPECT_EQ(StrFormat("%v", -10), "-10"); // o - octal Eg: 10 -> "12" EXPECT_EQ(StrFormat("%o", 10), "12"); // u - unsigned decimal Eg: 10 -> "10" EXPECT_EQ(StrFormat("%u", 10), "10"); + EXPECT_EQ(StrFormat("%v", 10), "10"); // x/X - lower,upper case hex Eg: 10 -> "a"/"A" EXPECT_EQ(StrFormat("%x", 10), "a"); EXPECT_EQ(StrFormat("%X", 10), "A"); @@ -404,6 +605,8 @@ TEST(StrFormat, BehavesAsDocumented) { EXPECT_EQ(StrFormat("%g", .01), "0.01"); EXPECT_EQ(StrFormat("%g", 1e10), "1e+10"); EXPECT_EQ(StrFormat("%G", 1e10), "1E+10"); + EXPECT_EQ(StrFormat("%v", .01), "0.01"); + EXPECT_EQ(StrFormat("%v", 1e10), "1e+10"); // a/A - lower,upper case hex Eg: -3.0 -> "-0x1.8p+1"/"-0X1.8P+1" // On Android platform <=21, there is a regression in hexfloat formatting. @@ -441,6 +644,11 @@ TEST(StrFormat, BehavesAsDocumented) { EXPECT_EQ(StrFormat("%zd", int{1}), "1"); EXPECT_EQ(StrFormat("%td", int{1}), "1"); EXPECT_EQ(StrFormat("%qd", int{1}), "1"); + + // Bool is handled correctly depending on whether %v is used + EXPECT_EQ(StrFormat("%v", true), "true"); + EXPECT_EQ(StrFormat("%v", false), "false"); + EXPECT_EQ(StrFormat("%d", true), "1"); } using str_format_internal::ExtendedParsedFormat; @@ -490,6 +698,15 @@ TEST_F(ParsedFormatTest, SimpleChecked) { SummarizeParsedFormat(ParsedFormat<'s', '*', 'd'>("%s %.*d"))); } +TEST_F(ParsedFormatTest, SimpleCheckedWithV) { + EXPECT_EQ("[ABC]{v:1$v}[DEF]", + SummarizeParsedFormat(ParsedFormat<'v'>("ABC%vDEF"))); + EXPECT_EQ("{v:1$v}[FFF]{v:2$v}[ZZZ]{f:3$f}", + SummarizeParsedFormat(ParsedFormat<'v', 'v', 'f'>("%vFFF%vZZZ%f"))); + EXPECT_EQ("{v:1$v}[ ]{.*d:3$.2$*d}", + SummarizeParsedFormat(ParsedFormat<'v', '*', 'd'>("%v %.*d"))); +} + TEST_F(ParsedFormatTest, SimpleUncheckedCorrect) { auto f = ParsedFormat<'d'>::New("ABC%dDEF"); ASSERT_TRUE(f); @@ -520,6 +737,23 @@ TEST_F(ParsedFormatTest, SimpleUncheckedCorrect) { SummarizeParsedFormat(*dollar)); } +TEST_F(ParsedFormatTest, SimpleUncheckedCorrectWithV) { + auto f = ParsedFormat<'v'>::New("ABC%vDEF"); + ASSERT_TRUE(f); + EXPECT_EQ("[ABC]{v:1$v}[DEF]", SummarizeParsedFormat(*f)); + + std::string format = "%vFFF%vZZZ%f"; + auto f2 = ParsedFormat<'v', 'v', 'f'>::New(format); + + ASSERT_TRUE(f2); + EXPECT_EQ("{v:1$v}[FFF]{v:2$v}[ZZZ]{f:3$f}", SummarizeParsedFormat(*f2)); + + f2 = ParsedFormat<'v', 'v', 'f'>::New("%v %v %f"); + + ASSERT_TRUE(f2); + EXPECT_EQ("{v:1$v}[ ]{v:2$v}[ ]{f:3$f}", SummarizeParsedFormat(*f2)); +} + TEST_F(ParsedFormatTest, SimpleUncheckedIgnoredArgs) { EXPECT_FALSE((ParsedFormat<'d', 's'>::New("ABC"))); 
EXPECT_FALSE((ParsedFormat<'d', 's'>::New("%dABC"))); @@ -535,6 +769,18 @@ TEST_F(ParsedFormatTest, SimpleUncheckedIgnoredArgs) { EXPECT_EQ("[ABC]{2$s:2$s}", SummarizeParsedFormat(*f)); } +TEST_F(ParsedFormatTest, SimpleUncheckedIgnoredArgsWithV) { + EXPECT_FALSE((ParsedFormat<'v', 'v'>::New("ABC"))); + EXPECT_FALSE((ParsedFormat<'v', 'v'>::New("%vABC"))); + EXPECT_FALSE((ParsedFormat<'v', 's'>::New("ABC%2$s"))); + auto f = ParsedFormat<'v', 'v'>::NewAllowIgnored("ABC"); + ASSERT_TRUE(f); + EXPECT_EQ("[ABC]", SummarizeParsedFormat(*f)); + f = ParsedFormat<'v', 'v'>::NewAllowIgnored("%vABC"); + ASSERT_TRUE(f); + EXPECT_EQ("{v:1$v}[ABC]", SummarizeParsedFormat(*f)); +} + TEST_F(ParsedFormatTest, SimpleUncheckedUnsupported) { EXPECT_FALSE(ParsedFormat<'d'>::New("%1$d %1$x")); EXPECT_FALSE(ParsedFormat<'x'>::New("%1$d %1$x")); @@ -549,6 +795,15 @@ TEST_F(ParsedFormatTest, SimpleUncheckedIncorrect) { EXPECT_FALSE((ParsedFormat<'s', 'd', 'g'>::New(format))); } +TEST_F(ParsedFormatTest, SimpleUncheckedIncorrectWithV) { + EXPECT_FALSE(ParsedFormat<'v'>::New("")); + + EXPECT_FALSE(ParsedFormat<'v'>::New("ABC%vDEF%v")); + + std::string format = "%vFFF%vZZZ%f"; + EXPECT_FALSE((ParsedFormat<'v', 'v', 'g'>::New(format))); +} + #if defined(__cpp_nontype_template_parameter_auto) template @@ -595,6 +850,23 @@ TEST_F(ParsedFormatTest, ExtendedTyping) { 's'>::New("%s%s"); ASSERT_TRUE(v4); } + +TEST_F(ParsedFormatTest, ExtendedTypingWithV) { + EXPECT_FALSE(ParsedFormat::New("")); + ASSERT_TRUE(ParsedFormat::New("%v")); + auto v1 = ParsedFormat<'v', absl::FormatConversionCharSet::v>::New("%v%v"); + ASSERT_TRUE(v1); + auto v2 = ParsedFormat::New("%v%v"); + ASSERT_TRUE(v2); + auto v3 = ParsedFormat::New("%v%v"); + ASSERT_TRUE(v3); + auto v4 = ParsedFormat::New("%v%v"); + ASSERT_TRUE(v4); +} #endif TEST_F(ParsedFormatTest, UncheckedCorrect) { @@ -638,6 +910,28 @@ TEST_F(ParsedFormatTest, UncheckedCorrect) { SummarizeParsedFormat(*dollar)); } +TEST_F(ParsedFormatTest, UncheckedCorrectWithV) { + auto f = + ExtendedParsedFormat::New("ABC%vDEF"); + ASSERT_TRUE(f); + EXPECT_EQ("[ABC]{v:1$v}[DEF]", SummarizeParsedFormat(*f)); + + std::string format = "%vFFF%vZZZ%f"; + auto f2 = ExtendedParsedFormat< + absl::FormatConversionCharSet::v, absl::FormatConversionCharSet::v, + absl::FormatConversionCharSet::kFloating>::New(format); + + ASSERT_TRUE(f2); + EXPECT_EQ("{v:1$v}[FFF]{v:2$v}[ZZZ]{f:3$f}", SummarizeParsedFormat(*f2)); + + f2 = ExtendedParsedFormat< + absl::FormatConversionCharSet::v, absl::FormatConversionCharSet::v, + absl::FormatConversionCharSet::kFloating>::New("%v %v %f"); + + ASSERT_TRUE(f2); + EXPECT_EQ("{v:1$v}[ ]{v:2$v}[ ]{f:3$f}", SummarizeParsedFormat(*f2)); +} + TEST_F(ParsedFormatTest, UncheckedIgnoredArgs) { EXPECT_FALSE( (ExtendedParsedFormat::New("ABC"))); + EXPECT_FALSE( + (ExtendedParsedFormat::New("%vABC"))); + EXPECT_FALSE((ExtendedParsedFormat:: + New("ABC%2$s"))); + auto f = ExtendedParsedFormat< + absl::FormatConversionCharSet::v, + absl::FormatConversionCharSet::v>::NewAllowIgnored("ABC"); + ASSERT_TRUE(f); + EXPECT_EQ("[ABC]", SummarizeParsedFormat(*f)); + f = ExtendedParsedFormat< + absl::FormatConversionCharSet::v, + absl::FormatConversionCharSet::v>::NewAllowIgnored("%vABC"); + ASSERT_TRUE(f); + EXPECT_EQ("{v:1$v}[ABC]", SummarizeParsedFormat(*f)); +} + TEST_F(ParsedFormatTest, UncheckedMultipleTypes) { auto dx = ExtendedParsedFormat::New(format))); } +TEST_F(ParsedFormatTest, UncheckedIncorrectWithV) { + EXPECT_FALSE(ExtendedParsedFormat::New("")); + + 
EXPECT_FALSE(ExtendedParsedFormat::New( + "ABC%vDEF%v")); + + std::string format = "%vFFF%vZZZ%f"; + EXPECT_FALSE( + (ExtendedParsedFormat::New(format))); +} + TEST_F(ParsedFormatTest, RegressionMixPositional) { EXPECT_FALSE( (ExtendedParsedFormat::New("%1$d %o"))); } +TEST_F(ParsedFormatTest, DisallowModifiersWithV) { + auto f = ParsedFormat<'v'>::New("ABC%80vDEF"); + EXPECT_EQ(f, nullptr); + + f = ParsedFormat<'v'>::New("ABC%0vDEF"); + EXPECT_EQ(f, nullptr); + + f = ParsedFormat<'v'>::New("ABC%.1vDEF"); + EXPECT_EQ(f, nullptr); +} + using FormatWrapperTest = ::testing::Test; // Plain wrapper for StrFormat. @@ -710,20 +1049,33 @@ TEST_F(FormatWrapperTest, ConstexprStringFormat) { EXPECT_EQ(WrappedFormat("%s there", "hello"), "hello there"); } +TEST_F(FormatWrapperTest, ConstexprStringFormatWithV) { + std::string hello = "hello"; + EXPECT_EQ(WrappedFormat("%v there", hello), "hello there"); +} + TEST_F(FormatWrapperTest, ParsedFormat) { ParsedFormat<'s'> format("%s there"); EXPECT_EQ(WrappedFormat(format, "hello"), "hello there"); } +TEST_F(FormatWrapperTest, ParsedFormatWithV) { + std::string hello = "hello"; + ParsedFormat<'v'> format("%v there"); + EXPECT_EQ(WrappedFormat(format, hello), "hello there"); +} + } // namespace ABSL_NAMESPACE_END } // namespace absl +namespace { using FormatExtensionTest = ::testing::Test; struct Point { friend absl::FormatConvertResult + absl::FormatConversionCharSet::kIntegral | + absl::FormatConversionCharSet::v> AbslFormatConvert(const Point& p, const absl::FormatConversionSpec& spec, absl::FormatSink* s) { if (spec.conversion_char() == absl::FormatConversionChar::s) { @@ -742,6 +1094,7 @@ TEST_F(FormatExtensionTest, AbslFormatConvertExample) { Point p; EXPECT_EQ(absl::StrFormat("a %s z", p), "a x=10 y=20 z"); EXPECT_EQ(absl::StrFormat("a %d z", p), "a 10,20 z"); + EXPECT_EQ(absl::StrFormat("a %v z", p), "a 10,20 z"); // Typed formatting will fail to compile an invalid format. // StrFormat("%f", p); // Does not compile. @@ -751,6 +1104,51 @@ TEST_F(FormatExtensionTest, AbslFormatConvertExample) { EXPECT_FALSE(absl::FormatUntyped(&actual, f1, {absl::FormatArg(p)})); } +struct PointStringify { + template + friend void AbslStringify(FormatSink& sink, const PointStringify& p) { + sink.Append(absl::StrCat("(", p.x, ", ", p.y, ")")); + } + + double x = 10.0; + double y = 20.0; +}; + +TEST_F(FormatExtensionTest, AbslStringifyExample) { + PointStringify p; + EXPECT_EQ(absl::StrFormat("a %v z", p), "a (10, 20) z"); +} + +struct PointStringifyUsingFormat { + template + friend void AbslStringify(FormatSink& sink, + const PointStringifyUsingFormat& p) { + absl::Format(&sink, "(%g, %g)", p.x, p.y); + } + + double x = 10.0; + double y = 20.0; +}; + +TEST_F(FormatExtensionTest, AbslStringifyExampleUsingFormat) { + PointStringifyUsingFormat p; + EXPECT_EQ(absl::StrFormat("a %v z", p), "a (10, 20) z"); +} + +enum class EnumWithStringify { Many = 0, Choices = 1 }; + +template +void AbslStringify(Sink& sink, EnumWithStringify e) { + absl::Format(&sink, "%s", e == EnumWithStringify::Many ? "Many" : "Choices"); +} + +TEST_F(FormatExtensionTest, AbslStringifyWithEnum) { + const auto e = EnumWithStringify::Choices; + EXPECT_EQ(absl::StrFormat("My choice is %v", e), "My choice is Choices"); +} + +} // namespace + // Some codegen thunks that we can use to easily dump the generated assembly for // different StrFormat calls. 
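For illustration only (not part of the patch): a minimal sketch of the `%v` specifier and the `AbslStringify()` extension point exercised by the tests above. `Celsius` and its field are hypothetical names; the deduction rules (`%d`/`%u` for integers, `%g` for floating point, "true"/"false" for bool) follow the str_format.h comments added in this patch.

    #include <string>
    #include "absl/strings/str_format.h"

    struct Celsius {
      double degrees = 21.5;

      // A free (friend) AbslStringify() overload; %v and absl::StrCat() can
      // then format Celsius values directly.
      template <typename Sink>
      friend void AbslStringify(Sink& sink, const Celsius& c) {
        absl::Format(&sink, "%v C", c.degrees);
      }
    };

    std::string StringifySketch() {
      // %v deduces the conversion from the argument type.
      return absl::StrFormat("temp=%v frozen=%v", Celsius{}, false);
      // -> "temp=21.5 C frozen=false"
    }
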
diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join.h index 33534536cf..ee5ae7efdf 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join.h @@ -72,21 +72,15 @@ ABSL_NAMESPACE_BEGIN // functions. You may provide your own Formatter to enable `absl::StrJoin()` to // work with arbitrary types. // -// The following is an example of a custom Formatter that simply uses -// `std::to_string()` to format an integer as a std::string. +// The following is an example of a custom Formatter that uses +// `absl::FormatDuration` to join a list of `absl::Duration`s. // -// struct MyFormatter { -// void operator()(std::string* out, int i) const { -// out->append(std::to_string(i)); -// } -// }; -// -// You would use the above formatter by passing an instance of it as the final -// argument to `absl::StrJoin()`: -// -// std::vector v = {1, 2, 3, 4}; -// std::string s = absl::StrJoin(v, "-", MyFormatter()); -// EXPECT_EQ("1-2-3-4", s); +// std::vector v = {absl::Seconds(1), absl::Milliseconds(10)}; +// std::string s = +// absl::StrJoin(v, ", ", [](std::string* out, absl::Duration dur) { +// absl::StrAppend(out, absl::FormatDuration(dur)); +// }); +// EXPECT_EQ("1s, 10ms", s); // // The following standard formatters are provided within this file: // diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join_test.cc index 2be6256e43..c986e863b6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_join_test.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" namespace { @@ -471,4 +473,136 @@ TEST(StrJoin, Tuple) { "-", absl::DereferenceFormatter(TestFormatter()))); } +// A minimal value type for `StrJoin` inputs. +// Used to ensure we do not excessively require more a specific type, such as a +// `string_view`. +// +// Anything that can be `data()` and `size()` is OK. +class TestValue { + public: + TestValue(const char* data, size_t size) : data_(data), size_(size) {} + const char* data() const { return data_; } + size_t size() const { return size_; } + + private: + const char* data_; + size_t size_; +}; + +// A minimal C++20 forward iterator, used to test that we do not impose +// excessive requirements on StrJoin inputs. +// +// The 2 main differences between pre-C++20 LegacyForwardIterator and the +// C++20 ForwardIterator are: +// 1. `operator->` is not required in C++20. +// 2. `operator*` result does not need to be an lvalue (a reference). +// +// The `operator->` requirement was removed on page 17 in: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1037r0.pdf +// +// See the `[iterator.requirements]` section of the C++ standard. +// +// The value type is a template parameter so that we can test the behaviour +// of `StrJoin` specializations, e.g. the NoFormatter specialization for +// `string_view`. 
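As a compilable restatement of the new str_join.h documentation example above (a sketch only; it assumes the Abseil strings and time libraries are linked):

#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/time/time.h"

int main() {
  std::vector<absl::Duration> v = {absl::Seconds(1), absl::Milliseconds(10)};
  // The formatter is any callable that appends one element to *out.
  std::string s =
      absl::StrJoin(v, ", ", [](std::string* out, absl::Duration dur) {
        absl::StrAppend(out, absl::FormatDuration(dur));
      });
  // s == "1s, 10ms"
  return s == "1s, 10ms" ? 0 : 1;
}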
+template +class TestIterator { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = ValueT; + using pointer = void; + using reference = const value_type&; + using difference_type = int; + + // `data` must outlive the result. + static TestIterator begin(const std::vector& data) { + return TestIterator(&data, 0); + } + + static TestIterator end(const std::vector& data) { + return TestIterator(nullptr, data.size()); + } + + bool operator==(const TestIterator& other) const { + return pos_ == other.pos_; + } + bool operator!=(const TestIterator& other) const { + return pos_ != other.pos_; + } + + // This deliberately returns a `prvalue`. + // The requirement to return a reference was removed in C++20. + value_type operator*() const { + return ValueT((*data_)[pos_].data(), (*data_)[pos_].size()); + } + + // `operator->()` is deliberately omitted. + // The requirement to provide it was removed in C++20. + + TestIterator& operator++() { + ++pos_; + return *this; + } + + TestIterator operator++(int) { + TestIterator result = *this; + ++(*this); + return result; + } + + TestIterator& operator--() { + --pos_; + return *this; + } + + TestIterator operator--(int) { + TestIterator result = *this; + --(*this); + return result; + } + + private: + TestIterator(const std::vector* data, size_t pos) + : data_(data), pos_(pos) {} + + const std::vector* data_; + size_t pos_; +}; + +template +class TestIteratorRange { + public: + // `data` must be non-null and must outlive the result. + explicit TestIteratorRange(const std::vector& data) + : begin_(TestIterator::begin(data)), + end_(TestIterator::end(data)) {} + + const TestIterator& begin() const { return begin_; } + const TestIterator& end() const { return end_; } + + private: + TestIterator begin_; + TestIterator end_; +}; + +TEST(StrJoin, TestIteratorRequirementsNoFormatter) { + const std::vector a = {"a", "b", "c"}; + + // When the value type is string-like (`std::string` or `string_view`), + // the NoFormatter template specialization is used internally. + EXPECT_EQ("a-b-c", + absl::StrJoin(TestIteratorRange(a), "-")); +} + +TEST(StrJoin, TestIteratorRequirementsCustomFormatter) { + const std::vector a = {"a", "b", "c"}; + EXPECT_EQ("a-b-c", + absl::StrJoin(TestIteratorRange(a), "-", + [](std::string* out, const TestValue& value) { + absl::StrAppend( + out, + absl::string_view(value.data(), value.size())); + })); +} + } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split.h index bfbca422a8..7bbb68a343 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split.h @@ -461,8 +461,7 @@ using EnableSplitIfString = // first two split strings become the `std::pair` `.first` and `.second` // members, respectively. The remaining split substrings are discarded. If there // are less than two split substrings, the empty string is used for the -// corresponding -// `std::pair` member. +// corresponding `std::pair` member. 
// // Example: // diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split_test.cc index f472f9eda1..1b4427b849 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/str_split_test.cc @@ -943,8 +943,14 @@ TEST(Delimiter, ByLength) { } TEST(Split, WorksWithLargeStrings) { +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER) + constexpr size_t kSize = (uint32_t{1} << 26) + 1; // 64M + 1 byte +#else + constexpr size_t kSize = (uint32_t{1} << 31) + 1; // 2G + 1 byte +#endif if (sizeof(size_t) > 4) { - std::string s((uint32_t{1} << 31) + 1, 'x'); // 2G + 1 byte + std::string s(kSize, 'x'); s.back() = '-'; std::vector v = absl::StrSplit(s, '-'); EXPECT_EQ(2, v.size()); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.cc index d596e08cde..e2261625f9 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.cc @@ -32,7 +32,7 @@ void WritePadding(std::ostream& o, size_t pad) { memset(fill_buf, o.fill(), sizeof(fill_buf)); while (pad) { size_t n = std::min(pad, sizeof(fill_buf)); - o.write(fill_buf, n); + o.write(fill_buf, static_cast(n)); pad -= n; } } @@ -63,7 +63,7 @@ std::ostream& operator<<(std::ostream& o, string_view piece) { size_t lpad = 0; size_t rpad = 0; if (static_cast(o.width()) > piece.size()) { - size_t pad = o.width() - piece.size(); + size_t pad = static_cast(o.width()) - piece.size(); if ((o.flags() & o.adjustfield) == o.left) { rpad = pad; } else { @@ -71,7 +71,7 @@ std::ostream& operator<<(std::ostream& o, string_view piece) { } } if (lpad) WritePadding(o, lpad); - o.write(piece.data(), piece.size()); + o.write(piece.data(), static_cast(piece.size())); if (rpad) WritePadding(o, rpad); o.width(0); } @@ -86,7 +86,7 @@ string_view::size_type string_view::find(string_view s, } const char* result = strings_internal::memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_); - return result ? result - ptr_ : npos; + return result ? static_cast(result - ptr_) : npos; } string_view::size_type string_view::find(char c, size_type pos) const noexcept { @@ -95,7 +95,7 @@ string_view::size_type string_view::find(char c, size_type pos) const noexcept { } const char* result = static_cast(memchr(ptr_ + pos, c, length_ - pos)); - return result != nullptr ? result - ptr_ : npos; + return result != nullptr ? static_cast(result - ptr_) : npos; } string_view::size_type string_view::rfind(string_view s, @@ -104,7 +104,7 @@ string_view::size_type string_view::rfind(string_view s, if (s.empty()) return std::min(length_, pos); const char* last = ptr_ + std::min(length_ - s.length_, pos) + s.length_; const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_); - return result != last ? result - ptr_ : npos; + return result != last ? static_cast(result - ptr_) : npos; } // Search range is [0..pos] inclusive. If pos == npos, search everything. @@ -207,22 +207,11 @@ string_view::size_type string_view::find_last_not_of( return npos; } -// MSVC has non-standard behavior that implicitly creates definitions for static -// const members. 
These implicit definitions conflict with explicit out-of-class -// member definitions that are required by the C++ standard, resulting in -// LNK1169 "multiply defined" errors at link time. __declspec(selectany) asks -// MSVC to choose only one definition for the symbol it decorates. See details -// at https://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx -#ifdef _MSC_VER -#define ABSL_STRING_VIEW_SELECTANY __declspec(selectany) -#else -#define ABSL_STRING_VIEW_SELECTANY -#endif -ABSL_STRING_VIEW_SELECTANY +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr string_view::size_type string_view::npos; -ABSL_STRING_VIEW_SELECTANY constexpr string_view::size_type string_view::kMaxSize; +#endif ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.h index ea760526da..eae11b2ab6 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view.h @@ -55,19 +55,14 @@ ABSL_NAMESPACE_END #else // ABSL_USES_STD_STRING_VIEW -#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \ - (defined(__GNUC__) && !defined(__clang__)) +#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \ + (defined(__GNUC__) && !defined(__clang__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1928) #define ABSL_INTERNAL_STRING_VIEW_MEMCMP __builtin_memcmp #else // ABSL_HAVE_BUILTIN(__builtin_memcmp) #define ABSL_INTERNAL_STRING_VIEW_MEMCMP memcmp #endif // ABSL_HAVE_BUILTIN(__builtin_memcmp) -#if defined(__cplusplus) && __cplusplus >= 201402L -#define ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR constexpr -#else -#define ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR -#endif - namespace absl { ABSL_NAMESPACE_BEGIN @@ -340,7 +335,7 @@ class string_view { // // Removes the first `n` characters from the `string_view`. Note that the // underlying string is not changed, only the view. - ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR void remove_prefix(size_type n) { + constexpr void remove_prefix(size_type n) { ABSL_HARDENING_ASSERT(n <= length_); ptr_ += n; length_ -= n; @@ -350,7 +345,7 @@ class string_view { // // Removes the last `n` characters from the `string_view`. Note that the // underlying string is not changed, only the view. - ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR void remove_suffix(size_type n) { + constexpr void remove_suffix(size_type n) { ABSL_HARDENING_ASSERT(n <= length_); length_ -= n; } @@ -358,7 +353,7 @@ class string_view { // string_view::swap() // // Swaps this `string_view` with another `string_view`. - ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR void swap(string_view& s) noexcept { + constexpr void swap(string_view& s) noexcept { auto t = *this; *this = s; s = t; @@ -596,7 +591,7 @@ class string_view { } private: - // The constructor from std::string delegates to this constuctor. + // The constructor from std::string delegates to this constructor. // See the comment on that constructor for the rationale. 
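A small sketch of what the now-unconditional constexpr mutators above permit, assuming a C++14-or-newer build (this mirrors the ConstexprMethodsHelper pattern in the test file rather than quoting it):

#include "absl/strings/string_view.h"

// Trims one character from each end entirely at compile time, relying on the
// constexpr remove_prefix()/remove_suffix() made unconditional above.
constexpr absl::string_view TrimBrackets(absl::string_view s) {
  s.remove_prefix(1);
  s.remove_suffix(1);
  return s;
}

// The (pointer, length) constructor is constexpr, so this is a constant
// expression; the debug/hardening asserts stay constant-evaluable as long as
// the checked condition holds.
static_assert(TrimBrackets(absl::string_view("[abc]", 5)).size() == 3,
              "constexpr trim");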
struct SkipCheckLengthTag {}; string_view(const char* data, size_type len, SkipCheckLengthTag) noexcept @@ -677,7 +672,6 @@ std::ostream& operator<<(std::ostream& o, string_view piece); ABSL_NAMESPACE_END } // namespace absl -#undef ABSL_INTERNAL_STRING_VIEW_CXX14_CONSTEXPR #undef ABSL_INTERNAL_STRING_VIEW_MEMCMP #endif // ABSL_USES_STD_STRING_VIEW diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view_test.cc index 2c13dd1c14..990c211a8e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/string_view_test.cc @@ -82,7 +82,7 @@ TEST(StringViewTest, Ctor) { // Null. absl::string_view s10; EXPECT_TRUE(s10.data() == nullptr); - EXPECT_EQ(0, s10.length()); + EXPECT_EQ(0u, s10.length()); } { @@ -90,17 +90,17 @@ TEST(StringViewTest, Ctor) { const char* hello = "hello"; absl::string_view s20(hello); EXPECT_TRUE(s20.data() == hello); - EXPECT_EQ(5, s20.length()); + EXPECT_EQ(5u, s20.length()); // const char* with length. absl::string_view s21(hello, 4); EXPECT_TRUE(s21.data() == hello); - EXPECT_EQ(4, s21.length()); + EXPECT_EQ(4u, s21.length()); // Not recommended, but valid C++ absl::string_view s22(hello, 6); EXPECT_TRUE(s22.data() == hello); - EXPECT_EQ(6, s22.length()); + EXPECT_EQ(6u, s22.length()); } { @@ -108,7 +108,7 @@ TEST(StringViewTest, Ctor) { std::string hola = "hola"; absl::string_view s30(hola); EXPECT_TRUE(s30.data() == hola.data()); - EXPECT_EQ(4, s30.length()); + EXPECT_EQ(4u, s30.length()); // std::string with embedded '\0'. hola.push_back('\0'); @@ -116,7 +116,7 @@ TEST(StringViewTest, Ctor) { hola.push_back('\0'); absl::string_view s31(hola); EXPECT_TRUE(s31.data() == hola.data()); - EXPECT_EQ(8, s31.length()); + EXPECT_EQ(8u, s31.length()); } { @@ -165,7 +165,7 @@ TEST(StringViewTest, STLComparator) { map.insert(std::make_pair(p1, 0)); map.insert(std::make_pair(p2, 1)); map.insert(std::make_pair(p3, 2)); - EXPECT_EQ(map.size(), 3); + EXPECT_EQ(map.size(), 3u); TestMap::const_iterator iter = map.begin(); EXPECT_EQ(iter->second, 1); @@ -183,7 +183,7 @@ TEST(StringViewTest, STLComparator) { EXPECT_TRUE(new_iter != map.end()); map.erase(new_iter); - EXPECT_EQ(map.size(), 2); + EXPECT_EQ(map.size(), 2u); iter = map.begin(); EXPECT_EQ(iter->second, 2); @@ -261,11 +261,11 @@ TEST(StringViewTest, ComparisonOperators) { TEST(StringViewTest, ComparisonOperatorsByCharacterPosition) { std::string x; - for (int i = 0; i < 256; i++) { + for (size_t i = 0; i < 256; i++) { x += 'a'; std::string y = x; COMPARE(true, ==, x, y); - for (int j = 0; j < i; j++) { + for (size_t j = 0; j < i; j++) { std::string z = x; z[j] = 'b'; // Differs in position 'j' COMPARE(false, ==, x, z); @@ -341,12 +341,12 @@ TEST(StringViewTest, STL1) { EXPECT_EQ(*(c.rend() - 1), 'x'); EXPECT_TRUE(a.rbegin() + 26 == a.rend()); - EXPECT_EQ(a.size(), 26); - EXPECT_EQ(b.size(), 3); - EXPECT_EQ(c.size(), 3); - EXPECT_EQ(d.size(), 6); - EXPECT_EQ(e.size(), 0); - EXPECT_EQ(f.size(), 7); + EXPECT_EQ(a.size(), 26u); + EXPECT_EQ(b.size(), 3u); + EXPECT_EQ(c.size(), 3u); + EXPECT_EQ(d.size(), 6u); + EXPECT_EQ(e.size(), 0u); + EXPECT_EQ(f.size(), 7u); EXPECT_TRUE(!d.empty()); EXPECT_TRUE(d.begin() != d.end()); @@ -356,17 +356,17 @@ TEST(StringViewTest, STL1) { EXPECT_TRUE(e.begin() == e.end()); char buf[4] = { '%', '%', '%', '%' }; - EXPECT_EQ(a.copy(buf, 4), 4); + 
EXPECT_EQ(a.copy(buf, 4), 4u); EXPECT_EQ(buf[0], a[0]); EXPECT_EQ(buf[1], a[1]); EXPECT_EQ(buf[2], a[2]); EXPECT_EQ(buf[3], a[3]); - EXPECT_EQ(a.copy(buf, 3, 7), 3); + EXPECT_EQ(a.copy(buf, 3, 7), 3u); EXPECT_EQ(buf[0], a[7]); EXPECT_EQ(buf[1], a[8]); EXPECT_EQ(buf[2], a[9]); EXPECT_EQ(buf[3], a[3]); - EXPECT_EQ(c.copy(buf, 99), 3); + EXPECT_EQ(c.copy(buf, 99), 3u); EXPECT_EQ(buf[0], c[0]); EXPECT_EQ(buf[1], c[1]); EXPECT_EQ(buf[2], c[2]); @@ -393,22 +393,22 @@ TEST(StringViewTest, STL2) { 7); d = absl::string_view(); - EXPECT_EQ(d.size(), 0); + EXPECT_EQ(d.size(), 0u); EXPECT_TRUE(d.empty()); EXPECT_TRUE(d.data() == nullptr); EXPECT_TRUE(d.begin() == d.end()); - EXPECT_EQ(a.find(b), 0); + EXPECT_EQ(a.find(b), 0u); EXPECT_EQ(a.find(b, 1), absl::string_view::npos); - EXPECT_EQ(a.find(c), 23); - EXPECT_EQ(a.find(c, 9), 23); + EXPECT_EQ(a.find(c), 23u); + EXPECT_EQ(a.find(c, 9), 23u); EXPECT_EQ(a.find(c, absl::string_view::npos), absl::string_view::npos); EXPECT_EQ(b.find(c), absl::string_view::npos); EXPECT_EQ(b.find(c, absl::string_view::npos), absl::string_view::npos); - EXPECT_EQ(a.find(d), 0); - EXPECT_EQ(a.find(e), 0); - EXPECT_EQ(a.find(d, 12), 12); - EXPECT_EQ(a.find(e, 17), 17); + EXPECT_EQ(a.find(d), 0u); + EXPECT_EQ(a.find(e), 0u); + EXPECT_EQ(a.find(d, 12), 12u); + EXPECT_EQ(a.find(e, 17), 17u); absl::string_view g("xx not found bb"); EXPECT_EQ(a.find(g), absl::string_view::npos); // empty string nonsense @@ -427,17 +427,17 @@ TEST(StringViewTest, STL2) { EXPECT_EQ(e.find(d, 4), std::string().find(std::string(), 4)); EXPECT_EQ(e.find(e, 4), std::string().find(std::string(), 4)); - EXPECT_EQ(a.find('a'), 0); - EXPECT_EQ(a.find('c'), 2); - EXPECT_EQ(a.find('z'), 25); + EXPECT_EQ(a.find('a'), 0u); + EXPECT_EQ(a.find('c'), 2u); + EXPECT_EQ(a.find('z'), 25u); EXPECT_EQ(a.find('$'), absl::string_view::npos); EXPECT_EQ(a.find('\0'), absl::string_view::npos); - EXPECT_EQ(f.find('\0'), 3); - EXPECT_EQ(f.find('3'), 2); - EXPECT_EQ(f.find('5'), 5); - EXPECT_EQ(g.find('o'), 4); - EXPECT_EQ(g.find('o', 4), 4); - EXPECT_EQ(g.find('o', 5), 8); + EXPECT_EQ(f.find('\0'), 3u); + EXPECT_EQ(f.find('3'), 2u); + EXPECT_EQ(f.find('5'), 5u); + EXPECT_EQ(g.find('o'), 4u); + EXPECT_EQ(g.find('o', 4), 4u); + EXPECT_EQ(g.find('o', 5), 8u); EXPECT_EQ(a.find('b', 5), absl::string_view::npos); // empty string nonsense EXPECT_EQ(d.find('\0'), absl::string_view::npos); @@ -449,8 +449,8 @@ TEST(StringViewTest, STL2) { EXPECT_EQ(d.find('x', 4), absl::string_view::npos); EXPECT_EQ(e.find('x', 7), absl::string_view::npos); - EXPECT_EQ(a.find(b.data(), 1, 0), 1); - EXPECT_EQ(a.find(c.data(), 9, 0), 9); + EXPECT_EQ(a.find(b.data(), 1, 0), 1u); + EXPECT_EQ(a.find(c.data(), 9, 0), 9u); EXPECT_EQ(a.find(c.data(), absl::string_view::npos, 0), absl::string_view::npos); EXPECT_EQ(b.find(c.data(), absl::string_view::npos, 0), @@ -460,16 +460,16 @@ TEST(StringViewTest, STL2) { EXPECT_EQ(e.find(b.data(), 7, 0), absl::string_view::npos); EXPECT_EQ(a.find(b.data(), 1), absl::string_view::npos); - EXPECT_EQ(a.find(c.data(), 9), 23); + EXPECT_EQ(a.find(c.data(), 9), 23u); EXPECT_EQ(a.find(c.data(), absl::string_view::npos), absl::string_view::npos); EXPECT_EQ(b.find(c.data(), absl::string_view::npos), absl::string_view::npos); // empty string nonsense EXPECT_EQ(d.find(b.data(), 4), absl::string_view::npos); EXPECT_EQ(e.find(b.data(), 7), absl::string_view::npos); - EXPECT_EQ(a.rfind(b), 0); - EXPECT_EQ(a.rfind(b, 1), 0); - EXPECT_EQ(a.rfind(c), 23); + EXPECT_EQ(a.rfind(b), 0u); + EXPECT_EQ(a.rfind(b, 1), 0u); + 
EXPECT_EQ(a.rfind(c), 23u); EXPECT_EQ(a.rfind(c, 22), absl::string_view::npos); EXPECT_EQ(a.rfind(c, 1), absl::string_view::npos); EXPECT_EQ(a.rfind(c, 0), absl::string_view::npos); @@ -477,8 +477,8 @@ TEST(StringViewTest, STL2) { EXPECT_EQ(b.rfind(c, 0), absl::string_view::npos); EXPECT_EQ(a.rfind(d), std::string(a).rfind(std::string())); EXPECT_EQ(a.rfind(e), std::string(a).rfind(std::string())); - EXPECT_EQ(a.rfind(d, 12), 12); - EXPECT_EQ(a.rfind(e, 17), 17); + EXPECT_EQ(a.rfind(d, 12), 12u); + EXPECT_EQ(a.rfind(e, 17), 17u); EXPECT_EQ(a.rfind(g), absl::string_view::npos); EXPECT_EQ(d.rfind(b), absl::string_view::npos); EXPECT_EQ(e.rfind(b), absl::string_view::npos); @@ -494,28 +494,28 @@ TEST(StringViewTest, STL2) { EXPECT_EQ(d.rfind(e), std::string().rfind(std::string())); EXPECT_EQ(e.rfind(e), std::string().rfind(std::string())); - EXPECT_EQ(g.rfind('o'), 8); + EXPECT_EQ(g.rfind('o'), 8u); EXPECT_EQ(g.rfind('q'), absl::string_view::npos); - EXPECT_EQ(g.rfind('o', 8), 8); - EXPECT_EQ(g.rfind('o', 7), 4); + EXPECT_EQ(g.rfind('o', 8), 8u); + EXPECT_EQ(g.rfind('o', 7), 4u); EXPECT_EQ(g.rfind('o', 3), absl::string_view::npos); - EXPECT_EQ(f.rfind('\0'), 3); - EXPECT_EQ(f.rfind('\0', 12), 3); - EXPECT_EQ(f.rfind('3'), 2); - EXPECT_EQ(f.rfind('5'), 5); + EXPECT_EQ(f.rfind('\0'), 3u); + EXPECT_EQ(f.rfind('\0', 12), 3u); + EXPECT_EQ(f.rfind('3'), 2u); + EXPECT_EQ(f.rfind('5'), 5u); // empty string nonsense EXPECT_EQ(d.rfind('o'), absl::string_view::npos); EXPECT_EQ(e.rfind('o'), absl::string_view::npos); EXPECT_EQ(d.rfind('o', 4), absl::string_view::npos); EXPECT_EQ(e.rfind('o', 7), absl::string_view::npos); - EXPECT_EQ(a.rfind(b.data(), 1, 0), 1); - EXPECT_EQ(a.rfind(c.data(), 22, 0), 22); - EXPECT_EQ(a.rfind(c.data(), 1, 0), 1); - EXPECT_EQ(a.rfind(c.data(), 0, 0), 0); - EXPECT_EQ(b.rfind(c.data(), 0, 0), 0); - EXPECT_EQ(d.rfind(b.data(), 4, 0), 0); - EXPECT_EQ(e.rfind(b.data(), 7, 0), 0); + EXPECT_EQ(a.rfind(b.data(), 1, 0), 1u); + EXPECT_EQ(a.rfind(c.data(), 22, 0), 22u); + EXPECT_EQ(a.rfind(c.data(), 1, 0), 1u); + EXPECT_EQ(a.rfind(c.data(), 0, 0), 0u); + EXPECT_EQ(b.rfind(c.data(), 0, 0), 0u); + EXPECT_EQ(d.rfind(b.data(), 4, 0), 0u); + EXPECT_EQ(e.rfind(b.data(), 7, 0), 0u); } // Continued from STL2 @@ -533,18 +533,18 @@ TEST(StringViewTest, STL2FindFirst) { absl::string_view g("xx not found bb"); d = absl::string_view(); - EXPECT_EQ(a.find_first_of(b), 0); - EXPECT_EQ(a.find_first_of(b, 0), 0); - EXPECT_EQ(a.find_first_of(b, 1), 1); - EXPECT_EQ(a.find_first_of(b, 2), 2); + EXPECT_EQ(a.find_first_of(b), 0u); + EXPECT_EQ(a.find_first_of(b, 0), 0u); + EXPECT_EQ(a.find_first_of(b, 1), 1u); + EXPECT_EQ(a.find_first_of(b, 2), 2u); EXPECT_EQ(a.find_first_of(b, 3), absl::string_view::npos); - EXPECT_EQ(a.find_first_of(c), 23); - EXPECT_EQ(a.find_first_of(c, 23), 23); - EXPECT_EQ(a.find_first_of(c, 24), 24); - EXPECT_EQ(a.find_first_of(c, 25), 25); + EXPECT_EQ(a.find_first_of(c), 23u); + EXPECT_EQ(a.find_first_of(c, 23), 23u); + EXPECT_EQ(a.find_first_of(c, 24), 24u); + EXPECT_EQ(a.find_first_of(c, 25), 25u); EXPECT_EQ(a.find_first_of(c, 26), absl::string_view::npos); - EXPECT_EQ(g.find_first_of(b), 13); - EXPECT_EQ(g.find_first_of(c), 0); + EXPECT_EQ(g.find_first_of(b), 13u); + EXPECT_EQ(g.find_first_of(c), 0u); EXPECT_EQ(a.find_first_of(f), absl::string_view::npos); EXPECT_EQ(f.find_first_of(a), absl::string_view::npos); // empty string nonsense @@ -557,19 +557,19 @@ TEST(StringViewTest, STL2FindFirst) { EXPECT_EQ(d.find_first_of(e), absl::string_view::npos); EXPECT_EQ(e.find_first_of(e), 
absl::string_view::npos); - EXPECT_EQ(a.find_first_not_of(b), 3); - EXPECT_EQ(a.find_first_not_of(c), 0); + EXPECT_EQ(a.find_first_not_of(b), 3u); + EXPECT_EQ(a.find_first_not_of(c), 0u); EXPECT_EQ(b.find_first_not_of(a), absl::string_view::npos); EXPECT_EQ(c.find_first_not_of(a), absl::string_view::npos); - EXPECT_EQ(f.find_first_not_of(a), 0); - EXPECT_EQ(a.find_first_not_of(f), 0); - EXPECT_EQ(a.find_first_not_of(d), 0); - EXPECT_EQ(a.find_first_not_of(e), 0); + EXPECT_EQ(f.find_first_not_of(a), 0u); + EXPECT_EQ(a.find_first_not_of(f), 0u); + EXPECT_EQ(a.find_first_not_of(d), 0u); + EXPECT_EQ(a.find_first_not_of(e), 0u); // empty string nonsense - EXPECT_EQ(a.find_first_not_of(d), 0); - EXPECT_EQ(a.find_first_not_of(e), 0); - EXPECT_EQ(a.find_first_not_of(d, 1), 1); - EXPECT_EQ(a.find_first_not_of(e, 1), 1); + EXPECT_EQ(a.find_first_not_of(d), 0u); + EXPECT_EQ(a.find_first_not_of(e), 0u); + EXPECT_EQ(a.find_first_not_of(d, 1), 1u); + EXPECT_EQ(a.find_first_not_of(e, 1), 1u); EXPECT_EQ(a.find_first_not_of(d, a.size() - 1), a.size() - 1); EXPECT_EQ(a.find_first_not_of(e, a.size() - 1), a.size() - 1); EXPECT_EQ(a.find_first_not_of(d, a.size()), absl::string_view::npos); @@ -588,11 +588,11 @@ TEST(StringViewTest, STL2FindFirst) { absl::string_view h("===="); EXPECT_EQ(h.find_first_not_of('='), absl::string_view::npos); EXPECT_EQ(h.find_first_not_of('=', 3), absl::string_view::npos); - EXPECT_EQ(h.find_first_not_of('\0'), 0); - EXPECT_EQ(g.find_first_not_of('x'), 2); - EXPECT_EQ(f.find_first_not_of('\0'), 0); - EXPECT_EQ(f.find_first_not_of('\0', 3), 4); - EXPECT_EQ(f.find_first_not_of('\0', 2), 2); + EXPECT_EQ(h.find_first_not_of('\0'), 0u); + EXPECT_EQ(g.find_first_not_of('x'), 2u); + EXPECT_EQ(f.find_first_not_of('\0'), 0u); + EXPECT_EQ(f.find_first_not_of('\0', 3), 4u); + EXPECT_EQ(f.find_first_not_of('\0', 2), 2u); // empty string nonsense EXPECT_EQ(d.find_first_not_of('x'), absl::string_view::npos); EXPECT_EQ(e.find_first_not_of('x'), absl::string_view::npos); @@ -618,20 +618,20 @@ TEST(StringViewTest, STL2FindLast) { d = absl::string_view(); EXPECT_EQ(h.find_last_of(a), absl::string_view::npos); - EXPECT_EQ(g.find_last_of(a), g.size()-1); - EXPECT_EQ(a.find_last_of(b), 2); - EXPECT_EQ(a.find_last_of(c), a.size()-1); - EXPECT_EQ(f.find_last_of(i), 6); - EXPECT_EQ(a.find_last_of('a'), 0); - EXPECT_EQ(a.find_last_of('b'), 1); - EXPECT_EQ(a.find_last_of('z'), 25); - EXPECT_EQ(a.find_last_of('a', 5), 0); - EXPECT_EQ(a.find_last_of('b', 5), 1); + EXPECT_EQ(g.find_last_of(a), g.size() - 1); + EXPECT_EQ(a.find_last_of(b), 2u); + EXPECT_EQ(a.find_last_of(c), a.size() - 1); + EXPECT_EQ(f.find_last_of(i), 6u); + EXPECT_EQ(a.find_last_of('a'), 0u); + EXPECT_EQ(a.find_last_of('b'), 1u); + EXPECT_EQ(a.find_last_of('z'), 25u); + EXPECT_EQ(a.find_last_of('a', 5), 0u); + EXPECT_EQ(a.find_last_of('b', 5), 1u); EXPECT_EQ(a.find_last_of('b', 0), absl::string_view::npos); - EXPECT_EQ(a.find_last_of('z', 25), 25); + EXPECT_EQ(a.find_last_of('z', 25), 25u); EXPECT_EQ(a.find_last_of('z', 24), absl::string_view::npos); - EXPECT_EQ(f.find_last_of(i, 5), 5); - EXPECT_EQ(f.find_last_of(i, 6), 6); + EXPECT_EQ(f.find_last_of(i, 5), 5u); + EXPECT_EQ(f.find_last_of(i, 6), 6u); EXPECT_EQ(f.find_last_of(a, 4), absl::string_view::npos); // empty string nonsense EXPECT_EQ(f.find_last_of(d), absl::string_view::npos); @@ -651,19 +651,19 @@ TEST(StringViewTest, STL2FindLast) { EXPECT_EQ(d.find_last_of(f, 4), absl::string_view::npos); EXPECT_EQ(e.find_last_of(f, 4), absl::string_view::npos); - 
EXPECT_EQ(a.find_last_not_of(b), a.size()-1); - EXPECT_EQ(a.find_last_not_of(c), 22); + EXPECT_EQ(a.find_last_not_of(b), a.size() - 1); + EXPECT_EQ(a.find_last_not_of(c), 22u); EXPECT_EQ(b.find_last_not_of(a), absl::string_view::npos); EXPECT_EQ(b.find_last_not_of(b), absl::string_view::npos); - EXPECT_EQ(f.find_last_not_of(i), 4); - EXPECT_EQ(a.find_last_not_of(c, 24), 22); - EXPECT_EQ(a.find_last_not_of(b, 3), 3); + EXPECT_EQ(f.find_last_not_of(i), 4u); + EXPECT_EQ(a.find_last_not_of(c, 24), 22u); + EXPECT_EQ(a.find_last_not_of(b, 3), 3u); EXPECT_EQ(a.find_last_not_of(b, 2), absl::string_view::npos); // empty string nonsense - EXPECT_EQ(f.find_last_not_of(d), f.size()-1); - EXPECT_EQ(f.find_last_not_of(e), f.size()-1); - EXPECT_EQ(f.find_last_not_of(d, 4), 4); - EXPECT_EQ(f.find_last_not_of(e, 4), 4); + EXPECT_EQ(f.find_last_not_of(d), f.size() - 1); + EXPECT_EQ(f.find_last_not_of(e), f.size() - 1); + EXPECT_EQ(f.find_last_not_of(d, 4), 4u); + EXPECT_EQ(f.find_last_not_of(e, 4), 4u); EXPECT_EQ(d.find_last_not_of(d), absl::string_view::npos); EXPECT_EQ(d.find_last_not_of(e), absl::string_view::npos); EXPECT_EQ(e.find_last_not_of(d), absl::string_view::npos); @@ -679,10 +679,10 @@ TEST(StringViewTest, STL2FindLast) { EXPECT_EQ(h.find_last_not_of('x'), h.size() - 1); EXPECT_EQ(h.find_last_not_of('='), absl::string_view::npos); - EXPECT_EQ(b.find_last_not_of('c'), 1); - EXPECT_EQ(h.find_last_not_of('x', 2), 2); + EXPECT_EQ(b.find_last_not_of('c'), 1u); + EXPECT_EQ(h.find_last_not_of('x', 2), 2u); EXPECT_EQ(h.find_last_not_of('=', 2), absl::string_view::npos); - EXPECT_EQ(b.find_last_not_of('b', 1), 0); + EXPECT_EQ(b.find_last_not_of('b', 1), 0u); // empty string nonsense EXPECT_EQ(d.find_last_not_of('x'), absl::string_view::npos); EXPECT_EQ(e.find_last_not_of('x'), absl::string_view::npos); @@ -734,7 +734,7 @@ TEST(StringViewTest, TruncSubstr) { TEST(StringViewTest, UTF8) { std::string utf8 = "\u00E1"; std::string utf8_twice = utf8 + " " + utf8; - int utf8_len = strlen(utf8.data()); + size_t utf8_len = strlen(utf8.data()); EXPECT_EQ(utf8_len, absl::string_view(utf8_twice).find_first_of(" ")); EXPECT_EQ(utf8_len, absl::string_view(utf8_twice).find_first_of(" \t")); } @@ -879,12 +879,12 @@ TEST(StringViewTest, FrontBackEmpty) { TEST(StringViewTest, NULLInput) { absl::string_view s; EXPECT_EQ(s.data(), nullptr); - EXPECT_EQ(s.size(), 0); + EXPECT_EQ(s.size(), 0u); #ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR s = absl::string_view(nullptr); EXPECT_EQ(s.data(), nullptr); - EXPECT_EQ(s.size(), 0); + EXPECT_EQ(s.size(), 0u); // .ToString() on a absl::string_view with nullptr should produce the empty // string. 
@@ -959,7 +959,7 @@ TEST(StringViewTest, NullSafeStringView) { { absl::string_view s = absl::NullSafeStringView(nullptr); EXPECT_EQ(nullptr, s.data()); - EXPECT_EQ(0, s.size()); + EXPECT_EQ(0u, s.size()); EXPECT_EQ(absl::string_view(), s); } { @@ -975,7 +975,7 @@ TEST(StringViewTest, ConstexprNullSafeStringView) { { constexpr absl::string_view s = absl::NullSafeStringView(nullptr); EXPECT_EQ(nullptr, s.data()); - EXPECT_EQ(0, s.size()); + EXPECT_EQ(0u, s.size()); EXPECT_EQ(absl::string_view(), s); } #if !defined(_MSC_VER) || _MSC_VER >= 1910 @@ -990,7 +990,7 @@ TEST(StringViewTest, ConstexprNullSafeStringView) { } { constexpr absl::string_view s = absl::NullSafeStringView("hello"); - EXPECT_EQ(s.size(), 5); + EXPECT_EQ(s.size(), 5u); EXPECT_EQ("hello", s); } #endif @@ -1036,7 +1036,7 @@ TEST(StringViewTest, ConstexprCompiles) { #ifdef ABSL_HAVE_CONSTEXPR_STRING_VIEW_FROM_CSTR constexpr absl::string_view cstr_strlen("foo"); - EXPECT_EQ(cstr_strlen.length(), 3); + EXPECT_EQ(cstr_strlen.length(), 3u); constexpr absl::string_view cstr_strlen2 = "bar"; EXPECT_EQ(cstr_strlen2, "bar"); @@ -1111,7 +1111,7 @@ TEST(StringViewTest, ConstexprCompiles) { EXPECT_NE(cstr_ptr, nullptr); constexpr size_t sp_npos = sp.npos; - EXPECT_EQ(sp_npos, -1); + EXPECT_EQ(sp_npos, static_cast(-1)); } constexpr char ConstexprMethodsHelper() { @@ -1179,7 +1179,7 @@ TEST(StringViewTest, BoundsCheck) { // Abseil's string_view implementation has bounds-checking in debug mode. absl::string_view h = "hello"; ABSL_EXPECT_DEATH_IF_SUPPORTED(h[5], ""); - ABSL_EXPECT_DEATH_IF_SUPPORTED(h[-1], ""); + ABSL_EXPECT_DEATH_IF_SUPPORTED(h[static_cast(-1)], ""); #endif #endif } @@ -1189,7 +1189,7 @@ TEST(ComparisonOpsTest, StringCompareNotAmbiguous) { EXPECT_LT("hello", std::string("world")); } -TEST(ComparisonOpsTest, HeterogenousStringViewEquals) { +TEST(ComparisonOpsTest, HeterogeneousStringViewEquals) { EXPECT_EQ(absl::string_view("hello"), std::string("hello")); EXPECT_EQ("hello", absl::string_view("hello")); } @@ -1201,17 +1201,17 @@ TEST(FindOneCharTest, EdgeCases) { a.remove_prefix(1); a.remove_suffix(1); - EXPECT_EQ(0, a.find('x')); - EXPECT_EQ(0, a.find('x', 0)); - EXPECT_EQ(4, a.find('x', 1)); - EXPECT_EQ(4, a.find('x', 4)); + EXPECT_EQ(0u, a.find('x')); + EXPECT_EQ(0u, a.find('x', 0)); + EXPECT_EQ(4u, a.find('x', 1)); + EXPECT_EQ(4u, a.find('x', 4)); EXPECT_EQ(absl::string_view::npos, a.find('x', 5)); - EXPECT_EQ(4, a.rfind('x')); - EXPECT_EQ(4, a.rfind('x', 5)); - EXPECT_EQ(4, a.rfind('x', 4)); - EXPECT_EQ(0, a.rfind('x', 3)); - EXPECT_EQ(0, a.rfind('x', 0)); + EXPECT_EQ(4u, a.rfind('x')); + EXPECT_EQ(4u, a.rfind('x', 5)); + EXPECT_EQ(4u, a.rfind('x', 4)); + EXPECT_EQ(0u, a.rfind('x', 3)); + EXPECT_EQ(0u, a.rfind('x', 0)); // Set a = "yyy". 
a.remove_prefix(1); @@ -1239,8 +1239,8 @@ TEST(HugeStringView, TwoPointTwoGB) { #if !defined(NDEBUG) && !defined(ABSL_USES_STD_STRING_VIEW) TEST(NonNegativeLenTest, NonNegativeLen) { - ABSL_EXPECT_DEATH_IF_SUPPORTED(absl::string_view("xyz", -1), - "len <= kMaxSize"); + ABSL_EXPECT_DEATH_IF_SUPPORTED( + absl::string_view("xyz", static_cast(-1)), "len <= kMaxSize"); } TEST(LenExceedsMaxSizeTest, LenExceedsMaxSize) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/strip.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/strip.h index 111872ca54..341e66fc92 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/strip.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/strip.h @@ -34,8 +34,9 @@ ABSL_NAMESPACE_BEGIN // ConsumePrefix() // -// Strips the `expected` prefix from the start of the given string, returning -// `true` if the strip operation succeeded or false otherwise. +// Strips the `expected` prefix, if found, from the start of `str`. +// If the operation succeeded, `true` is returned. If not, `false` +// is returned and `str` is not modified. // // Example: // @@ -49,8 +50,9 @@ inline bool ConsumePrefix(absl::string_view* str, absl::string_view expected) { } // ConsumeSuffix() // -// Strips the `expected` suffix from the end of the given string, returning -// `true` if the strip operation succeeded or false otherwise. +// Strips the `expected` suffix, if found, from the end of `str`. +// If the operation succeeded, `true` is returned. If not, `false` +// is returned and `str` is not modified. // // Example: // @@ -65,7 +67,7 @@ inline bool ConsumeSuffix(absl::string_view* str, absl::string_view expected) { // StripPrefix() // -// Returns a view into the input string 'str' with the given 'prefix' removed, +// Returns a view into the input string `str` with the given `prefix` removed, // but leaving the original string intact. If the prefix does not match at the // start of the string, returns the original string instead. ABSL_MUST_USE_RESULT inline absl::string_view StripPrefix( @@ -76,7 +78,7 @@ ABSL_MUST_USE_RESULT inline absl::string_view StripPrefix( // StripSuffix() // -// Returns a view into the input string 'str' with the given 'suffix' removed, +// Returns a view into the input string `str` with the given `suffix` removed, // but leaving the original string intact. If the suffix does not match at the // end of the string, returns the original string instead. ABSL_MUST_USE_RESULT inline absl::string_view StripSuffix( diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.cc index 1f3c7409ab..33a39305db 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.cc @@ -40,7 +40,8 @@ void SubstituteAndAppendArray(std::string* output, absl::string_view format, absl::CEscape(format).c_str()); #endif return; - } else if (absl::ascii_isdigit(format[i + 1])) { + } else if (absl::ascii_isdigit( + static_cast(format[i + 1]))) { int index = format[i + 1] - '0'; if (static_cast(index) >= num_args) { #ifndef NDEBUG @@ -75,11 +76,12 @@ void SubstituteAndAppendArray(std::string* output, absl::string_view format, // Build the string. 
size_t original_size = output->size(); - strings_internal::STLStringResizeUninitialized(output, original_size + size); + strings_internal::STLStringResizeUninitializedAmortized(output, + original_size + size); char* target = &(*output)[original_size]; for (size_t i = 0; i < format.size(); i++) { if (format[i] == '$') { - if (absl::ascii_isdigit(format[i + 1])) { + if (absl::ascii_isdigit(static_cast(format[i + 1]))) { const absl::string_view src = args_array[format[i + 1] - '0']; target = std::copy(src.begin(), src.end(), target); ++i; // Skip next char. @@ -109,7 +111,8 @@ Arg::Arg(const void* value) { } while (num != 0); *--ptr = 'x'; *--ptr = '0'; - piece_ = absl::string_view(ptr, scratch_ + sizeof(scratch_) - ptr); + piece_ = absl::string_view( + ptr, static_cast(scratch_ + sizeof(scratch_) - ptr)); } } @@ -131,7 +134,7 @@ Arg::Arg(Hex hex) { beg = writer; } - piece_ = absl::string_view(beg, end - beg); + piece_ = absl::string_view(beg, static_cast(end - beg)); } // TODO(jorg): Don't duplicate so much code between here and str_cat.cc @@ -146,7 +149,7 @@ Arg::Arg(Dec dec) { *--writer = '0' + (value % 10); value /= 10; } - *--writer = '0' + value; + *--writer = '0' + static_cast(value); if (neg) *--writer = '-'; ptrdiff_t fillers = writer - minfill; @@ -163,7 +166,7 @@ Arg::Arg(Dec dec) { if (add_sign_again) *--writer = '-'; } - piece_ = absl::string_view(writer, end - writer); + piece_ = absl::string_view(writer, static_cast(end - writer)); } } // namespace substitute_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.h index 151c56f543..5c3f6eff34 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute.h @@ -55,6 +55,8 @@ // * bool (Printed as "true" or "false") // * pointer types other than char* (Printed as "0x", // except that null is printed as "NULL") +// * user-defined types via the `AbslStringify()` customization point. See the +// documentation for `absl::StrCat` for an explanation on how to use this. // // If an invalid format string is provided, Substitute returns an empty string // and SubstituteAndAppend does not change the provided output string. @@ -79,6 +81,7 @@ #include "absl/base/port.h" #include "absl/strings/ascii.h" #include "absl/strings/escaping.h" +#include "absl/strings/internal/stringify_sink.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" @@ -102,14 +105,14 @@ class Arg { // Overloads for string-y things // // Explicitly overload `const char*` so the compiler doesn't cast to `bool`. - Arg(const char* value) // NOLINT(runtime/explicit) + Arg(const char* value) // NOLINT(google-explicit-constructor) : piece_(absl::NullSafeStringView(value)) {} template Arg( // NOLINT const std::basic_string, Allocator>& value) noexcept : piece_(value) {} - Arg(absl::string_view value) // NOLINT(runtime/explicit) + Arg(absl::string_view value) // NOLINT(google-explicit-constructor) : piece_(value) {} // Overloads for primitives @@ -119,48 +122,70 @@ class Arg { // probably using them as 8-bit integers and would probably prefer an integer // representation. However, we can't really know, so we make the caller decide // what to do. 
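To make the two Arg extensions in this hunk concrete, the AbslStringify overload and the scoped-enum overload added just below, here is a hedged sketch mirroring the new substitute_test.cc cases further down (type names are illustrative, not from the test file):

#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"

struct MyStruct {
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const MyStruct& s) {
    sink.Append(absl::StrCat("MyStruct{.value = ", s.value, "}"));
  }
  int value;
};

enum class Level : int { kLow = 0, kHigh = 1 };  // scoped enum, no implicit int conversion

int main() {
  MyStruct s{17};
  // "$0" is rendered via AbslStringify; the scoped enum prints its underlying value.
  std::string out = absl::Substitute("$0 at level $1", s, Level::kHigh);
  // out == "MyStruct{.value = 17} at level 1"
  return out.empty() ? 1 : 0;
}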
- Arg(char value) // NOLINT(runtime/explicit) + Arg(char value) // NOLINT(google-explicit-constructor) : piece_(scratch_, 1) { scratch_[0] = value; } Arg(short value) // NOLINT(*) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} Arg(unsigned short value) // NOLINT(*) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} - Arg(int value) // NOLINT(runtime/explicit) + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} + Arg(int value) // NOLINT(google-explicit-constructor) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} - Arg(unsigned int value) // NOLINT(runtime/explicit) + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} + Arg(unsigned int value) // NOLINT(google-explicit-constructor) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} Arg(long value) // NOLINT(*) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} Arg(unsigned long value) // NOLINT(*) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} Arg(long long value) // NOLINT(*) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} Arg(unsigned long long value) // NOLINT(*) : piece_(scratch_, - numbers_internal::FastIntToBuffer(value, scratch_) - scratch_) {} - Arg(float value) // NOLINT(runtime/explicit) + static_cast( + numbers_internal::FastIntToBuffer(value, scratch_) - + scratch_)) {} + Arg(float value) // NOLINT(google-explicit-constructor) : piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) { } - Arg(double value) // NOLINT(runtime/explicit) + Arg(double value) // NOLINT(google-explicit-constructor) : piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) { } - Arg(bool value) // NOLINT(runtime/explicit) + Arg(bool value) // NOLINT(google-explicit-constructor) : piece_(value ? "true" : "false") {} - Arg(Hex hex); // NOLINT(runtime/explicit) - Arg(Dec dec); // NOLINT(runtime/explicit) + template ::value>::type> + Arg( // NOLINT(google-explicit-constructor) + const T& v, strings_internal::StringifySink&& sink = {}) + : piece_(strings_internal::ExtractStringification(sink, v)) {} - // vector::reference and const_reference require special help to - // convert to `AlphaNum` because it requires two user defined conversions. + Arg(Hex hex); // NOLINT(google-explicit-constructor) + Arg(Dec dec); // NOLINT(google-explicit-constructor) + + // vector::reference and const_reference require special help to convert + // to `Arg` because it requires two user defined conversions. template ::value && @@ -172,7 +197,15 @@ class Arg { // `void*` values, with the exception of `char*`, are printed as // "0x". However, in the case of `nullptr`, "NULL" is printed. - Arg(const void* value); // NOLINT(runtime/explicit) + Arg(const void* value); // NOLINT(google-explicit-constructor) + + // Normal enums are already handled by the integer formatters. 
+ // This overload matches only scoped enums. + template {} && !std::is_convertible{}>::type> + Arg(T value) // NOLINT(google-explicit-constructor) + : Arg(static_cast::type>(value)) {} Arg(const Arg&) = delete; Arg& operator=(const Arg&) = delete; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute_test.cc index 442c921528..9f04545f89 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/strings/substitute_test.cc @@ -22,6 +22,16 @@ namespace { +struct MyStruct { + template + friend void AbslStringify(Sink& sink, const MyStruct& s) { + sink.Append("MyStruct{.value = "); + sink.Append(absl::StrCat(s.value)); + sink.Append("}"); + } + int value; +}; + TEST(SubstituteTest, Substitute) { // Basic. EXPECT_EQ("Hello, world!", absl::Substitute("$0, $1!", "Hello", "world")); @@ -70,7 +80,7 @@ TEST(SubstituteTest, Substitute) { // Volatile Pointer. // Like C++ streamed I/O, such pointers implicitly become bool volatile int vol = 237; - volatile int *volatile volptr = &vol; + volatile int* volatile volptr = &vol; str = absl::Substitute("$0", volptr); EXPECT_EQ("true", str); @@ -128,6 +138,11 @@ TEST(SubstituteTest, Substitute) { const char* null_cstring = nullptr; EXPECT_EQ("Text: ''", absl::Substitute("Text: '$0'", null_cstring)); + + MyStruct s1 = MyStruct{17}; + MyStruct s2 = MyStruct{1043}; + EXPECT_EQ("MyStruct{.value = 17}, MyStruct{.value = 1043}", + absl::Substitute("$0, $1", s1, s2)); } TEST(SubstituteTest, SubstituteAndAppend) { @@ -171,6 +186,12 @@ TEST(SubstituteTest, SubstituteAndAppend) { absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5 $6 $7 $8 $9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j"); EXPECT_EQ("a b c d e f g h i j", str); + + str.clear(); + MyStruct s1 = MyStruct{17}; + MyStruct s2 = MyStruct{1043}; + absl::SubstituteAndAppend(&str, "$0, $1", s1, s2); + EXPECT_EQ("MyStruct{.value = 17}, MyStruct{.value = 1043}", str); } TEST(SubstituteTest, VectorBoolRef) { @@ -184,6 +205,54 @@ TEST(SubstituteTest, VectorBoolRef) { EXPECT_EQ("Logic be like: true false true false", str); } +TEST(SubstituteTest, Enums) { + enum UnscopedEnum { kEnum0 = 0, kEnum1 = 1 }; + EXPECT_EQ("0 1", absl::Substitute("$0 $1", UnscopedEnum::kEnum0, + UnscopedEnum::kEnum1)); + + enum class ScopedEnum { kEnum0 = 0, kEnum1 = 1 }; + EXPECT_EQ("0 1", + absl::Substitute("$0 $1", ScopedEnum::kEnum0, ScopedEnum::kEnum1)); + + enum class ScopedEnumInt32 : int32_t { kEnum0 = 989, kEnum1 = INT32_MIN }; + EXPECT_EQ("989 -2147483648", + absl::Substitute("$0 $1", ScopedEnumInt32::kEnum0, + ScopedEnumInt32::kEnum1)); + + enum class ScopedEnumUInt32 : uint32_t { kEnum0 = 1, kEnum1 = UINT32_MAX }; + EXPECT_EQ("1 4294967295", absl::Substitute("$0 $1", ScopedEnumUInt32::kEnum0, + ScopedEnumUInt32::kEnum1)); + + enum class ScopedEnumInt64 : int64_t { kEnum0 = -1, kEnum1 = 42949672950 }; + EXPECT_EQ("-1 42949672950", absl::Substitute("$0 $1", ScopedEnumInt64::kEnum0, + ScopedEnumInt64::kEnum1)); + + enum class ScopedEnumUInt64 : uint64_t { kEnum0 = 1, kEnum1 = 42949672950 }; + EXPECT_EQ("1 42949672950", absl::Substitute("$0 $1", ScopedEnumUInt64::kEnum0, + ScopedEnumUInt64::kEnum1)); + + enum class ScopedEnumChar : signed char { kEnum0 = -1, kEnum1 = 1 }; + EXPECT_EQ("-1 1", absl::Substitute("$0 $1", ScopedEnumChar::kEnum0, + ScopedEnumChar::kEnum1)); + + enum class ScopedEnumUChar : 
unsigned char { + kEnum0 = 0, + kEnum1 = 1, + kEnumMax = 255 + }; + EXPECT_EQ("0 1 255", absl::Substitute("$0 $1 $2", ScopedEnumUChar::kEnum0, + ScopedEnumUChar::kEnum1, + ScopedEnumUChar::kEnumMax)); + + enum class ScopedEnumInt16 : int16_t { kEnum0 = -100, kEnum1 = 10000 }; + EXPECT_EQ("-100 10000", absl::Substitute("$0 $1", ScopedEnumInt16::kEnum0, + ScopedEnumInt16::kEnum1)); + + enum class ScopedEnumUInt16 : uint16_t { kEnum0 = 0, kEnum1 = 10000 }; + EXPECT_EQ("0 10000", absl::Substitute("$0 $1", ScopedEnumUInt16::kEnum0, + ScopedEnumUInt16::kEnum1)); +} + #ifdef GTEST_HAS_DEATH_TEST TEST(SubstituteDeathTest, SubstituteDeath) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/CMakeLists.txt index 605efe2d02..f64653bbdf 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/CMakeLists.txt @@ -14,6 +14,7 @@ # limitations under the License. # +# Internal-only target, do not depend on directly. absl_cc_library( NAME graphcycles_internal @@ -32,6 +33,7 @@ absl_cc_library( absl::raw_logging_internal ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME kernel_timeout_internal @@ -125,6 +127,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME thread_pool @@ -133,8 +136,9 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS - absl::synchronization + absl::any_invocable absl::core_headers + absl::synchronization TESTONLY ) @@ -157,6 +161,20 @@ absl_cc_test( GTest::gmock_main ) +absl_cc_test( + NAME + mutex_method_pointer_test + SRCS + "mutex_method_pointer_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::synchronization + GTest::gmock_main +) + absl_cc_test( NAME notification_test @@ -170,6 +188,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME per_thread_sem_test_common diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc index 53a71b342b..44e6129bb0 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc @@ -38,7 +38,7 @@ ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist; // A per-thread destructor for reclaiming associated ThreadIdentity objects. // Since we must preserve their storage we cache them for re-use. -void ReclaimThreadIdentity(void* v) { +static void ReclaimThreadIdentity(void* v) { base_internal::ThreadIdentity* identity = static_cast(v); @@ -48,8 +48,6 @@ void ReclaimThreadIdentity(void* v) { base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks); } - PerThreadSem::Destroy(identity); - // We must explicitly clear the current thread's identity: // (a) Subsequent (unrelated) per-thread destructors may require an identity. 
// We must guarantee a new identity is used in this case (this instructor @@ -71,7 +69,12 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) { return (addr + align - 1) & ~(align - 1); } -static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) { +void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) { + PerThreadSem::Init(identity); +} + +static void ResetThreadIdentityBetweenReuse( + base_internal::ThreadIdentity* identity) { base_internal::PerThreadSynch* pts = &identity->per_thread_synch; pts->next = nullptr; pts->skip = nullptr; @@ -116,8 +119,9 @@ static base_internal::ThreadIdentity* NewThreadIdentity() { identity = reinterpret_cast( RoundUp(reinterpret_cast(allocation), base_internal::PerThreadSynch::kAlignment)); + OneTimeInitThreadIdentity(identity); } - ResetThreadIdentity(identity); + ResetThreadIdentityBetweenReuse(identity); return identity; } @@ -127,7 +131,6 @@ static base_internal::ThreadIdentity* NewThreadIdentity() { // REQUIRES: CurrentThreadIdentity(false) == nullptr base_internal::ThreadIdentity* CreateThreadIdentity() { base_internal::ThreadIdentity* identity = NewThreadIdentity(); - PerThreadSem::Init(identity); // Associate the value with the current thread, and attach our destructor. base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity); return identity; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.h index e121f68377..4cfde0913c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.h @@ -36,10 +36,6 @@ namespace synchronization_internal { // For private use only. base_internal::ThreadIdentity* CreateThreadIdentity(); -// A per-thread destructor for reclaiming associated ThreadIdentity objects. -// For private use only. -void ReclaimThreadIdentity(void* v); - // Returns the ThreadIdentity object representing the calling thread; guaranteed // to be unique for its lifetime. The returned object will remain valid for the // program's lifetime; although it may be re-assigned to a subsequent thread. diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/futex.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/futex.h index 06fbd6d072..cb97da09ce 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/futex.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/futex.h @@ -87,7 +87,7 @@ class FutexImpl { public: static int WaitUntil(std::atomic *v, int32_t val, KernelTimeout t) { - int err = 0; + long err = 0; // NOLINT(runtime/int) if (t.has_timeout()) { // https://locklessinc.com/articles/futex_cheat_sheet/ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time. 
@@ -105,41 +105,44 @@ class FutexImpl { FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr); } if (ABSL_PREDICT_FALSE(err != 0)) { - err = -errno; + return -errno; } - return err; + return 0; } static int WaitBitsetAbsoluteTimeout(std::atomic *v, int32_t val, int32_t bits, const struct timespec *abstime) { - int err = syscall(SYS_futex, reinterpret_cast(v), - FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime, - nullptr, bits); + // NOLINTNEXTLINE(runtime/int) + long err = syscall(SYS_futex, reinterpret_cast(v), + FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime, + nullptr, bits); if (ABSL_PREDICT_FALSE(err != 0)) { - err = -errno; + return -errno; } - return err; + return 0; } static int Wake(std::atomic *v, int32_t count) { - int err = syscall(SYS_futex, reinterpret_cast(v), - FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count); + // NOLINTNEXTLINE(runtime/int) + long err = syscall(SYS_futex, reinterpret_cast(v), + FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count); if (ABSL_PREDICT_FALSE(err < 0)) { - err = -errno; + return -errno; } - return err; + return 0; } // FUTEX_WAKE_BITSET static int WakeBitset(std::atomic *v, int32_t count, int32_t bits) { - int err = syscall(SYS_futex, reinterpret_cast(v), - FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr, - nullptr, bits); + // NOLINTNEXTLINE(runtime/int) + long err = syscall(SYS_futex, reinterpret_cast(v), + FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr, + nullptr, bits); if (ABSL_PREDICT_FALSE(err < 0)) { - err = -errno; + return -errno; } - return err; + return 0; } }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc index 27fec21681..feec4581fe 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc @@ -181,9 +181,9 @@ class NodeSet { return true; } - void erase(uint32_t v) { + void erase(int32_t v) { uint32_t i = FindIndex(v); - if (static_cast(table_[i]) == v) { + if (table_[i] == v) { table_[i] = kDel; } } @@ -195,7 +195,7 @@ class NodeSet { for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); ) bool Next(int32_t* cursor, int32_t* elem) { while (static_cast(*cursor) < table_.size()) { - int32_t v = table_[*cursor]; + int32_t v = table_[static_cast(*cursor)]; (*cursor)++; if (v >= 0) { *elem = v; @@ -210,24 +210,26 @@ class NodeSet { Vec table_; uint32_t occupied_; // Count of non-empty slots (includes deleted slots) - static uint32_t Hash(uint32_t a) { return a * 41; } + static uint32_t Hash(int32_t a) { return static_cast(a * 41); } // Return index for storing v. May return an empty index or deleted index - int FindIndex(int32_t v) const { + uint32_t FindIndex(int32_t v) const { // Search starting at hash index. const uint32_t mask = table_.size() - 1; uint32_t i = Hash(v) & mask; - int deleted_index = -1; // If >= 0, index of first deleted element we see + uint32_t deleted_index = 0; // index of first deleted element we see + bool seen_deleted_element = false; while (true) { int32_t e = table_[i]; if (v == e) { return i; } else if (e == kEmpty) { // Return any previously encountered deleted slot. - return (deleted_index >= 0) ? deleted_index : i; - } else if (e == kDel && deleted_index < 0) { + return seen_deleted_element ? 
deleted_index : i; + } else if (e == kDel && !seen_deleted_element) { // Keep searching since v might be present later. deleted_index = i; + seen_deleted_element = true; } i = (i + 1) & mask; // Linear probing; quadratic is slightly slower. } @@ -268,7 +270,7 @@ inline GraphId MakeId(int32_t index, uint32_t version) { } inline int32_t NodeIndex(GraphId id) { - return static_cast(id.handle & 0xfffffffful); + return static_cast(id.handle); } inline uint32_t NodeVersion(GraphId id) { @@ -298,7 +300,7 @@ class PointerMap { int32_t Find(void* ptr) { auto masked = base_internal::HidePtr(ptr); for (int32_t i = table_[Hash(ptr)]; i != -1;) { - Node* n = (*nodes_)[i]; + Node* n = (*nodes_)[static_cast(i)]; if (n->masked_ptr == masked) return i; i = n->next_hash; } @@ -307,7 +309,7 @@ class PointerMap { void Add(void* ptr, int32_t i) { int32_t* head = &table_[Hash(ptr)]; - (*nodes_)[i]->next_hash = *head; + (*nodes_)[static_cast(i)]->next_hash = *head; *head = i; } @@ -317,7 +319,7 @@ class PointerMap { auto masked = base_internal::HidePtr(ptr); for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) { int32_t index = *slot; - Node* n = (*nodes_)[index]; + Node* n = (*nodes_)[static_cast(index)]; if (n->masked_ptr == masked) { *slot = n->next_hash; // Remove n from linked list n->next_hash = -1; @@ -358,7 +360,7 @@ struct GraphCycles::Rep { }; static Node* FindNode(GraphCycles::Rep* rep, GraphId id) { - Node* n = rep->nodes_[NodeIndex(id)]; + Node* n = rep->nodes_[static_cast(NodeIndex(id))]; return (n->version == NodeVersion(id)) ? n : nullptr; } @@ -393,7 +395,7 @@ bool GraphCycles::CheckInvariants() const { ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank); } HASH_FOR_EACH(y, nx->out) { - Node* ny = r->nodes_[y]; + Node* ny = r->nodes_[static_cast(y)]; if (nx->rank >= ny->rank) { ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y, nx->rank, ny->rank); @@ -406,14 +408,14 @@ bool GraphCycles::CheckInvariants() const { GraphId GraphCycles::GetId(void* ptr) { int32_t i = rep_->ptrmap_.Find(ptr); if (i != -1) { - return MakeId(i, rep_->nodes_[i]->version); + return MakeId(i, rep_->nodes_[static_cast(i)]->version); } else if (rep_->free_nodes_.empty()) { Node* n = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena)) Node; n->version = 1; // Avoid 0 since it is used by InvalidGraphId() n->visited = false; - n->rank = rep_->nodes_.size(); + n->rank = static_cast(rep_->nodes_.size()); n->masked_ptr = base_internal::HidePtr(ptr); n->nstack = 0; n->priority = 0; @@ -425,7 +427,7 @@ GraphId GraphCycles::GetId(void* ptr) { // a permutation of [0,rep_->nodes_.size()-1]. int32_t r = rep_->free_nodes_.back(); rep_->free_nodes_.pop_back(); - Node* n = rep_->nodes_[r]; + Node* n = rep_->nodes_[static_cast(r)]; n->masked_ptr = base_internal::HidePtr(ptr); n->nstack = 0; n->priority = 0; @@ -439,12 +441,12 @@ void GraphCycles::RemoveNode(void* ptr) { if (i == -1) { return; } - Node* x = rep_->nodes_[i]; + Node* x = rep_->nodes_[static_cast(i)]; HASH_FOR_EACH(y, x->out) { - rep_->nodes_[y]->in.erase(i); + rep_->nodes_[static_cast(y)]->in.erase(i); } HASH_FOR_EACH(y, x->in) { - rep_->nodes_[y]->out.erase(i); + rep_->nodes_[static_cast(y)]->out.erase(i); } x->in.clear(); x->out.clear(); @@ -520,7 +522,7 @@ bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) { // Since we do not call Reorder() on this path, clear any visited // markers left by ForwardDFS. 
for (const auto& d : r->deltaf_) { - r->nodes_[d]->visited = false; + r->nodes_[static_cast(d)]->visited = false; } return false; } @@ -538,14 +540,14 @@ static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) { while (!r->stack_.empty()) { n = r->stack_.back(); r->stack_.pop_back(); - Node* nn = r->nodes_[n]; + Node* nn = r->nodes_[static_cast(n)]; if (nn->visited) continue; nn->visited = true; r->deltaf_.push_back(n); HASH_FOR_EACH(w, nn->out) { - Node* nw = r->nodes_[w]; + Node* nw = r->nodes_[static_cast(w)]; if (nw->rank == upper_bound) { return false; // Cycle } @@ -564,14 +566,14 @@ static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) { while (!r->stack_.empty()) { n = r->stack_.back(); r->stack_.pop_back(); - Node* nn = r->nodes_[n]; + Node* nn = r->nodes_[static_cast(n)]; if (nn->visited) continue; nn->visited = true; r->deltab_.push_back(n); HASH_FOR_EACH(w, nn->in) { - Node* nw = r->nodes_[w]; + Node* nw = r->nodes_[static_cast(w)]; if (!nw->visited && lower_bound < nw->rank) { r->stack_.push_back(w); } @@ -596,7 +598,7 @@ static void Reorder(GraphCycles::Rep* r) { // Assign the ranks in order to the collected list. for (uint32_t i = 0; i < r->list_.size(); i++) { - r->nodes_[r->list_[i]]->rank = r->merged_[i]; + r->nodes_[static_cast(r->list_[i])]->rank = r->merged_[i]; } } @@ -604,7 +606,8 @@ static void Sort(const Vec& nodes, Vec* delta) { struct ByRank { const Vec* nodes; bool operator()(int32_t a, int32_t b) const { - return (*nodes)[a]->rank < (*nodes)[b]->rank; + return (*nodes)[static_cast(a)]->rank < + (*nodes)[static_cast(b)]->rank; } }; ByRank cmp; @@ -616,8 +619,10 @@ static void MoveToList( GraphCycles::Rep* r, Vec* src, Vec* dst) { for (auto& v : *src) { int32_t w = v; - v = r->nodes_[w]->rank; // Replace v entry with its rank - r->nodes_[w]->visited = false; // Prepare for future DFS calls + // Replace v entry with its rank + v = r->nodes_[static_cast(w)]->rank; + // Prepare for future DFS calls + r->nodes_[static_cast(w)]->visited = false; dst->push_back(w); } } @@ -647,7 +652,8 @@ int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len, } if (path_len < max_path_len) { - path[path_len] = MakeId(n, rep_->nodes_[n]->version); + path[path_len] = + MakeId(n, rep_->nodes_[static_cast(n)]->version); } path_len++; r->stack_.push_back(-1); // Will remove tentative path entry @@ -656,7 +662,7 @@ int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len, return path_len; } - HASH_FOR_EACH(w, r->nodes_[n]->out) { + HASH_FOR_EACH(w, r->nodes_[static_cast(n)]->out) { if (seen.insert(w)) { r->stack_.push_back(w); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h index bbd4d2d70f..44a3a2e802 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h @@ -111,7 +111,8 @@ class KernelTimeout { constexpr uint64_t max_nanos = (std::numeric_limits::max)() - 999999u; uint64_t ms_from_now = - (std::min(max_nanos, ns_ - now) + 999999u) / 1000000u; + ((std::min)(max_nanos, static_cast(ns_ - now)) + 999999u) / + 1000000u; if (ms_from_now > kInfinite) { return kInfinite; } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc index a6031787e0..469e8f3298 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc @@ -47,10 +47,6 @@ void PerThreadSem::Init(base_internal::ThreadIdentity *identity) { identity->is_idle.store(false, std::memory_order_relaxed); } -void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) { - Waiter::GetWaiter(identity)->~Waiter(); -} - void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) { const int ticker = identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h index 7beae8ef1d..90a88809e4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h @@ -66,10 +66,6 @@ class PerThreadSem { // REQUIRES: May only be called by ThreadIdentity. static void Init(base_internal::ThreadIdentity* identity); - // Destroy the PerThreadSem associated with "identity". - // REQUIRES: May only be called by ThreadIdentity. - static void Destroy(base_internal::ThreadIdentity* identity); - // Increments "identity"'s count. static inline void Post(base_internal::ThreadIdentity* identity); @@ -81,8 +77,7 @@ class PerThreadSem { // Permitted callers. friend class PerThreadSemTest; friend class absl::Mutex; - friend absl::base_internal::ThreadIdentity* CreateThreadIdentity(); - friend void ReclaimThreadIdentity(void* v); + friend void OneTimeInitThreadIdentity(absl::base_internal::ThreadIdentity*); }; } // namespace synchronization_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc index db1184e679..24a6b54827 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc @@ -174,6 +174,15 @@ TEST_F(PerThreadSemTest, Timeouts) { EXPECT_TRUE(Wait(negative_timeout)); } +TEST_F(PerThreadSemTest, ThreadIdentityReuse) { + // Create a base_internal::ThreadIdentity object and keep reusing it. There + // should be no memory or resource leaks. 
+ for (int i = 0; i < 10000; i++) { + std::thread t([]() { GetOrCreateCurrentThreadIdentity(); }); + t.join(); + } +} + } // namespace } // namespace synchronization_internal diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/thread_pool.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/thread_pool.h index 0cb96dacde..5eb0bb605e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/thread_pool.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/thread_pool.h @@ -20,9 +20,11 @@ #include #include #include // NOLINT(build/c++11) +#include #include #include "absl/base/thread_annotations.h" +#include "absl/functional/any_invocable.h" #include "absl/synchronization/mutex.h" namespace absl { @@ -33,6 +35,7 @@ namespace synchronization_internal { class ThreadPool { public: explicit ThreadPool(int num_threads) { + threads_.reserve(num_threads); for (int i = 0; i < num_threads; ++i) { threads_.push_back(std::thread(&ThreadPool::WorkLoop, this)); } @@ -54,7 +57,7 @@ class ThreadPool { } // Schedule a function to be run on a ThreadPool thread immediately. - void Schedule(std::function func) { + void Schedule(absl::AnyInvocable func) { assert(func != nullptr); absl::MutexLock l(&mu_); queue_.push(std::move(func)); @@ -67,7 +70,7 @@ class ThreadPool { void WorkLoop() { while (true) { - std::function func; + absl::AnyInvocable func; { absl::MutexLock l(&mu_); mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable)); @@ -82,7 +85,7 @@ class ThreadPool { } absl::Mutex mu_; - std::queue> queue_ ABSL_GUARDED_BY(mu_); + std::queue> queue_ ABSL_GUARDED_BY(mu_); std::vector threads_; }; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc index 28ef311e4a..f2051d6725 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc @@ -71,8 +71,6 @@ Waiter::Waiter() { futex_.store(0, std::memory_order_relaxed); } -Waiter::~Waiter() = default; - bool Waiter::Wait(KernelTimeout t) { // Loop until we can atomically decrement futex from a positive // value, waiting on a futex while we believe it is zero. @@ -161,18 +159,6 @@ Waiter::Waiter() { wakeup_count_ = 0; } -Waiter::~Waiter() { - const int err = pthread_mutex_destroy(&mu_); - if (err != 0) { - ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err); - } - - const int err2 = pthread_cond_destroy(&cv_); - if (err2 != 0) { - ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2); - } -} - bool Waiter::Wait(KernelTimeout t) { struct timespec abs_timeout; if (t.has_timeout()) { @@ -240,12 +226,6 @@ Waiter::Waiter() { wakeups_.store(0, std::memory_order_relaxed); } -Waiter::~Waiter() { - if (sem_destroy(&sem_) != 0) { - ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno); - } -} - bool Waiter::Wait(KernelTimeout t) { struct timespec abs_timeout; if (t.has_timeout()) { @@ -363,11 +343,6 @@ Waiter::Waiter() { wakeup_count_ = 0; } -// SRW locks and condition variables do not need to be explicitly destroyed. 
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock -// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with -Waiter::~Waiter() = default; - bool Waiter::Wait(KernelTimeout t) { SRWLOCK *mu = WinHelper::GetLock(this); CONDITION_VARIABLE *cv = WinHelper::GetCond(this); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.h index be3df180d4..b8adfeb537 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/internal/waiter.h @@ -71,9 +71,6 @@ class Waiter { Waiter(const Waiter&) = delete; Waiter& operator=(const Waiter&) = delete; - // Destroy any data to track waits. - ~Waiter(); - // Blocks the calling thread until a matching call to `Post()` or // `t` has passed. Returns `true` if woken (`Post()` called), // `false` on timeout. @@ -106,6 +103,12 @@ class Waiter { #endif private: + // The destructor must not be called since Mutex/CondVar + // can use PerThreadSem/Waiter after the thread exits. + // Waiter objects are embedded in ThreadIdentity objects, + // which are reused via a freelist and are never destroyed. + ~Waiter() = delete; + #if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX // Futexes are defined by specification to be 32-bits. // Thus std::atomic must be just an int32_t with lockfree methods. @@ -136,8 +139,11 @@ class Waiter { // REQUIRES: WinHelper::GetLock(this) must be held. void InternalCondVarPoke(); - // We can't include Windows.h in our headers, so we use aligned charachter + // We can't include Windows.h in our headers, so we use aligned character // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE. + // SRW locks and condition variables do not need to be explicitly destroyed. + // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock + // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with alignas(void*) unsigned char mu_storage_[sizeof(void*)]; alignas(void*) unsigned char cv_storage_[sizeof(void*)]; int waiter_count_; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/lifetime_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/lifetime_test.cc index cc973a3290..e6274232f1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/lifetime_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/lifetime_test.cc @@ -123,10 +123,10 @@ class OnDestruction { }; // These tests require that the compiler correctly supports C++11 constant -// initialization... but MSVC has a known regression since v19.10: +// initialization... but MSVC has a known regression since v19.10 till v19.25: // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html -// TODO(epastor): Limit the affected range once MSVC fixes this bug. -#if defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER > 1900) +#if defined(__clang__) || \ + !(defined(_MSC_VER) && _MSC_VER > 1900 && _MSC_VER < 1925) // kConstInit // Test early usage. 
(Declaration comes first; definitions must appear after // the test runner.) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.cc index 76ad41fe16..dd771421f8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.cc @@ -36,6 +36,9 @@ #include #include #include +#include +#include +#include #include // NOLINT(build/c++11) #include "absl/base/attributes.h" @@ -109,7 +112,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, bool locking, bool trylock, bool read_lock); -void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) { +void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) { submit_profile_data.Store(fn); } @@ -134,25 +137,42 @@ enum DelayMode { AGGRESSIVE, GENTLE }; struct ABSL_CACHELINE_ALIGNED MutexGlobals { absl::once_flag once; int spinloop_iterations = 0; - int32_t mutex_sleep_limit[2] = {}; + int32_t mutex_sleep_spins[2] = {}; + absl::Duration mutex_sleep_time; }; +absl::Duration MeasureTimeToYield() { + absl::Time before = absl::Now(); + ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)(); + return absl::Now() - before; +} + const MutexGlobals &GetMutexGlobals() { ABSL_CONST_INIT static MutexGlobals data; absl::base_internal::LowLevelCallOnce(&data.once, [&]() { const int num_cpus = absl::base_internal::NumCPUs(); data.spinloop_iterations = num_cpus > 1 ? 1500 : 0; - // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is + // If this a uniprocessor, only yield/sleep. + // Real-time threads are often unable to yield, so the sleep time needs + // to be long enough to keep the calling thread asleep until scheduling + // happens. + // If this is multiprocessor, allow spinning. If the mode is // aggressive then spin many times before yielding. If the mode is // gentle then spin only a few times before yielding. Aggressive spinning // is used to ensure that an Unlock() call, which must get the spin lock // for any thread to make progress gets it without undue delay. if (num_cpus > 1) { - data.mutex_sleep_limit[AGGRESSIVE] = 5000; - data.mutex_sleep_limit[GENTLE] = 250; + data.mutex_sleep_spins[AGGRESSIVE] = 5000; + data.mutex_sleep_spins[GENTLE] = 250; + data.mutex_sleep_time = absl::Microseconds(10); } else { - data.mutex_sleep_limit[AGGRESSIVE] = 0; - data.mutex_sleep_limit[GENTLE] = 0; + data.mutex_sleep_spins[AGGRESSIVE] = 0; + data.mutex_sleep_spins[GENTLE] = 0; + data.mutex_sleep_time = MeasureTimeToYield() * 5; + data.mutex_sleep_time = + std::min(data.mutex_sleep_time, absl::Milliseconds(1)); + data.mutex_sleep_time = + std::max(data.mutex_sleep_time, absl::Microseconds(10)); } }); return data; @@ -163,7 +183,8 @@ namespace synchronization_internal { // Returns the Mutex delay on iteration `c` depending on the given `mode`. // The returned value should be used as `c` for the next call to `MutexDelay`. int MutexDelay(int32_t c, int mode) { - const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode]; + const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode]; + const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time; if (c < limit) { // Spin. c++; @@ -176,7 +197,7 @@ int MutexDelay(int32_t c, int mode) { c++; } else { // Then wait. 
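// Illustrative sketch (not the Mutex code): the spin / yield-once / sleep
// backoff that MutexDelay() implements, with the sleep interval derived from
// a measured yield cost and clamped the way GetMutexGlobals() clamps
// mutex_sleep_time.  The Backoff struct and its thresholds are assumptions
// made up for this example.
#include <algorithm>
#include <chrono>
#include <thread>

namespace backoff_sketch {
using Clock = std::chrono::steady_clock;

inline Clock::duration MeasureTimeToYield() {
  auto before = Clock::now();
  std::this_thread::yield();
  return Clock::now() - before;
}

struct Backoff {
  int spin_limit;              // e.g. 250 (gentle) or 5000 (aggressive)
  Clock::duration sleep_time;  // kept within [10us, 1ms]

  explicit Backoff(int limit) : spin_limit(limit) {
    const auto lo = std::chrono::duration_cast<Clock::duration>(
        std::chrono::microseconds(10));
    const auto hi = std::chrono::duration_cast<Clock::duration>(
        std::chrono::milliseconds(1));
    sleep_time = std::clamp(MeasureTimeToYield() * 5, lo, hi);
  }

  // Returns the updated iteration counter; callers feed it back in, exactly
  // like the c parameter of MutexDelay().
  int Step(int c) const {
    if (c < spin_limit) return c + 1;  // keep spinning
    if (c == spin_limit) {             // yield exactly once
      std::this_thread::yield();
      return c + 1;
    }
    std::this_thread::sleep_for(sleep_time);  // then sleep, and start over
    return 0;
  }
};
}  // namespace backoff_sketch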
- absl::SleepFor(absl::Microseconds(10)); + absl::SleepFor(sleep_time); c = 0; } ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0); @@ -325,7 +346,7 @@ static struct SynchEvent { // this is a trivial hash table for the events static SynchEvent *EnsureSynchEvent(std::atomic *addr, const char *name, intptr_t bits, intptr_t lockbit) { - uint32_t h = reinterpret_cast(addr) % kNSynchEvent; + uint32_t h = reinterpret_cast(addr) % kNSynchEvent; SynchEvent *e; // first look for existing SynchEvent struct.. synch_event_mu.Lock(); @@ -378,7 +399,7 @@ static void UnrefSynchEvent(SynchEvent *e) { // is clear before doing so). static void ForgetSynchEvent(std::atomic *addr, intptr_t bits, intptr_t lockbit) { - uint32_t h = reinterpret_cast(addr) % kNSynchEvent; + uint32_t h = reinterpret_cast(addr) % kNSynchEvent; SynchEvent **pe; SynchEvent *e; synch_event_mu.Lock(); @@ -402,7 +423,7 @@ static void ForgetSynchEvent(std::atomic *addr, intptr_t bits, // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is // called. static SynchEvent *GetSynchEvent(const void *addr) { - uint32_t h = reinterpret_cast(addr) % kNSynchEvent; + uint32_t h = reinterpret_cast(addr) % kNSynchEvent; SynchEvent *e; synch_event_mu.Lock(); for (e = synch_event[h]; @@ -430,7 +451,13 @@ static void PostSynchEvent(void *obj, int ev) { char buffer[ABSL_ARRAYSIZE(pcs) * 24]; int pos = snprintf(buffer, sizeof (buffer), " @"); for (int i = 0; i != n; i++) { - pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]); + int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast(pos), + " %p", pcs[i]); + if (b < 0 || + static_cast(b) >= sizeof(buffer) - static_cast(pos)) { + break; + } + pos += b; } ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj, (e == nullptr ? "" : e->name), buffer); @@ -486,7 +513,8 @@ struct SynchWaitParams { cvmu(cvmu_arg), thread(thread_arg), cv_word(cv_word_arg), - contention_start_cycles(base_internal::CycleClock::Now()) {} + contention_start_cycles(base_internal::CycleClock::Now()), + should_submit_contention_data(false) {} const Mutex::MuHow how; // How this thread needs to wait. const Condition *cond; // The condition that this thread is waiting for. @@ -504,6 +532,7 @@ struct SynchWaitParams { int64_t contention_start_cycles; // Time (in cycles) when this thread started // to contend for the mutex. + bool should_submit_contention_data; }; struct SynchLocksHeld { @@ -1273,15 +1302,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen, char sym[kSymLen]; int len = 0; for (int i = 0; i != n; i++) { + if (len >= maxlen) + return buf; + size_t count = static_cast(maxlen - len); if (symbolize) { if (!symbolizer(pcs[i], sym, kSymLen)) { sym[0] = '\0'; } - snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n", - (i == 0 ? "\n" : ""), - pcs[i], sym); + snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? 
"\n" : ""), pcs[i], + sym); } else { - snprintf(buf + len, maxlen - len, " %p", pcs[i]); + snprintf(buf + len, count, " %p", pcs[i]); } len += strlen(&buf[len]); } @@ -1366,12 +1397,12 @@ static GraphId DeadlockCheck(Mutex *mu) { bool symbolize = number_of_reported_deadlocks <= 2; ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s", CurrentStackString(b->buf, sizeof (b->buf), symbolize)); - int len = 0; + size_t len = 0; for (int j = 0; j != all_locks->n; j++) { void* pr = deadlock_graph->Ptr(all_locks->locks[j].id); if (pr != nullptr) { snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr); - len += static_cast(strlen(&b->buf[len])); + len += strlen(&b->buf[len]); } } ABSL_RAW_LOG(ERROR, @@ -1744,23 +1775,33 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() { ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock); } -// The zap_desig_waker bitmask is used to clear the designated waker flag in -// the mutex if this thread has blocked, and therefore may be the designated -// waker. -static const intptr_t zap_desig_waker[] = { - ~static_cast(0), // not blocked - ~static_cast( - kMuDesig) // blocked; turn off the designated waker bit -}; +// Clears the designated waker flag in the mutex if this thread has blocked, and +// therefore may be the designated waker. +static intptr_t ClearDesignatedWakerMask(int flag) { + assert(flag >= 0); + assert(flag <= 1); + switch (flag) { + case 0: // not blocked + return ~static_cast(0); + case 1: // blocked; turn off the designated waker bit + return ~static_cast(kMuDesig); + } + ABSL_INTERNAL_UNREACHABLE; +} -// The ignore_waiting_writers bitmask is used to ignore the existence -// of waiting writers if a reader that has already blocked once -// wakes up. -static const intptr_t ignore_waiting_writers[] = { - ~static_cast(0), // not blocked - ~static_cast( - kMuWrWait) // blocked; pretend there are no waiting writers -}; +// Conditionally ignores the existence of waiting writers if a reader that has +// already blocked once wakes up. +static intptr_t IgnoreWaitingWritersMask(int flag) { + assert(flag >= 0); + assert(flag <= 1); + switch (flag) { + case 0: // not blocked + return ~static_cast(0); + case 1: // blocked; pretend there are no waiting writers + return ~static_cast(kMuWrWait); + } + ABSL_INTERNAL_UNREACHABLE; +} // Internal version of LockWhen(). See LockSlowWithDeadline() ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond, @@ -1780,8 +1821,8 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, // operation tsan considers that we've already released the mutex. bool res = false; #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE - const int flags = read_lock ? __tsan_mutex_read_lock : 0; - const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0); + const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0; + const uint32_t tryflags = flags | (trylock ? 
__tsan_mutex_try_lock : 0); #endif if (locking) { // For lock we pretend that we have finished the operation, @@ -1852,8 +1893,10 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond, bool unlock = false; if ((v & how->fast_need_zero) == 0 && // try fast acquire mu_.compare_exchange_strong( - v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) + - how->fast_add, + v, + (how->fast_or | + (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) + + how->fast_add, std::memory_order_acquire, std::memory_order_relaxed)) { if (cond == nullptr || EvalConditionAnnotated(cond, this, true, false, how == kShared)) { @@ -1892,7 +1935,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) { // Test for either of two situations that should not occur in v: // kMuWriter and kMuReader // kMuWrWait and !kMuWait - const uintptr_t w = v ^ kMuWait; + const uintptr_t w = static_cast(v ^ kMuWait); // By flipping that bit, we can now test for: // kMuWriter and kMuReader in w // kMuWrWait and kMuWait in w @@ -1927,9 +1970,10 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { CheckForMutexCorruption(v, "Lock"); if ((v & waitp->how->slow_need_zero) == 0) { if (mu_.compare_exchange_strong( - v, (waitp->how->fast_or | - (v & zap_desig_waker[flags & kMuHasBlocked])) + - waitp->how->fast_add, + v, + (waitp->how->fast_or | + (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) + + waitp->how->fast_add, std::memory_order_acquire, std::memory_order_relaxed)) { if (waitp->cond == nullptr || EvalConditionAnnotated(waitp->cond, this, true, false, @@ -1946,8 +1990,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters // This thread tries to become the one and only waiter. PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags); - intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) | - kMuWait; + intptr_t nv = + (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) | + kMuWait; ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed"); if (waitp->how == kExclusive && (v & kMuReader) != 0) { nv |= kMuWrWait; @@ -1961,12 +2006,13 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { waitp->thread->waitp = nullptr; } } else if ((v & waitp->how->slow_inc_need_zero & - ignore_waiting_writers[flags & kMuHasBlocked]) == 0) { + IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) { // This is a reader that needs to increment the reader count, // but the count is currently held in the last waiter. 
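// Illustrative sketch (names invented for this example): the pattern the
// ClearDesignatedWakerMask()/IgnoreWaitingWritersMask() helpers above use in
// place of the old lookup arrays.  Given a 0/1 "has blocked" flag, return
// either an all-ones mask (keep every bit) or a mask with one bit cleared,
// and AND it into the word that is about to be installed with
// compare_exchange.  kSomeBit stands in for kMuDesig / kMuWrWait.
#include <cassert>
#include <cstdint>

namespace mask_sketch {
constexpr intptr_t kSomeBit = 0x10;

inline intptr_t ClearBitIf(int flag) {
  assert(flag == 0 || flag == 1);
  switch (flag) {
    case 0:  // thread has not blocked: keep all bits
      return ~static_cast<intptr_t>(0);
    case 1:  // thread has blocked: drop kSomeBit
      return ~kSomeBit;
  }
  return ~static_cast<intptr_t>(0);  // not reached; placates the compiler
}

// Usage: compute the word to install, stripping the bit when applicable.
inline intptr_t NextWord(intptr_t v, int has_blocked, intptr_t bits_to_add) {
  return (v & ClearBitIf(has_blocked)) | bits_to_add;
}
}  // namespace mask_sketch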
if (mu_.compare_exchange_strong( - v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin | - kMuReader, + v, + (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) | + kMuSpin | kMuReader, std::memory_order_acquire, std::memory_order_relaxed)) { PerThreadSynch *h = GetPerThreadSynch(v); h->readers += kMuOne; // inc reader count in waiter @@ -1987,8 +2033,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { } } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves mu_.compare_exchange_strong( - v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin | - kMuWait, + v, + (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) | + kMuSpin | kMuWait, std::memory_order_acquire, std::memory_order_relaxed)) { PerThreadSynch *h = GetPerThreadSynch(v); PerThreadSynch *new_h = Enqueue(h, waitp, v, flags); @@ -2315,19 +2362,26 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { } // end of for(;;)-loop if (wake_list != kPerThreadSynchNull) { - int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles; - bool cond_waiter = wake_list->cond_waiter; + int64_t total_wait_cycles = 0; + int64_t max_wait_cycles = 0; + int64_t now = base_internal::CycleClock::Now(); do { + // Profile lock contention events only if the waiter was trying to acquire + // the lock, not waiting on a condition variable or Condition. + if (!wake_list->cond_waiter) { + int64_t cycles_waited = + (now - wake_list->waitp->contention_start_cycles); + total_wait_cycles += cycles_waited; + if (max_wait_cycles == 0) max_wait_cycles = cycles_waited; + wake_list->waitp->contention_start_cycles = now; + wake_list->waitp->should_submit_contention_data = true; + } wake_list = Wakeup(wake_list); // wake waiters } while (wake_list != kPerThreadSynchNull); - if (!cond_waiter) { - // Sample lock contention events only if the (first) waiter was trying to - // acquire the lock, not waiting on a condition variable or Condition. - int64_t wait_cycles = - base_internal::CycleClock::Now() - enqueue_timestamp; - mutex_tracer("slow release", this, wait_cycles); + if (total_wait_cycles > 0) { + mutex_tracer("slow release", this, total_wait_cycles); ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); - submit_profile_data(enqueue_timestamp); + submit_profile_data(total_wait_cycles); ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); } } @@ -2492,9 +2546,9 @@ void CondVar::Remove(PerThreadSynch *s) { // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via // the logging code, or via a Condition function) and might potentially attempt // to block this thread. That would be a problem if the thread were already on -// a the condition variable waiter queue. Thus, we use the waitp->cv_word -// to tell the unlock code to call CondVarEnqueue() to queue the thread on the -// condition variable queue just before the mutex is to be unlocked, and (most +// a condition variable waiter queue. Thus, we use the waitp->cv_word to tell +// the unlock code to call CondVarEnqueue() to queue the thread on the condition +// variable queue just before the mutex is to be unlocked, and (most // importantly) after any call to an external routine that might re-enter the // mutex code. static void CondVarEnqueue(SynchWaitParams *waitp) { @@ -2557,6 +2611,23 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) { while (waitp.thread->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) { + // DecrementSynchSem returned due to timeout. 
+ // Now we will either (1) remove ourselves from the wait list in Remove + // below, in which case Remove will set thread.state = kAvailable and + // we will not call DecrementSynchSem again; or (2) Signal/SignalAll + // has removed us concurrently and is calling Wakeup, which will set + // thread.state = kAvailable and post to the semaphore. + // It's important to reset the timeout for the case (2) because otherwise + // we can live-lock in this loop since DecrementSynchSem will always + // return immediately due to timeout, but Signal/SignalAll is not + // necessary set thread.state = kAvailable yet (and is not scheduled + // due to thread priorities or other scheduler artifacts). + // Note this could also be resolved if Signal/SignalAll would set + // thread.state = kAvailable while holding the wait list spin lock. + // But this can't be easily done for SignalAll since it grabs the whole + // wait list with a single compare-exchange and does not really grab + // the spin lock. + t = KernelTimeout::Never(); this->Remove(waitp.thread); rc = true; } @@ -2711,25 +2782,32 @@ static bool Dereference(void *arg) { return *(static_cast(arg)); } -Condition::Condition() {} // null constructor, used for kTrue only +Condition::Condition() = default; // null constructor, used for kTrue only const Condition Condition::kTrue; Condition::Condition(bool (*func)(void *), void *arg) : eval_(&CallVoidPtrFunction), - function_(func), - method_(nullptr), - arg_(arg) {} + arg_(arg) { + static_assert(sizeof(&func) <= sizeof(callback_), + "An overlarge function pointer passed to Condition."); + StoreCallback(func); +} bool Condition::CallVoidPtrFunction(const Condition *c) { - return (*c->function_)(c->arg_); + using FunctionPointer = bool (*)(void *); + FunctionPointer function_pointer; + std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer)); + return (*function_pointer)(c->arg_); } Condition::Condition(const bool *cond) : eval_(CallVoidPtrFunction), - function_(Dereference), - method_(nullptr), // const_cast is safe since Dereference does not modify arg - arg_(const_cast(cond)) {} + arg_(const_cast(cond)) { + using FunctionPointer = bool (*)(void *); + const FunctionPointer dereference = Dereference; + StoreCallback(dereference); +} bool Condition::Eval() const { // eval_ == null for kTrue @@ -2737,14 +2815,15 @@ bool Condition::Eval() const { } bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) { - if (a == nullptr) { + // kTrue logic. + if (a == nullptr || a->eval_ == nullptr) { return b == nullptr || b->eval_ == nullptr; + }else if (b == nullptr || b->eval_ == nullptr) { + return false; } - if (b == nullptr || b->eval_ == nullptr) { - return a->eval_ == nullptr; - } - return a->eval_ == b->eval_ && a->function_ == b->function_ && - a->arg_ == b->arg_ && a->method_ == b->method_; + // Check equality of the representative fields. 
+ return a->eval_ == b->eval_ && a->arg_ == b->arg_ && + !memcmp(a->callback_, b->callback_, sizeof(a->callback_)); } ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.h index f49e0c83d6..779aafa0ba 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex.h @@ -60,6 +60,8 @@ #include #include +#include +#include #include #include "absl/base/const_init.h" @@ -174,9 +176,12 @@ class ABSL_LOCKABLE Mutex { // Mutex::AssertHeld() // - // Return immediately if this thread holds the `Mutex` exclusively (in write - // mode). Otherwise, may report an error (typically by crashing with a - // diagnostic), or may return immediately. + // Require that the mutex be held exclusively (write mode) by this thread. + // + // If the mutex is not currently held by this thread, this function may report + // an error (typically by crashing with a diagnostic) or it may do nothing. + // This function is intended only as a tool to assist debugging; it doesn't + // guarantee correctness. void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK(); // --------------------------------------------------------------------------- @@ -236,9 +241,13 @@ class ABSL_LOCKABLE Mutex { // Mutex::AssertReaderHeld() // - // Returns immediately if this thread holds the `Mutex` in at least shared - // mode (read mode). Otherwise, may report an error (typically by - // crashing with a diagnostic), or may return immediately. + // Require that the mutex be held at least in shared mode (read mode) by this + // thread. + // + // If the mutex is not currently held by this thread, this function may report + // an error (typically by crashing with a diagnostic) or it may do nothing. + // This function is intended only as a tool to assist debugging; it doesn't + // guarantee correctness. void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK(); // Mutex::WriterLock() @@ -605,12 +614,12 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock { // Condition // ----------------------------------------------------------------------------- // -// As noted above, `Mutex` contains a number of member functions which take a -// `Condition` as an argument; clients can wait for conditions to become `true` -// before attempting to acquire the mutex. These sections are known as -// "condition critical" sections. To use a `Condition`, you simply need to -// construct it, and use within an appropriate `Mutex` member function; -// everything else in the `Condition` class is an implementation detail. +// `Mutex` contains a number of member functions which take a `Condition` as an +// argument; clients can wait for conditions to become `true` before attempting +// to acquire the mutex. These sections are known as "condition critical" +// sections. To use a `Condition`, you simply need to construct it, and use +// within an appropriate `Mutex` member function; everything else in the +// `Condition` class is an implementation detail. // // A `Condition` is specified as a function pointer which returns a boolean. 
// `Condition` functions should be pure functions -- their results should depend @@ -735,22 +744,53 @@ class Condition { static bool GuaranteedEqual(const Condition *a, const Condition *b); private: - typedef bool (*InternalFunctionType)(void * arg); - typedef bool (Condition::*InternalMethodType)(); - typedef bool (*InternalMethodCallerType)(void * arg, - InternalMethodType internal_method); + // Sizing an allocation for a method pointer can be subtle. In the Itanium + // specifications, a method pointer has a predictable, uniform size. On the + // other hand, MSVC ABI, method pointer sizes vary based on the + // inheritance of the class. Specifically, method pointers from classes with + // multiple inheritance are bigger than those of classes with single + // inheritance. Other variations also exist. - bool (*eval_)(const Condition*); // Actual evaluator - InternalFunctionType function_; // function taking pointer returning bool - InternalMethodType method_; // method returning bool - void *arg_; // arg of function_ or object of method_ +#ifndef _MSC_VER + // Allocation for a function pointer or method pointer. + // The {0} initializer ensures that all unused bytes of this buffer are + // always zeroed out. This is necessary, because GuaranteedEqual() compares + // all of the bytes, unaware of which bytes are relevant to a given `eval_`. + using MethodPtr = bool (Condition::*)(); + char callback_[sizeof(MethodPtr)] = {0}; +#else + // It is well known that the larget MSVC pointer-to-member is 24 bytes. This + // may be the largest known pointer-to-member of any platform. For this + // reason we will allocate 24 bytes for MSVC platform toolchains. + char callback_[24] = {0}; +#endif - Condition(); // null constructor used only to create kTrue + // Function with which to evaluate callbacks and/or arguments. + bool (*eval_)(const Condition*); + + // Either an argument for a function call or an object for a method call. + void *arg_; // Various functions eval_ can point to: static bool CallVoidPtrFunction(const Condition*); template static bool CastAndCallFunction(const Condition* c); template static bool CastAndCallMethod(const Condition* c); + + // Helper methods for storing, validating, and reading callback arguments. + template + inline void StoreCallback(T callback) { + static_assert( + sizeof(callback) <= sizeof(callback_), + "An overlarge pointer was passed as a callback to Condition."); + std::memcpy(callback_, &callback, sizeof(callback)); + } + + template + inline void ReadCallback(T *callback) const { + std::memcpy(callback, callback_, sizeof(*callback)); + } + + Condition(); // null constructor used only to create kTrue }; // ----------------------------------------------------------------------------- @@ -778,9 +818,9 @@ class Condition { // // Usage to wake T is: // mu.Lock(); -// // process data, possibly establishing C -// if (C) { cv->Signal(); } -// mu.Unlock(); +// // process data, possibly establishing C +// if (C) { cv->Signal(); } +// mu.Unlock(); // // If C may be useful to more than one waiter, use `SignalAll()` instead of // `Signal()`. 
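// Illustrative sketch (a toy Holder, not the Condition class): the callback_
// storage scheme the patch above introduces.  A function or method pointer is
// copied byte-for-byte into a zero-initialized char buffer and copied back
// out before the call, so one buffer can hold differently typed pointers and
// the unused tail bytes stay zero, which keeps a memcmp-based equality check
// meaningful.
#include <cstring>

namespace callback_sketch {
class Holder {
 public:
  template <typename T>
  explicit Holder(T callback) {
    static_assert(sizeof(callback) <= sizeof(callback_),
                  "callback does not fit in the buffer");
    std::memcpy(callback_, &callback, sizeof(callback));
  }

  template <typename T>
  T Read() const {
    T callback;
    std::memcpy(&callback, callback_, sizeof(callback));
    return callback;
  }

  bool SameBytes(const Holder &other) const {
    return std::memcmp(callback_, other.callback_, sizeof(callback_)) == 0;
  }

 private:
  // 24 bytes is enough for the large MSVC pointer-to-member representations;
  // unused bytes stay zero so SameBytes() compares cleanly.
  char callback_[24] = {0};
};

inline bool AlwaysTrue(void *) { return true; }

// Usage: CallStored(Holder(&AlwaysTrue), nullptr) returns true by calling the
// stored bool(*)(void*) back through the erased buffer.
inline bool CallStored(const Holder &h, void *arg) {
  auto fn = h.Read<bool (*)(void *)>();
  return fn(arg);
}
}  // namespace callback_sketch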
@@ -942,56 +982,61 @@ inline CondVar::CondVar() : cv_(0) {} // static template bool Condition::CastAndCallMethod(const Condition *c) { - typedef bool (T::*MemberType)(); - MemberType rm = reinterpret_cast(c->method_); - T *x = static_cast(c->arg_); - return (x->*rm)(); + T *object = static_cast(c->arg_); + bool (T::*method_pointer)(); + c->ReadCallback(&method_pointer); + return (object->*method_pointer)(); } // static template bool Condition::CastAndCallFunction(const Condition *c) { - typedef bool (*FuncType)(T *); - FuncType fn = reinterpret_cast(c->function_); - T *x = static_cast(c->arg_); - return (*fn)(x); + bool (*function)(T *); + c->ReadCallback(&function); + T *argument = static_cast(c->arg_); + return (*function)(argument); } template inline Condition::Condition(bool (*func)(T *), T *arg) : eval_(&CastAndCallFunction), - function_(reinterpret_cast(func)), - method_(nullptr), - arg_(const_cast(static_cast(arg))) {} + arg_(const_cast(static_cast(arg))) { + static_assert(sizeof(&func) <= sizeof(callback_), + "An overlarge function pointer was passed to Condition."); + StoreCallback(func); +} template inline Condition::Condition(T *object, bool (absl::internal::identity::type::*method)()) : eval_(&CastAndCallMethod), - function_(nullptr), - method_(reinterpret_cast(method)), - arg_(object) {} + arg_(object) { + static_assert(sizeof(&method) <= sizeof(callback_), + "An overlarge method pointer was passed to Condition."); + StoreCallback(method); +} template inline Condition::Condition(const T *object, bool (absl::internal::identity::type::*method)() const) : eval_(&CastAndCallMethod), - function_(nullptr), - method_(reinterpret_cast(method)), - arg_(reinterpret_cast(const_cast(object))) {} + arg_(reinterpret_cast(const_cast(object))) { + StoreCallback(method); +} -// Register a hook for profiling support. +// Register hooks for profiling support. // // The function pointer registered here will be called whenever a mutex is -// contended. The callback is given the absl/base/cycleclock.h timestamp when -// waiting began. +// contended. The callback is given the cycles for which waiting happened (as +// measured by //absl/base/internal/cycleclock.h, and which may not +// be real "cycle" counts.) // // Calls to this function do not race or block, but there is no ordering // guaranteed between calls to this function and call to the provided hook. // In particular, the previously registered hook may still be called for some // time after this function returns. -void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)); +void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)); // Register a hook for Mutex tracing. // diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_benchmark.cc index e35aed8bd6..b5d2fbc454 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_benchmark.cc @@ -97,7 +97,7 @@ void BM_MutexEnqueue(benchmark::State& state) { // Mutex queueing behavior is modified. const bool multiple_priorities = state.range(0); ScopedThreadMutexPriority priority_setter( - (multiple_priorities && state.thread_index != 0) ? 1 : 0); + (multiple_priorities && state.thread_index() != 0) ? 
1 : 0); struct Shared { absl::Mutex mu; @@ -176,7 +176,7 @@ BENCHMARK(BM_MutexEnqueue) template void BM_Contended(benchmark::State& state) { - int priority = state.thread_index % state.range(1); + int priority = state.thread_index() % state.range(1); ScopedThreadMutexPriority priority_setter(priority); struct Shared { @@ -196,7 +196,7 @@ void BM_Contended(benchmark::State& state) { // To achieve this amount of local work is multiplied by number of threads // to keep ratio between local work and critical section approximately // equal regardless of number of threads. - DelayNs(100 * state.threads, &local); + DelayNs(100 * state.threads(), &local); RaiiLocker locker(&shared->mu); DelayNs(state.range(0), &shared->data); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc new file mode 100644 index 0000000000..1ec801a001 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc @@ -0,0 +1,138 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/synchronization/mutex.h" + +#include +#include + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +namespace { + +class IncompleteClass; + +#ifdef _MSC_VER +// These tests verify expectations about sizes of MSVC pointers to methods. +// Pointers to methods are distinguished by whether their class hierachies +// contain single inheritance, multiple inheritance, or virtual inheritence. + +// Declare classes of the various MSVC inheritance types. 
+class __single_inheritance SingleInheritance{}; +class __multiple_inheritance MultipleInheritance; +class __virtual_inheritance VirtualInheritance; + +TEST(MutexMethodPointerTest, MicrosoftMethodPointerSize) { + void (SingleInheritance::*single_inheritance_method_pointer)(); + void (MultipleInheritance::*multiple_inheritance_method_pointer)(); + void (VirtualInheritance::*virtual_inheritance_method_pointer)(); + +#if defined(_M_IX86) || defined(_M_ARM) + static_assert(sizeof(single_inheritance_method_pointer) == 4, + "Unexpected sizeof(single_inheritance_method_pointer)."); + static_assert(sizeof(multiple_inheritance_method_pointer) == 8, + "Unexpected sizeof(multiple_inheritance_method_pointer)."); + static_assert(sizeof(virtual_inheritance_method_pointer) == 12, + "Unexpected sizeof(virtual_inheritance_method_pointer)."); +#elif defined(_M_X64) || defined(__aarch64__) + static_assert(sizeof(single_inheritance_method_pointer) == 8, + "Unexpected sizeof(single_inheritance_method_pointer)."); + static_assert(sizeof(multiple_inheritance_method_pointer) == 16, + "Unexpected sizeof(multiple_inheritance_method_pointer)."); + static_assert(sizeof(virtual_inheritance_method_pointer) == 16, + "Unexpected sizeof(virtual_inheritance_method_pointer)."); +#endif + void (IncompleteClass::*incomplete_class_method_pointer)(); + static_assert(sizeof(incomplete_class_method_pointer) >= + sizeof(virtual_inheritance_method_pointer), + "Failed invariant: sizeof(incomplete_class_method_pointer) >= " + "sizeof(virtual_inheritance_method_pointer)!"); +} + +class Callback { + bool x = true; + + public: + Callback() {} + bool method() { + x = !x; + return x; + } +}; + +class M2 { + bool x = true; + + public: + M2() {} + bool method2() { + x = !x; + return x; + } +}; + +class MultipleInheritance : public Callback, public M2 {}; + +TEST(MutexMethodPointerTest, ConditionWithMultipleInheritanceMethod) { + // This test ensures that Condition can deal with method pointers from classes + // with multiple inheritance. + MultipleInheritance object = MultipleInheritance(); + absl::Condition condition(&object, &MultipleInheritance::method); + EXPECT_FALSE(condition.Eval()); + EXPECT_TRUE(condition.Eval()); +} + +class __virtual_inheritance VirtualInheritance : virtual public Callback { + bool x = false; + + public: + VirtualInheritance() {} + bool method() { + x = !x; + return x; + } +}; + +TEST(MutexMethodPointerTest, ConditionWithVirtualInheritanceMethod) { + // This test ensures that Condition can deal with method pointers from classes + // with virtual inheritance. 
+ VirtualInheritance object = VirtualInheritance(); + absl::Condition condition(&object, &VirtualInheritance::method); + EXPECT_TRUE(condition.Eval()); + EXPECT_FALSE(condition.Eval()); +} +#endif // #ifdef _MSC_VER + +TEST(MutexMethodPointerTest, ConditionWithIncompleteClassMethod) { + using IncompleteClassMethodPointer = void (IncompleteClass::*)(); + + union CallbackSlot { + void (*anonymous_function_pointer)(); + IncompleteClassMethodPointer incomplete_class_method_pointer; + }; + + static_assert(sizeof(CallbackSlot) >= sizeof(IncompleteClassMethodPointer), + "The callback slot is not big enough for method pointers."); + static_assert( + sizeof(CallbackSlot) == sizeof(IncompleteClassMethodPointer), + "The callback slot is not big enough for anonymous function pointers."); + +#if defined(_MSC_VER) + static_assert(sizeof(IncompleteClassMethodPointer) <= 24, + "The pointer to a method of an incomplete class is too big."); +#endif +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_test.cc index f8fbf9488c..34751cb1be 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/mutex_test.cc @@ -26,6 +26,7 @@ #include #include #include // NOLINT(build/c++11) +#include #include #include "gtest/gtest.h" @@ -294,8 +295,9 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) { "TestTime failed"); } elapsed = absl::Now() - start; - ABSL_RAW_CHECK(absl::Seconds(0.9) <= elapsed && - elapsed <= absl::Seconds(2.0), "TestTime failed"); + ABSL_RAW_CHECK( + absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0), + "TestTime failed"); ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed"); } else if (c == 1) { @@ -342,7 +344,7 @@ static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); } static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); } static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv, - const std::function& cb) { + const std::function &cb) { mu->Lock(); int c = (*c0)++; mu->Unlock(); @@ -365,9 +367,9 @@ static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int), cxt->threads = threads; absl::synchronization_internal::ThreadPool tp(threads); for (int i = 0; i != threads; i++) { - tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2, - std::function( - std::bind(test, cxt, std::placeholders::_1)))); + tp.Schedule(std::bind( + &EndTest, &c0, &c1, &mu2, &cv2, + std::function(std::bind(test, cxt, std::placeholders::_1)))); } mu2.Lock(); while (c1 != threads) { @@ -681,14 +683,14 @@ struct LockWhenTestStruct { bool waiting = false; }; -static bool LockWhenTestIsCond(LockWhenTestStruct* s) { +static bool LockWhenTestIsCond(LockWhenTestStruct *s) { s->mu2.Lock(); s->waiting = true; s->mu2.Unlock(); return s->cond; } -static void LockWhenTestWaitForIsCond(LockWhenTestStruct* s) { +static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) { s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s)); s->mu1.Unlock(); } @@ -870,33 +872,6 @@ TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { } } -// -------------------------------------------------------- -// Test for bug with pattern of readers using a condvar. 
The bug was that if a -// reader went to sleep on a condition variable while one or more other readers -// held the lock, but there were no waiters, the reader count (held in the -// mutex word) would be lost. (This is because Enqueue() had at one time -// always placed the thread on the Mutex queue. Later (CL 4075610), to -// tolerate re-entry into Mutex from a Condition predicate, Enqueue() was -// changed so that it could also place a thread on a condition-variable. This -// introduced the case where Enqueue() returned with an empty queue, and this -// case was handled incorrectly in one place.) - -static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv, - int *running) { - std::random_device dev; - std::mt19937 gen(dev()); - std::uniform_int_distribution random_millis(0, 15); - mu->ReaderLock(); - while (*running == 3) { - absl::SleepFor(absl::Milliseconds(random_millis(gen))); - cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen))); - } - mu->ReaderUnlock(); - mu->Lock(); - (*running)--; - mu->Unlock(); -} - struct True { template bool operator()(Args...) const { @@ -945,6 +920,33 @@ TEST(Mutex, FunctorCondition) { } } +// -------------------------------------------------------- +// Test for bug with pattern of readers using a condvar. The bug was that if a +// reader went to sleep on a condition variable while one or more other readers +// held the lock, but there were no waiters, the reader count (held in the +// mutex word) would be lost. (This is because Enqueue() had at one time +// always placed the thread on the Mutex queue. Later (CL 4075610), to +// tolerate re-entry into Mutex from a Condition predicate, Enqueue() was +// changed so that it could also place a thread on a condition-variable. This +// introduced the case where Enqueue() returned with an empty queue, and this +// case was handled incorrectly in one place.) + +static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv, + int *running) { + std::random_device dev; + std::mt19937 gen(dev()); + std::uniform_int_distribution random_millis(0, 15); + mu->ReaderLock(); + while (*running == 3) { + absl::SleepFor(absl::Milliseconds(random_millis(gen))); + cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen))); + } + mu->ReaderUnlock(); + mu->Lock(); + (*running)--; + mu->Unlock(); +} + static bool IntIsZero(int *x) { return *x == 0; } // Test for reader waiting condition variable when there are other readers @@ -1693,8 +1695,7 @@ TEST(Mutex, Timed) { TEST(Mutex, CVTime) { int threads = 10; // Use a fixed thread count of 10 int iterations = 1; - EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), - threads * iterations); + EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations); } TEST(Mutex, MuTime) { @@ -1703,4 +1704,30 @@ TEST(Mutex, MuTime) { EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations); } +TEST(Mutex, SignalExitedThread) { + // The test may expose a race when Mutex::Unlock signals a thread + // that has already exited. 
+#if defined(__wasm__) || defined(__asmjs__) + constexpr int kThreads = 1; // OOMs under WASM +#else + constexpr int kThreads = 100; +#endif + std::vector top; + for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) { + top.emplace_back([&]() { + for (int i = 0; i < kThreads; i++) { + absl::Mutex mu; + std::thread t([&]() { + mu.Lock(); + mu.Unlock(); + }); + mu.Lock(); + mu.Unlock(); + t.join(); + } + }); + } + for (auto &th : top) th.join(); +} + } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.cc index e91b903822..165ba669fb 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.cc @@ -16,7 +16,6 @@ #include -#include "absl/base/attributes.h" #include "absl/base/internal/raw_logging.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.h index 9a354ca2c0..8986d9a408 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/synchronization/notification.h @@ -22,7 +22,7 @@ // The `Notification` object maintains a private boolean "notified" state that // transitions to `true` at most once. The `Notification` class provides the // following primary member functions: -// * `HasBeenNotified() `to query its state +// * `HasBeenNotified()` to query its state // * `WaitForNotification*()` to have threads wait until the "notified" state // is `true`. // * `Notify()` to set the notification's "notified" state to `true` and @@ -52,7 +52,7 @@ #include -#include "absl/base/macros.h" +#include "absl/base/attributes.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" @@ -74,7 +74,7 @@ class Notification { // Notification::HasBeenNotified() // // Returns the value of the notification's internal "notified" state. - bool HasBeenNotified() const { + ABSL_MUST_USE_RESULT bool HasBeenNotified() const { return HasBeenNotifiedInternal(&this->notified_yet_); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/CMakeLists.txt index f6ff8bd127..7b720540fe 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/CMakeLists.txt @@ -87,6 +87,7 @@ absl_cc_library( $<$:${CoreFoundation}> ) +# Internal-only target, do not depend on directly. 
absl_cc_library( NAME time_internal_test_util @@ -94,7 +95,6 @@ absl_cc_library( "internal/test_util.h" SRCS "internal/test_util.cc" - "internal/zoneinfo.inc" COPTS ${ABSL_DEFAULT_COPTS} DEPS @@ -102,7 +102,6 @@ absl_cc_library( absl::config absl::raw_logging_internal absl::time_zone - GTest::gmock TESTONLY ) @@ -126,3 +125,16 @@ absl_cc_test( absl::time_zone GTest::gmock_main ) + +absl_cc_test( + NAME + flag_test + SRCS + "flag_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::flags + absl::flags_reflection + GTest::gmock_main +) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.cc index 6a231edb2d..65df39d731 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.cc @@ -15,6 +15,7 @@ #include "absl/time/civil_time.h" #include +#include #include #include "absl/strings/str_cat.h" @@ -167,6 +168,31 @@ std::ostream& operator<<(std::ostream& os, CivilSecond s) { return os << FormatCivilTime(s); } +bool AbslParseFlag(string_view s, CivilSecond* c, std::string*) { + return ParseLenientCivilTime(s, c); +} +bool AbslParseFlag(string_view s, CivilMinute* c, std::string*) { + return ParseLenientCivilTime(s, c); +} +bool AbslParseFlag(string_view s, CivilHour* c, std::string*) { + return ParseLenientCivilTime(s, c); +} +bool AbslParseFlag(string_view s, CivilDay* c, std::string*) { + return ParseLenientCivilTime(s, c); +} +bool AbslParseFlag(string_view s, CivilMonth* c, std::string*) { + return ParseLenientCivilTime(s, c); +} +bool AbslParseFlag(string_view s, CivilYear* c, std::string*) { + return ParseLenientCivilTime(s, c); +} +std::string AbslUnparseFlag(CivilSecond c) { return FormatCivilTime(c); } +std::string AbslUnparseFlag(CivilMinute c) { return FormatCivilTime(c); } +std::string AbslUnparseFlag(CivilHour c) { return FormatCivilTime(c); } +std::string AbslUnparseFlag(CivilDay c) { return FormatCivilTime(c); } +std::string AbslUnparseFlag(CivilMonth c) { return FormatCivilTime(c); } +std::string AbslUnparseFlag(CivilYear c) { return FormatCivilTime(c); } + } // namespace time_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.h index bb46004434..5855bc73a1 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/civil_time.h @@ -70,8 +70,10 @@ #ifndef ABSL_TIME_CIVIL_TIME_H_ #define ABSL_TIME_CIVIL_TIME_H_ +#include #include +#include "absl/base/config.h" #include "absl/strings/string_view.h" #include "absl/time/internal/cctz/include/cctz/civil_time.h" @@ -530,6 +532,29 @@ std::ostream& operator<<(std::ostream& os, CivilHour h); std::ostream& operator<<(std::ostream& os, CivilMinute m); std::ostream& operator<<(std::ostream& os, CivilSecond s); +// AbslParseFlag() +// +// Parses the command-line flag string representation `s` into a civil-time +// value. Flags must be specified in a format that is valid for +// `absl::ParseLenientCivilTime()`. 
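// Illustrative usage sketch for the flag support declared below: with the
// AbslParseFlag()/AbslUnparseFlag() overloads in place, civil-time types can
// be used directly as command-line flag types.  The flag name, its default,
// and the main() wrapper are assumptions made for this example; it also
// assumes the program links the absl flags library.
#include <cstdio>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/time/civil_time.h"

// --start_day accepts strings ParseLenientCivilTime() understands, e.g.
// "2021-06-03".
ABSL_FLAG(absl::CivilDay, start_day, absl::CivilDay(2021, 1, 1),
          "First day to process.");

int main(int argc, char **argv) {
  absl::ParseCommandLine(argc, argv);
  const absl::CivilDay day = absl::GetFlag(FLAGS_start_day);
  std::printf("start day: %s\n", absl::FormatCivilTime(day).c_str());
  return 0;
}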
+bool AbslParseFlag(absl::string_view s, CivilSecond* c, std::string* error); +bool AbslParseFlag(absl::string_view s, CivilMinute* c, std::string* error); +bool AbslParseFlag(absl::string_view s, CivilHour* c, std::string* error); +bool AbslParseFlag(absl::string_view s, CivilDay* c, std::string* error); +bool AbslParseFlag(absl::string_view s, CivilMonth* c, std::string* error); +bool AbslParseFlag(absl::string_view s, CivilYear* c, std::string* error); + +// AbslUnparseFlag() +// +// Unparses a civil-time value into a command-line string representation using +// the format specified by `absl::ParseCivilTime()`. +std::string AbslUnparseFlag(CivilSecond c); +std::string AbslUnparseFlag(CivilMinute c); +std::string AbslUnparseFlag(CivilHour c); +std::string AbslUnparseFlag(CivilDay c); +std::string AbslUnparseFlag(CivilMonth c); +std::string AbslUnparseFlag(CivilYear c); + } // namespace time_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock.cc index 7b204c4ee0..2bf53d9c61 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock.cc @@ -196,7 +196,7 @@ struct ABSL_CACHELINE_ALIGNED TimeState { absl::base_internal::SpinLock lock{absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY}; }; -ABSL_CONST_INIT static TimeState time_state{}; +ABSL_CONST_INIT static TimeState time_state; // Return the time in ns as told by the kernel interface. Place in *cycleclock // the value of the cycleclock at about the time of the syscall. @@ -217,9 +217,11 @@ static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock, uint64_t elapsed_cycles; int loops = 0; do { - before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); + before_cycles = + static_cast(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW()); current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); - after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); + after_cycles = + static_cast(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW()); // elapsed_cycles is unsigned, so is large on overflow elapsed_cycles = after_cycles - before_cycles; if (elapsed_cycles >= local_approx_syscall_time_in_cycles && @@ -316,7 +318,8 @@ int64_t GetCurrentTimeNanos() { // contribute to register pressure - reading it early before initializing // the other pieces of the calculation minimizes spill/restore instructions, // minimizing icache cost. - uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); + uint64_t now_cycles = + static_cast(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW()); // Acquire pairs with the barrier in SeqRelease - if this load sees that // store, the shared-data reads necessarily see that SeqRelease's updates @@ -356,7 +359,8 @@ int64_t GetCurrentTimeNanos() { uint64_t delta_cycles; if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 && (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) { - return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale); + return static_cast( + base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale)); } return GetCurrentTimeNanosSlowPath(); } @@ -404,8 +408,8 @@ static int64_t GetCurrentTimeNanosSlowPath() // Sample the kernel time base. This is the definition of // "now" if we take the slow path. 
uint64_t now_cycles; - uint64_t now_ns = - GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles); + uint64_t now_ns = static_cast( + GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles)); time_state.last_now_cycles = now_cycles; uint64_t estimated_base_ns; @@ -432,7 +436,7 @@ static int64_t GetCurrentTimeNanosSlowPath() time_state.lock.Unlock(); - return estimated_base_ns; + return static_cast(estimated_base_ns); } // Main part of the algorithm. Locks out readers, updates the approximation @@ -489,7 +493,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, uint64_t assumed_next_sample_delta_cycles = SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle); - int64_t diff_ns = now_ns - estimated_base_ns; // estimate low by this much + // Estimate low by this much. + int64_t diff_ns = static_cast(now_ns - estimated_base_ns); // We want to set nsscaled_per_cycle so that our estimate of the ns time // at the assumed cycle time is the assumed ns time. @@ -500,7 +505,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, // of our current error, by solving: // kMinNSBetweenSamples + diff_ns - (diff_ns / 16) == // (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale - ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16); + ns = static_cast(static_cast(kMinNSBetweenSamples) + + diff_ns - (diff_ns / 16)); uint64_t new_nsscaled_per_cycle = SafeDivideAndScale(ns, assumed_next_sample_delta_cycles); if (new_nsscaled_per_cycle != 0 && @@ -558,7 +564,7 @@ constexpr absl::Duration MaxSleep() { // REQUIRES: to_sleep <= MaxSleep(). void SleepOnce(absl::Duration to_sleep) { #ifdef _WIN32 - Sleep(to_sleep / absl::Milliseconds(1)); + Sleep(static_cast(to_sleep / absl::Milliseconds(1))); #else struct timespec sleep_time = absl::ToTimespec(to_sleep); while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock_test.cc index 4bcfc6bc72..bc77dbc27b 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/clock_test.cc @@ -18,6 +18,10 @@ #if defined(ABSL_HAVE_ALARM) #include #include +#ifdef _AIX +// sig_t is not defined in AIX. 
+typedef void (*sig_t)(int); +#endif #elif defined(__linux__) || defined(__APPLE__) #error all known Linux and Apple targets have alarm #endif diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration.cc index 4443109a51..911e80f834 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration.cc @@ -617,7 +617,7 @@ timespec ToTimespec(Duration d) { rep_lo -= kTicksPerSecond; } } - ts.tv_sec = rep_hi; + ts.tv_sec = static_cast(rep_hi); if (ts.tv_sec == rep_hi) { // no time_t narrowing ts.tv_nsec = rep_lo / kTicksPerNanosecond; return ts; @@ -645,7 +645,7 @@ timeval ToTimeval(Duration d) { ts.tv_nsec -= 1000 * 1000 * 1000; } } - tv.tv_sec = ts.tv_sec; + tv.tv_sec = static_cast(ts.tv_sec); if (tv.tv_sec != ts.tv_sec) { // narrowing if (ts.tv_sec < 0) { tv.tv_sec = std::numeric_limits::min(); @@ -691,7 +691,7 @@ namespace { char* Format64(char* ep, int width, int64_t v) { do { --width; - *--ep = '0' + (v % 10); // contiguous digits + *--ep = static_cast('0' + (v % 10)); // contiguous digits } while (v /= 10); while (--width >= 0) *--ep = '0'; // zero pad return ep; @@ -728,7 +728,7 @@ void AppendNumberUnit(std::string* out, int64_t n, DisplayUnit unit) { char* const ep = buf + sizeof(buf); char* bp = Format64(ep, 0, n); if (*bp != '0' || bp + 1 != ep) { - out->append(bp, ep - bp); + out->append(bp, static_cast(ep - bp)); out->append(unit.abbr.data(), unit.abbr.size()); } } @@ -745,12 +745,12 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) { int64_t int_part = d; if (int_part != 0 || frac_part != 0) { char* bp = Format64(ep, 0, int_part); // always < 1000 - out->append(bp, ep - bp); + out->append(bp, static_cast(ep - bp)); if (frac_part != 0) { out->push_back('.'); bp = Format64(ep, prec, frac_part); while (ep[-1] == '0') --ep; - out->append(bp, ep - bp); + out->append(bp, static_cast(ep - bp)); } out->append(unit.abbr.data(), unit.abbr.size()); } @@ -766,13 +766,14 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) { // is non-zero. // Unlike Go, we format the zero duration as 0, with no unit. std::string FormatDuration(Duration d) { - const Duration min_duration = Seconds(kint64min); - if (d == min_duration) { + constexpr Duration kMinDuration = Seconds(kint64min); + std::string s; + if (d == kMinDuration) { // Avoid needing to negate kint64min by directly returning what the // following code should produce in that case. - return "-2562047788015215h30m8s"; + s = "-2562047788015215h30m8s"; + return s; } - std::string s; if (d < ZeroDuration()) { s.append("-"); d = -d; @@ -840,7 +841,7 @@ bool ConsumeDurationNumber(const char** dpp, const char* ep, int64_t* int_part, // in "*unit". The given string pointer is modified to point to the first // unconsumed char. 
bool ConsumeDurationUnit(const char** start, const char* end, Duration* unit) { - size_t size = end - *start; + size_t size = static_cast(end - *start); switch (size) { case 0: return false; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration_test.cc index b7209e1c0a..b7abf4baa2 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/duration_test.cc @@ -349,6 +349,11 @@ TEST(Duration, ToChrono) { } TEST(Duration, FactoryOverloads) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + enum E { kOne = 1 }; #define TEST_FACTORY_OVERLOADS(NAME) \ EXPECT_EQ(1, NAME(kOne) / NAME(kOne)); \ @@ -879,6 +884,11 @@ TEST(Duration, RelationalOperators) { } TEST(Duration, Addition) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + #define TEST_ADD_OPS(UNIT) \ do { \ EXPECT_EQ(UNIT(2), UNIT(1) + UNIT(1)); \ @@ -972,6 +982,11 @@ TEST(Duration, Negation) { } TEST(Duration, AbsoluteValue) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + EXPECT_EQ(absl::ZeroDuration(), AbsDuration(absl::ZeroDuration())); EXPECT_EQ(absl::Seconds(1), AbsDuration(absl::Seconds(1))); EXPECT_EQ(absl::Seconds(1), AbsDuration(absl::Seconds(-1))); @@ -989,6 +1004,11 @@ TEST(Duration, AbsoluteValue) { } TEST(Duration, Multiplication) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + #define TEST_MUL_OPS(UNIT) \ do { \ EXPECT_EQ(UNIT(5), UNIT(2) * 2.5); \ @@ -1241,6 +1261,11 @@ TEST(Duration, RoundTripUnits) { } TEST(Duration, TruncConversions) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + // Tests ToTimespec()/DurationFromTimespec() const struct { absl::Duration d; @@ -1537,6 +1562,11 @@ TEST(Duration, ConversionSaturation) { } TEST(Duration, FormatDuration) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + // Example from Go's docs. EXPECT_EQ("72h3m0.5s", absl::FormatDuration(absl::Hours(72) + absl::Minutes(3) + @@ -1671,6 +1701,11 @@ TEST(Duration, FormatDuration) { } TEST(Duration, ParseDuration) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + absl::Duration d; // No specified unit. Should only work for zero and infinity. diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/flag_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/flag_test.cc new file mode 100644 index 0000000000..8f8532b7eb --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/flag_test.cc @@ -0,0 +1,147 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/flags/flag.h" + +#include + +#include "gtest/gtest.h" +#include "absl/flags/reflection.h" +#include "absl/time/civil_time.h" +#include "absl/time/time.h" + +ABSL_FLAG(absl::CivilSecond, test_flag_civil_second, + absl::CivilSecond(2015, 1, 2, 3, 4, 5), ""); +ABSL_FLAG(absl::CivilMinute, test_flag_civil_minute, + absl::CivilMinute(2015, 1, 2, 3, 4), ""); +ABSL_FLAG(absl::CivilHour, test_flag_civil_hour, absl::CivilHour(2015, 1, 2, 3), + ""); +ABSL_FLAG(absl::CivilDay, test_flag_civil_day, absl::CivilDay(2015, 1, 2), ""); +ABSL_FLAG(absl::CivilMonth, test_flag_civil_month, absl::CivilMonth(2015, 1), + ""); +ABSL_FLAG(absl::CivilYear, test_flag_civil_year, absl::CivilYear(2015), ""); + +ABSL_FLAG(absl::Duration, test_duration_flag, absl::Seconds(5), + "For testing support for Duration flags"); +ABSL_FLAG(absl::Time, test_time_flag, absl::InfinitePast(), + "For testing support for Time flags"); + +namespace { + +bool SetFlagValue(absl::string_view flag_name, absl::string_view value) { + auto* flag = absl::FindCommandLineFlag(flag_name); + if (!flag) return false; + std::string err; + return flag->ParseFrom(value, &err); +} + +bool GetFlagValue(absl::string_view flag_name, std::string& value) { + auto* flag = absl::FindCommandLineFlag(flag_name); + if (!flag) return false; + value = flag->CurrentValue(); + return true; +} + +TEST(CivilTime, FlagSupport) { + // Tests the default setting of the flags. + const absl::CivilSecond kDefaultSec(2015, 1, 2, 3, 4, 5); + EXPECT_EQ(absl::CivilSecond(kDefaultSec), + absl::GetFlag(FLAGS_test_flag_civil_second)); + EXPECT_EQ(absl::CivilMinute(kDefaultSec), + absl::GetFlag(FLAGS_test_flag_civil_minute)); + EXPECT_EQ(absl::CivilHour(kDefaultSec), + absl::GetFlag(FLAGS_test_flag_civil_hour)); + EXPECT_EQ(absl::CivilDay(kDefaultSec), + absl::GetFlag(FLAGS_test_flag_civil_day)); + EXPECT_EQ(absl::CivilMonth(kDefaultSec), + absl::GetFlag(FLAGS_test_flag_civil_month)); + EXPECT_EQ(absl::CivilYear(kDefaultSec), + absl::GetFlag(FLAGS_test_flag_civil_year)); + + // Sets flags to a new value. 
+ const absl::CivilSecond kNewSec(2016, 6, 7, 8, 9, 10); + absl::SetFlag(&FLAGS_test_flag_civil_second, absl::CivilSecond(kNewSec)); + absl::SetFlag(&FLAGS_test_flag_civil_minute, absl::CivilMinute(kNewSec)); + absl::SetFlag(&FLAGS_test_flag_civil_hour, absl::CivilHour(kNewSec)); + absl::SetFlag(&FLAGS_test_flag_civil_day, absl::CivilDay(kNewSec)); + absl::SetFlag(&FLAGS_test_flag_civil_month, absl::CivilMonth(kNewSec)); + absl::SetFlag(&FLAGS_test_flag_civil_year, absl::CivilYear(kNewSec)); + + EXPECT_EQ(absl::CivilSecond(kNewSec), + absl::GetFlag(FLAGS_test_flag_civil_second)); + EXPECT_EQ(absl::CivilMinute(kNewSec), + absl::GetFlag(FLAGS_test_flag_civil_minute)); + EXPECT_EQ(absl::CivilHour(kNewSec), + absl::GetFlag(FLAGS_test_flag_civil_hour)); + EXPECT_EQ(absl::CivilDay(kNewSec), absl::GetFlag(FLAGS_test_flag_civil_day)); + EXPECT_EQ(absl::CivilMonth(kNewSec), + absl::GetFlag(FLAGS_test_flag_civil_month)); + EXPECT_EQ(absl::CivilYear(kNewSec), + absl::GetFlag(FLAGS_test_flag_civil_year)); +} + +TEST(Duration, FlagSupport) { + EXPECT_EQ(absl::Seconds(5), absl::GetFlag(FLAGS_test_duration_flag)); + + absl::SetFlag(&FLAGS_test_duration_flag, absl::Seconds(10)); + EXPECT_EQ(absl::Seconds(10), absl::GetFlag(FLAGS_test_duration_flag)); + + EXPECT_TRUE(SetFlagValue("test_duration_flag", "20s")); + EXPECT_EQ(absl::Seconds(20), absl::GetFlag(FLAGS_test_duration_flag)); + + std::string current_flag_value; + EXPECT_TRUE(GetFlagValue("test_duration_flag", current_flag_value)); + EXPECT_EQ("20s", current_flag_value); +} + +TEST(Time, FlagSupport) { + EXPECT_EQ(absl::InfinitePast(), absl::GetFlag(FLAGS_test_time_flag)); + + const absl::Time t = absl::FromCivil(absl::CivilSecond(2016, 1, 2, 3, 4, 5), + absl::UTCTimeZone()); + absl::SetFlag(&FLAGS_test_time_flag, t); + EXPECT_EQ(t, absl::GetFlag(FLAGS_test_time_flag)); + + // Successful parse + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:06Z")); + EXPECT_EQ(t + absl::Seconds(1), absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:07.0Z")); + EXPECT_EQ(t + absl::Seconds(2), absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:08.000Z")); + EXPECT_EQ(t + absl::Seconds(3), absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:09+00:00")); + EXPECT_EQ(t + absl::Seconds(4), absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:05.123+00:00")); + EXPECT_EQ(t + absl::Milliseconds(123), absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:05.123+08:00")); + EXPECT_EQ(t + absl::Milliseconds(123) - absl::Hours(8), + absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "infinite-future")); + EXPECT_EQ(absl::InfiniteFuture(), absl::GetFlag(FLAGS_test_time_flag)); + EXPECT_TRUE(SetFlagValue("test_time_flag", "infinite-past")); + EXPECT_EQ(absl::InfinitePast(), absl::GetFlag(FLAGS_test_time_flag)); + + EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02T03:04:06")); + EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02")); + EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02Z")); + EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-01-02+00:00")); + EXPECT_FALSE(SetFlagValue("test_time_flag", "2016-99-99T03:04:06Z")); + + EXPECT_TRUE(SetFlagValue("test_time_flag", "2016-01-02T03:04:05Z")); + std::string current_flag_value; + EXPECT_TRUE(GetFlagValue("test_time_flag", 
current_flag_value)); + EXPECT_EQ("2016-01-02T03:04:05+00:00", current_flag_value); +} + +} // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/format.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/format.cc index 4005fb704c..15a26b14f7 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/format.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/format.cc @@ -64,7 +64,8 @@ cctz_parts Split(absl::Time t) { // details about rep_hi and rep_lo. absl::Time Join(const cctz_parts& parts) { const int64_t rep_hi = (parts.sec - unix_epoch()).count(); - const uint32_t rep_lo = parts.fem.count() / (1000 * 1000 / 4); + const uint32_t rep_lo = + static_cast(parts.fem.count() / (1000 * 1000 / 4)); const auto d = time_internal::MakeDuration(rep_hi, rep_lo); return time_internal::FromUnixDuration(d); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h index 8aadde57ca..a5b084e6be 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h @@ -84,14 +84,13 @@ CONSTEXPR_F bool is_leap_year(year_t y) noexcept { return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0); } CONSTEXPR_F int year_index(year_t y, month_t m) noexcept { - return (static_cast((y + (m > 2)) % 400) + 400) % 400; + const int yi = static_cast((y + (m > 2)) % 400); + return yi < 0 ? yi + 400 : yi; } -CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept { - const int yi = year_index(y, m); +CONSTEXPR_F int days_per_century(int yi) noexcept { return 36524 + (yi == 0 || yi > 300); } -CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept { - const int yi = year_index(y, m); +CONSTEXPR_F int days_per_4years(int yi) noexcept { return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96); } CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept { @@ -133,17 +132,22 @@ CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh, } } if (d > 365) { + int yi = year_index(ey, m); // Index into Gregorian 400 year cycle. for (;;) { - int n = days_per_century(ey, m); + int n = days_per_century(yi); if (d <= n) break; d -= n; ey += 100; + yi += 100; + if (yi >= 400) yi -= 400; } for (;;) { - int n = days_per_4years(ey, m); + int n = days_per_4years(yi); if (d <= n) break; d -= n; ey += 4; + yi += 4; + if (yi >= 400) yi -= 400; } for (;;) { int n = days_per_year(ey, m); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h index 5562a37bc8..6e382dc6c9 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -41,20 +42,9 @@ using sys_seconds = seconds; // Deprecated. Use cctz::seconds instead. 
namespace detail { template -inline std::pair, D> split_seconds( - const time_point& tp) { - auto sec = std::chrono::time_point_cast(tp); - auto sub = tp - sec; - if (sub.count() < 0) { - sec -= seconds(1); - sub += seconds(1); - } - return {sec, std::chrono::duration_cast(sub)}; -} -inline std::pair, seconds> split_seconds( - const time_point& tp) { - return {tp, seconds::zero()}; -} +std::pair, D> split_seconds(const time_point& tp); +std::pair, seconds> split_seconds( + const time_point& tp); } // namespace detail // cctz::time_zone is an opaque, small, value-type class representing a @@ -279,6 +269,20 @@ std::string format(const std::string&, const time_point&, const femtoseconds&, const time_zone&); bool parse(const std::string&, const std::string&, const time_zone&, time_point*, femtoseconds*, std::string* err = nullptr); +template +bool join_seconds( + const time_point& sec, const femtoseconds& fs, + time_point>>* tpp); +template +bool join_seconds( + const time_point& sec, const femtoseconds& fs, + time_point>>* tpp); +template +bool join_seconds( + const time_point& sec, const femtoseconds& fs, + time_point>>* tpp); +bool join_seconds(const time_point& sec, const femtoseconds&, + time_point* tpp); } // namespace detail // Formats the given time_point in the given cctz::time_zone according to @@ -369,15 +373,84 @@ inline bool parse(const std::string& fmt, const std::string& input, const time_zone& tz, time_point* tpp) { time_point sec; detail::femtoseconds fs; - const bool b = detail::parse(fmt, input, tz, &sec, &fs); - if (b) { - // TODO: Return false if unrepresentable as a time_point. - *tpp = std::chrono::time_point_cast(sec); - *tpp += std::chrono::duration_cast(fs); - } - return b; + return detail::parse(fmt, input, tz, &sec, &fs) && + detail::join_seconds(sec, fs, tpp); } +namespace detail { + +// Split a time_point into a time_point and a D subseconds. +// Undefined behavior if time_point is not of sufficient range. +// Note that this means it is UB to call cctz::time_zone::lookup(tp) or +// cctz::format(fmt, tp, tz) with a time_point that is outside the range +// of a 64-bit std::time_t. +template +std::pair, D> split_seconds(const time_point& tp) { + auto sec = std::chrono::time_point_cast(tp); + auto sub = tp - sec; + if (sub.count() < 0) { + sec -= seconds(1); + sub += seconds(1); + } + return {sec, std::chrono::duration_cast(sub)}; +} + +inline std::pair, seconds> split_seconds( + const time_point& tp) { + return {tp, seconds::zero()}; +} + +// Join a time_point and femto subseconds into a time_point. +// Floors to the resolution of time_point. Returns false if time_point +// is not of sufficient range. +template +bool join_seconds( + const time_point& sec, const femtoseconds& fs, + time_point>>* tpp) { + using D = std::chrono::duration>; + // TODO(#199): Return false if result unrepresentable as a time_point. 
+ *tpp = std::chrono::time_point_cast(sec); + *tpp += std::chrono::duration_cast(fs); + return true; +} + +template +bool join_seconds( + const time_point& sec, const femtoseconds&, + time_point>>* tpp) { + using D = std::chrono::duration>; + auto count = sec.time_since_epoch().count(); + if (count >= 0 || count % Num == 0) { + count /= Num; + } else { + count /= Num; + count -= 1; + } + if (count > (std::numeric_limits::max)()) return false; + if (count < (std::numeric_limits::min)()) return false; + *tpp = time_point() + D{static_cast(count)}; + return true; +} + +template +bool join_seconds( + const time_point& sec, const femtoseconds&, + time_point>>* tpp) { + using D = std::chrono::duration>; + auto count = sec.time_since_epoch().count(); + if (count > (std::numeric_limits::max)()) return false; + if (count < (std::numeric_limits::min)()) return false; + *tpp = time_point() + D{static_cast(count)}; + return true; +} + +inline bool join_seconds(const time_point& sec, const femtoseconds&, + time_point* tpp) { + *tpp = sec; + return true; +} + +} // namespace detail } // namespace cctz } // namespace time_internal ABSL_NAMESPACE_END diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc index 4e39188ff3..c64f3801db 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc @@ -554,6 +554,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan", "Europe/Kaliningrad", "Europe/Kiev", "Europe/Kirov", + "Europe/Kyiv", "Europe/Lisbon", "Europe/Ljubljana", "Europe/London", @@ -593,6 +594,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan", "Europe/Zagreb", "Europe/Zaporozhye", "Europe/Zurich", + "Factory", "GB", "GB-Eire", "GMT", @@ -648,6 +650,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan", "Pacific/Guam", "Pacific/Honolulu", "Pacific/Johnston", + "Pacific/Kanton", "Pacific/Kiritimati", "Pacific/Kosrae", "Pacific/Kwajalein", diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc index d8cb047425..2e5f532911 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc @@ -19,7 +19,7 @@ #endif #if defined(HAS_STRPTIME) && HAS_STRPTIME -#if !defined(_XOPEN_SOURCE) +#if !defined(_XOPEN_SOURCE) && !defined(__OpenBSD__) #define _XOPEN_SOURCE // Definedness suffices for strptime. #endif #endif diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc index 294f2e2284..f1f79a20fc 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc @@ -13,15 +13,20 @@ // limitations under the License. 
#include +#include #include #include #include +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/time_zone.h" +#if defined(__linux__) +#include +#endif + #include "gmock/gmock.h" #include "gtest/gtest.h" -#include "absl/base/config.h" #include "absl/time/internal/cctz/include/cctz/civil_time.h" -#include "absl/time/internal/cctz/include/cctz/time_zone.h" namespace chrono = std::chrono; @@ -182,8 +187,10 @@ TEST(Format, PosixConversions) { TestFormatSpecifier(tp, tz, "%F", "1970-01-01"); TestFormatSpecifier(tp, tz, "%g", "70"); TestFormatSpecifier(tp, tz, "%G", "1970"); +#if defined(__GLIBC__) TestFormatSpecifier(tp, tz, "%k", " 0"); TestFormatSpecifier(tp, tz, "%l", "12"); +#endif TestFormatSpecifier(tp, tz, "%n", "\n"); TestFormatSpecifier(tp, tz, "%R", "00:00"); TestFormatSpecifier(tp, tz, "%t", "\t"); @@ -215,7 +222,9 @@ TEST(Format, LocaleSpecific) { #if defined(__linux__) // SU/C99/TZ extensions TestFormatSpecifier(tp, tz, "%h", "Jan"); // Same as %b +#if defined(__GLIBC__) TestFormatSpecifier(tp, tz, "%P", "am"); +#endif TestFormatSpecifier(tp, tz, "%r", "12:00:00 AM"); // Modified conversion specifiers %E_ @@ -1044,9 +1053,11 @@ TEST(Parse, LocaleSpecific) { EXPECT_TRUE(parse("%h", "Feb", tz, &tp)); EXPECT_EQ(2, convert(tp, tz).month()); // Equivalent to %b +#if defined(__GLIBC__) tp = reset; EXPECT_TRUE(parse("%l %p", "5 PM", tz, &tp)); EXPECT_EQ(17, convert(tp, tz).hour()); +#endif tp = reset; EXPECT_TRUE(parse("%r", "03:44:55 PM", tz, &tp)); @@ -1054,6 +1065,7 @@ TEST(Parse, LocaleSpecific) { EXPECT_EQ(44, convert(tp, tz).minute()); EXPECT_EQ(55, convert(tp, tz).second()); +#if defined(__GLIBC__) tp = reset; EXPECT_TRUE(parse("%Ec", "Tue Nov 19 05:06:07 2013", tz, &tp)); EXPECT_EQ(convert(civil_second(2013, 11, 19, 5, 6, 7), tz), tp); @@ -1125,6 +1137,7 @@ TEST(Parse, LocaleSpecific) { EXPECT_TRUE(parse("%Oy", "04", tz, &tp)); EXPECT_EQ(2004, convert(tp, tz).year()); #endif +#endif } TEST(Parse, ExtendedSeconds) { @@ -1504,7 +1517,7 @@ TEST(Parse, MaxRange) { parse(RFC3339_sec, "292277026596-12-04T14:30:07-01:00", utc, &tp)); EXPECT_EQ(tp, time_point::max()); EXPECT_FALSE( - parse(RFC3339_sec, "292277026596-12-04T15:30:07-01:00", utc, &tp)); + parse(RFC3339_sec, "292277026596-12-04T14:30:08-01:00", utc, &tp)); // tests the lower limit using +00:00 offset EXPECT_TRUE( @@ -1525,10 +1538,82 @@ TEST(Parse, MaxRange) { parse(RFC3339_sec, "9223372036854775807-12-31T23:59:59-00:01", utc, &tp)); EXPECT_FALSE(parse(RFC3339_sec, "-9223372036854775808-01-01T00:00:00+00:01", utc, &tp)); +} - // TODO: Add tests that parsing times with fractional seconds overflow - // appropriately. This can't be done until cctz::parse() properly detects - // overflow when combining the chrono seconds and femto. +TEST(Parse, TimePointOverflow) { + const time_zone utc = utc_time_zone(); + + using D = chrono::duration; + time_point tp; + + EXPECT_TRUE( + parse(RFC3339_full, "2262-04-11T23:47:16.8547758079+00:00", utc, &tp)); + EXPECT_EQ(tp, time_point::max()); + EXPECT_EQ("2262-04-11T23:47:16.854775807+00:00", + format(RFC3339_full, tp, utc)); +#if 0 + // TODO(#199): Will fail until cctz::parse() properly detects overflow. 
+ EXPECT_FALSE( + parse(RFC3339_full, "2262-04-11T23:47:16.8547758080+00:00", utc, &tp)); + EXPECT_TRUE( + parse(RFC3339_full, "1677-09-21T00:12:43.1452241920+00:00", utc, &tp)); + EXPECT_EQ(tp, time_point::min()); + EXPECT_EQ("1677-09-21T00:12:43.145224192+00:00", + format(RFC3339_full, tp, utc)); + EXPECT_FALSE( + parse(RFC3339_full, "1677-09-21T00:12:43.1452241919+00:00", utc, &tp)); +#endif + + using DS = chrono::duration; + time_point stp; + + EXPECT_TRUE(parse(RFC3339_full, "1970-01-01T00:02:07.9+00:00", utc, &stp)); + EXPECT_EQ(stp, time_point::max()); + EXPECT_EQ("1970-01-01T00:02:07+00:00", format(RFC3339_full, stp, utc)); + EXPECT_FALSE(parse(RFC3339_full, "1970-01-01T00:02:08+00:00", utc, &stp)); + + EXPECT_TRUE(parse(RFC3339_full, "1969-12-31T23:57:52+00:00", utc, &stp)); + EXPECT_EQ(stp, time_point::min()); + EXPECT_EQ("1969-12-31T23:57:52+00:00", format(RFC3339_full, stp, utc)); + EXPECT_FALSE(parse(RFC3339_full, "1969-12-31T23:57:51.9+00:00", utc, &stp)); + + using DM = chrono::duration; + time_point mtp; + + EXPECT_TRUE(parse(RFC3339_full, "1970-01-01T02:07:59+00:00", utc, &mtp)); + EXPECT_EQ(mtp, time_point::max()); + EXPECT_EQ("1970-01-01T02:07:00+00:00", format(RFC3339_full, mtp, utc)); + EXPECT_FALSE(parse(RFC3339_full, "1970-01-01T02:08:00+00:00", utc, &mtp)); + + EXPECT_TRUE(parse(RFC3339_full, "1969-12-31T21:52:00+00:00", utc, &mtp)); + EXPECT_EQ(mtp, time_point::min()); + EXPECT_EQ("1969-12-31T21:52:00+00:00", format(RFC3339_full, mtp, utc)); + EXPECT_FALSE(parse(RFC3339_full, "1969-12-31T21:51:59+00:00", utc, &mtp)); +} + +TEST(Parse, TimePointOverflowFloor) { + const time_zone utc = utc_time_zone(); + + using D = chrono::duration; + time_point tp; + + EXPECT_TRUE( + parse(RFC3339_full, "294247-01-10T04:00:54.7758079+00:00", utc, &tp)); + EXPECT_EQ(tp, time_point::max()); + EXPECT_EQ("294247-01-10T04:00:54.775807+00:00", + format(RFC3339_full, tp, utc)); +#if 0 + // TODO(#199): Will fail until cctz::parse() properly detects overflow. + EXPECT_FALSE( + parse(RFC3339_full, "294247-01-10T04:00:54.7758080+00:00", utc, &tp)); + EXPECT_TRUE( + parse(RFC3339_full, "-290308-12-21T19:59:05.2241920+00:00", utc, &tp)); + EXPECT_EQ(tp, time_point::min()); + EXPECT_EQ("-290308-12-21T19:59:05.224192+00:00", + format(RFC3339_full, tp, utc)); + EXPECT_FALSE( + parse(RFC3339_full, "-290308-12-21T19:59:05.2241919+00:00", utc, &tp)); +#endif } // diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h index 32c0891c1e..7d3e42d3cd 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h @@ -56,7 +56,8 @@ class TimeZoneIf { // Convert between time_point and a count of seconds since the // Unix epoch. We assume that the std::chrono::system_clock and the -// Unix clock are second aligned, but not that they share an epoch. +// Unix clock are second aligned, and that the results are representable. +// (That is, that they share an epoch, which is required since C++20.) 
inline std::int_fast64_t ToUnixSeconds(const time_point& tp) { return (tp - std::chrono::time_point_cast( std::chrono::system_clock::from_time_t(0))) diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc index f2777d91ef..787426f755 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -133,6 +134,21 @@ std::int_fast64_t Decode64(const char* cp) { return static_cast(v - s64maxU - 1) - s64max - 1; } +// Does the rule for future transitions call for year-round daylight time? +// See tz/zic.c:stringzone() for the details on how such rules are encoded. +bool AllYearDST(const PosixTimeZone& posix) { + if (posix.dst_start.date.fmt != PosixTransition::N) return false; + if (posix.dst_start.date.n.day != 0) return false; + if (posix.dst_start.time.offset != 0) return false; + + if (posix.dst_end.date.fmt != PosixTransition::J) return false; + if (posix.dst_end.date.j.day != kDaysPerYear[0]) return false; + const auto offset = posix.std_offset - posix.dst_offset; + if (posix.dst_end.time.offset + offset != kSecsPerDay) return false; + + return true; +} + // Generate a year-relative offset for a PosixTransition. std::int_fast64_t TransOffset(bool leap_year, int jan1_weekday, const PosixTransition& pt) { @@ -350,6 +366,12 @@ bool TimeZoneInfo::ExtendTransitions() { if (!GetTransitionType(posix.dst_offset, true, posix.dst_abbr, &dst_ti)) return false; + if (AllYearDST(posix)) { // dst only + // The future specification should match the last transition, and + // that means that handling the future will fall out naturally. + return EquivTransitions(transitions_.back().type_index, dst_ti); + } + // Extend the transitions for an additional 400 years using the // future specification. Years beyond those can be handled by // mapping back to a cycle-equivalent year within that range. @@ -480,9 +502,9 @@ bool TimeZoneInfo::Load(ZoneInfoSource* zip) { // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when // interpreting a POSIX spec that does not include start/end rules, and // that isn't the case here (see "zic -p"). - bp += (8 + 4) * hdr.leapcnt; // leap-time + TAI-UTC - bp += 1 * hdr.ttisstdcnt; // UTC/local indicators - bp += 1 * hdr.ttisutcnt; // standard/wall indicators + bp += (time_len + 4) * hdr.leapcnt; // leap-time + TAI-UTC + bp += 1 * hdr.ttisstdcnt; // UTC/local indicators + bp += 1 * hdr.ttisutcnt; // standard/wall indicators assert(bp == tbuf.data() + tbuf.size()); future_spec_.clear(); @@ -511,8 +533,8 @@ bool TimeZoneInfo::Load(ZoneInfoSource* zip) { // Trim redundant transitions. zic may have added these to work around // differences between the glibc and reference implementations (see - // zic.c:dontmerge) and the Qt library (see zic.c:WORK_AROUND_QTBUG_53071). - // For us, they just get in the way when we do future_spec_ extension. + // zic.c:dontmerge) or to avoid bugs in old readers. For us, they just + // get in the way when we do future_spec_ extension. 
while (hdr.timecnt > 1) { if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index, transitions_[hdr.timecnt - 2].type_index)) { @@ -649,7 +671,7 @@ std::unique_ptr FileZoneInfoSource::Open( // Open the zoneinfo file. auto fp = FOpen(path.c_str(), "rb"); - if (fp.get() == nullptr) return nullptr; + if (fp == nullptr) return nullptr; return std::unique_ptr(new FileZoneInfoSource(std::move(fp))); } @@ -674,7 +696,7 @@ std::unique_ptr AndroidZoneInfoSource::Open( for (const char* tzdata : {"/data/misc/zoneinfo/current/tzdata", "/system/usr/share/zoneinfo/tzdata"}) { auto fp = FOpen(tzdata, "rb"); - if (fp.get() == nullptr) continue; + if (fp == nullptr) continue; char hbuf[24]; // covers header.zonetab_offset too if (fread(hbuf, 1, sizeof(hbuf), fp.get()) != sizeof(hbuf)) continue; @@ -708,6 +730,69 @@ std::unique_ptr AndroidZoneInfoSource::Open( return nullptr; } +// A zoneinfo source for use inside Fuchsia components. This attempts to +// read zoneinfo files from one of several known paths in a component's +// incoming namespace. [Config data][1] is preferred, but package-specific +// resources are also supported. +// +// Fuchsia's implementation supports `FileZoneInfoSource::Version()`. +// +// [1]: +// https://fuchsia.dev/fuchsia-src/development/components/data#using_config_data_in_your_component +class FuchsiaZoneInfoSource : public FileZoneInfoSource { + public: + static std::unique_ptr Open(const std::string& name); + std::string Version() const override { return version_; } + + private: + explicit FuchsiaZoneInfoSource(FilePtr fp, std::string version) + : FileZoneInfoSource(std::move(fp)), version_(std::move(version)) {} + std::string version_; +}; + +std::unique_ptr FuchsiaZoneInfoSource::Open( + const std::string& name) { + // Use of the "file:" prefix is intended for testing purposes only. + const std::size_t pos = (name.compare(0, 5, "file:") == 0) ? 5 : 0; + + // Prefixes where a Fuchsia component might find zoneinfo files, + // in descending order of preference. + const auto kTzdataPrefixes = { + "/config/data/tzdata/", + "/pkg/data/tzdata/", + "/data/tzdata/", + }; + const auto kEmptyPrefix = {""}; + const bool name_absolute = (pos != name.size() && name[pos] == '/'); + const auto prefixes = name_absolute ? kEmptyPrefix : kTzdataPrefixes; + + // Fuchsia builds place zoneinfo files at "". + for (const std::string prefix : prefixes) { + std::string path = prefix; + if (!prefix.empty()) path += "zoneinfo/tzif2/"; // format + path.append(name, pos, std::string::npos); + + auto fp = FOpen(path.c_str(), "rb"); + if (fp == nullptr) continue; + + std::string version; + if (!prefix.empty()) { + // Fuchsia builds place the version in "revision.txt". + std::ifstream version_stream(prefix + "revision.txt"); + if (version_stream.is_open()) { + // revision.txt should contain no newlines, but to be + // defensive we read just the first line. 
+ std::getline(version_stream, version); + } + } + + return std::unique_ptr( + new FuchsiaZoneInfoSource(std::move(fp), std::move(version))); + } + + return nullptr; +} + } // namespace bool TimeZoneInfo::Load(const std::string& name) { @@ -725,6 +810,7 @@ bool TimeZoneInfo::Load(const std::string& name) { name, [](const std::string& n) -> std::unique_ptr { if (auto z = FileZoneInfoSource::Open(n)) return z; if (auto z = AndroidZoneInfoSource::Open(n)) return z; + if (auto z = FuchsiaZoneInfoSource::Open(n)) return z; return nullptr; }); return zip != nullptr && Load(zip.get()); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc index efdea64b4e..f6983aeb95 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc @@ -28,6 +28,13 @@ #include #endif +#if defined(__Fuchsia__) +#include +#include +#include +#include +#endif + #include #include #include @@ -133,13 +140,56 @@ time_zone local_time_zone() { if (CFStringRef tz_name = CFTimeZoneGetName(tz_default)) { CFStringEncoding encoding = kCFStringEncodingUTF8; CFIndex length = CFStringGetLength(tz_name); - buffer.resize(CFStringGetMaximumSizeForEncoding(length, encoding) + 1); - if (CFStringGetCString(tz_name, &buffer[0], buffer.size(), encoding)) { + CFIndex max_size = CFStringGetMaximumSizeForEncoding(length, encoding) + 1; + buffer.resize(static_cast(max_size)); + if (CFStringGetCString(tz_name, &buffer[0], max_size, encoding)) { zone = &buffer[0]; } } CFRelease(tz_default); #endif +#if defined(__Fuchsia__) + std::string primary_tz; + [&]() { + // Note: We can't use the synchronous FIDL API here because it doesn't + // allow timeouts; if the FIDL call failed, local_time_zone() would never + // return. + + const zx::duration kTimeout = zx::msec(500); + + // Don't attach to the thread because otherwise the thread's dispatcher + // would be set to null when the loop is destroyed, causing any other FIDL + // code running on the same thread to crash. + async::Loop loop(&kAsyncLoopConfigNeverAttachToThread); + + fuchsia::intl::PropertyProviderHandle handle; + zx_status_t status = fdio_service_connect_by_name( + fuchsia::intl::PropertyProvider::Name_, + handle.NewRequest().TakeChannel().release()); + if (status != ZX_OK) { + return; + } + + fuchsia::intl::PropertyProviderPtr intl_provider; + status = intl_provider.Bind(std::move(handle), loop.dispatcher()); + if (status != ZX_OK) { + return; + } + + intl_provider->GetProfile( + [&loop, &primary_tz](fuchsia::intl::Profile profile) { + if (!profile.time_zones().empty()) { + primary_tz = profile.time_zones()[0].id; + } + loop.Quit(); + }); + loop.Run(zx::deadline_after(kTimeout)); + }(); + + if (!primary_tz.empty()) { + zone = primary_tz.c_str(); + } +#endif // Allow ${TZ} to override to default zone. 
char* tz_env = nullptr; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc index 6948c3ea2c..ab461f0451 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc @@ -21,10 +21,14 @@ #include #include -#include "gtest/gtest.h" #include "absl/base/config.h" -#include "absl/time/internal/cctz/include/cctz/civil_time.h" #include "absl/time/internal/cctz/include/cctz/time_zone.h" +#if defined(__linux__) +#include +#endif + +#include "gtest/gtest.h" +#include "absl/time/internal/cctz/include/cctz/civil_time.h" namespace chrono = std::chrono; @@ -485,6 +489,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan", "Europe/Kaliningrad", "Europe/Kiev", "Europe/Kirov", + "Europe/Kyiv", "Europe/Lisbon", "Europe/Ljubljana", "Europe/London", @@ -524,6 +529,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan", "Europe/Zagreb", "Europe/Zaporozhye", "Europe/Zurich", + "Factory", "GB", "GB-Eire", "GMT", @@ -579,6 +585,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan", "Pacific/Guam", "Pacific/Honolulu", "Pacific/Johnston", + "Pacific/Kanton", "Pacific/Kiritimati", "Pacific/Kosrae", "Pacific/Kwajalein", @@ -1026,7 +1033,11 @@ TEST(MakeTime, SysSecondsLimits) { #endif const year_t min_tm_year = year_t{std::numeric_limits::min()} + 1900; tp = convert(civil_second(min_tm_year, 1, 1, 0, 0, 0), cut); +#if defined(__Fuchsia__) + // Fuchsia's gmtime_r() fails on extreme negative values (fxbug.dev/78527). +#else EXPECT_EQ("-2147481748-01-01T00:00:00+00:00", format(RFC3339, tp, cut)); +#endif #endif } } @@ -1038,7 +1049,7 @@ TEST(MakeTime, LocalTimeLibC) { // 1) we know how to change the time zone used by localtime()/mktime(), // 2) cctz and localtime()/mktime() will use similar-enough tzdata, and // 3) we have some idea about how mktime() behaves during transitions. -#if defined(__linux__) && !defined(__ANDROID__) +#if defined(__linux__) && defined(__GLIBC__) && !defined(__ANDROID__) const char* const ep = getenv("TZ"); std::string tz_name = (ep != nullptr) ? ep : ""; for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) { @@ -1177,6 +1188,45 @@ TEST(PrevTransition, AmericaNewYork) { // We have a transition but we don't know which one. 
} +TEST(NextTransition, Scan) { + for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) { + SCOPED_TRACE(testing::Message() << "In " << *np); + time_zone tz; + // EXPECT_TRUE(load_time_zone(*np, &tz)); + if (!load_time_zone(*np, &tz)) { + continue; // tolerate kTimeZoneNames/zoneinfo skew + } + + auto tp = time_point::min(); + time_zone::civil_transition trans; + while (tz.next_transition(tp, &trans)) { + time_zone::civil_lookup from_cl = tz.lookup(trans.from); + EXPECT_NE(from_cl.kind, time_zone::civil_lookup::REPEATED); + time_zone::civil_lookup to_cl = tz.lookup(trans.to); + EXPECT_NE(to_cl.kind, time_zone::civil_lookup::SKIPPED); + + auto trans_tp = to_cl.trans; + time_zone::absolute_lookup trans_al = tz.lookup(trans_tp); + EXPECT_EQ(trans_al.cs, trans.to); + auto pre_trans_tp = trans_tp - absl::time_internal::cctz::seconds(1); + time_zone::absolute_lookup pre_trans_al = tz.lookup(pre_trans_tp); + EXPECT_EQ(pre_trans_al.cs + 1, trans.from); + + auto offset_delta = trans_al.offset - pre_trans_al.offset; + EXPECT_EQ(offset_delta, trans.to - trans.from); + if (offset_delta == 0) { + // This "transition" is only an is_dst or abbr change. + EXPECT_EQ(to_cl.kind, time_zone::civil_lookup::UNIQUE); + if (trans_al.is_dst == pre_trans_al.is_dst) { + EXPECT_STRNE(trans_al.abbr, pre_trans_al.abbr); + } + } + + tp = trans_tp; // continue scan from transition + } + } +} + TEST(TimeZoneEdgeCase, AmericaNewYork) { const time_zone tz = LoadZone("America/New_York"); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h index 269fa36c53..31e8598257 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h @@ -43,7 +43,7 @@ struct tzhead { char tzh_magic[4]; /* TZ_MAGIC */ - char tzh_version[1]; /* '\0' or '2' or '3' as of 2013 */ + char tzh_version[1]; /* '\0' or '2'-'4' as of 2021 */ char tzh_reserved[15]; /* reserved; must be zero */ char tzh_ttisutcnt[4]; /* coded number of trans. time flags */ char tzh_ttisstdcnt[4]; /* coded number of trans. 
time flags */ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc index 72095339c3..b818c21381 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc @@ -65,43 +65,42 @@ ZoneInfoSourceFactory zone_info_source_factory __attribute__((weak)) = extern ZoneInfoSourceFactory zone_info_source_factory; extern ZoneInfoSourceFactory default_factory; ZoneInfoSourceFactory default_factory = DefaultFactory; -#if defined(_M_IX86) -#pragma comment( \ - linker, \ - "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@@ZA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@@ZA") -#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM) || \ - defined(_M_ARM64) -#pragma comment( \ - linker, \ - "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@@ZEA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - 
"@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ - "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ - "@@ZEA") +#if defined(_M_IX86) || defined(_M_ARM) +#pragma comment( \ + linker, \ + "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@@ZA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@@ZA") +#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM64) +#pragma comment( \ + linker, \ + "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@@ZEA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \ + "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \ + "@@ZEA") #else #error Unsupported MSVC platform #endif // _M_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo index 95fb4a91d1..67e9c404ac 100644 --- 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo @@ -21,6 +21,7 @@ New versions can be generated using the following shell script. REDO=posix_only \ LOCALTIME=Factory \ TZDATA_TEXT= \ + PACKRATDATA=backzone PACKRATLIST=zone.tab \ ZONETABLES=zone1970.tab tar --create --dereference --hard-dereference --file tzfile.tar \ --directory=tz tzfile.h diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/version b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/version index 1d590958af..5c8fbb478a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/version +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/version @@ -1 +1 @@ -2021a +2022f diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa index 5f4ebcb7f9..4e8951f5bb 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara index 5f4ebcb7f9..194e98690f 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera index 5f4ebcb7f9..194e98690f 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako index 8906e88c81..3cb875fade 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui index 3d7a71ba0e..0021d2d7fd 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui and 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul index 8906e88c81..b235744351 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre index 651e5cf67a..d7bca1e603 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville index 3d7a71ba0e..57a723b2ed 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura index 651e5cf67a..90b8679b36 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry index 8906e88c81..c22c328b29 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar index 8906e88c81..1f04c586ac 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam index 
5f4ebcb7f9..b37c2b4464 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti index 5f4ebcb7f9..e9bbc7ad9f 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala index 3d7a71ba0e..65001f609d 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown index 8906e88c81..8431ed65b9 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone index 651e5cf67a..e44209890b 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare index 651e5cf67a..c4a502cfde 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala index 5f4ebcb7f9..3021d844d9 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala differ diff --git 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali index 651e5cf67a..b2eff5701e 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa index 3d7a71ba0e..8d6f2a8c03 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville index 3d7a71ba0e..1544cf5b85 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome index 8906e88c81..8e2b700119 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda index 3d7a71ba0e..226d87f06f 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi index 651e5cf67a..14e1ee1602 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka index 651e5cf67a..18fcb1689f 100644 Binary files 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo index 3d7a71ba0e..8a3f4e9adf 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru index bada0638f8..820d852155 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane index bada0638f8..d57a53c48c 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu index 5f4ebcb7f9..25a597399b 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey index 3d7a71ba0e..bdf222af21 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott index 8906e88c81..faa6f324cb 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou index 8906e88c81..f4e55aeb8a 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo index 3d7a71ba0e..a869ec3f1f 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu index 8906e88c81..3cb875fade 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla index f4fe590342..d057735073 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua index f4fe590342..7ef2cc9972 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba index d6ddf7d8f6..6158ca50eb 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas index cbe22a7622..48faea2ece 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas and 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados index 9d3afa6a53..720c9863f2 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman index 9154643f4c..8be5515631 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua index e1780a5750..5e0a54f004 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica index f4fe590342..7c7cebfa84 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada index 19ccd3576d..e8be26b139 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada index f4fe590342..a58e63a44f 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe index f4fe590342..717473833e 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana index ebd85d0f3e..bcc66881c1 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo index 8283239eca..5c92e2967e 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros index 722751b20e..88cabcd152 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan index 4c819fab02..97d4d36c13 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida index d3b0ca12c9..e5de1131dc 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City index ffcf8bee10..80a415c70c 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City and 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey index dea9e3f586..a5822e2c62 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat index f4fe590342..41bf898bd7 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon index b9f67a9f94..fe6be8ea8c 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga index da0909cb21..560b8674f7 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas index 5c9a20b947..aa839ea7d4 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River index d6ddda4822..7e646d18e1 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel index 19ccd3576d..e8be26b139 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago index 8d6032264b..d3fc9b8343 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts index f4fe590342..6170b6c09d 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia index f4fe590342..e265baffbf 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas index f4fe590342..0e62d30bb5 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent index f4fe590342..64cbf90248 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay index fcb0328043..fe6be8ea8c 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay and 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana index 19ccd3576d..e8be26b139 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola index f4fe590342..a0a5d6021a 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin index f4fe590342..0e62d30bb5 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo index afb3929318..ea1f8f8a77 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole index afb3929318..ea1f8f8a77 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden index 01c47ccb86..ac571479d1 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman 
index 1bd09fef27..a3f9dff571 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain index 7409d74983..33f7a2073b 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus index 168ef9baa4..bd1624de51 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza index 58e9fdf42d..bed968e7ec 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron index aeda06b522..3ce1bac631 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh index 7ca9972502..de53596d68 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait index 01c47ccb86..5c7f106a35 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat index 58d75bc26e..cce5e19345 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh index ed687d2985..c49800e7ee 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon index 7ca9972502..de53596d68 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran index f1555f0032..824acb0426 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane index ed687d2985..659e511dff 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores index b7f75a9cf6..e6e2616e98 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira index 7c3a49c0e8..cf965c3f92 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira differ diff 
--git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena index 8906e88c81..6f7506807a 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental index 8d6032264b..d3fc9b8343 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland index d29bcd68b0..54dff005b8 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire index 4a45ea8f73..17d2b1582d 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin index 4a45ea8f73..17d2b1582d 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey index 323cd3818a..d40bcaa316 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man index 323cd3818a..b0a37e7e09 100644 Binary files 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey index 323cd3818a..9a10a2ec0a 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev index 8f83cefbcc..4e026859fd 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv new file mode 100644 index 0000000000..4e026859fd Binary files /dev/null and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon index 64841661a0..f0c70b6906 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana index a1bf9281ed..fdb9e86d4a 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo index a1bf9281ed..53db056883 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol index 
88a6f3bdb4..298b8326ca 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje index a1bf9281ed..036361cfba 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod index a5755685e3..4e026859fd 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz index 388df2969f..28465d83a9 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb index a1bf9281ed..8e13ede8d4 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye index 4ea8dae45a..4e026859fd 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo index 5f4ebcb7f9..0bf86f024a 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo differ diff --git 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro index 5f4ebcb7f9..640b3e8848 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte index 5f4ebcb7f9..7a009c3122 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran index f1555f0032..824acb0426 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte index 19ccd3576d..e8be26b139 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur index 4c819fab02..97d4d36c13 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General index ffcf8bee10..80a415c70c 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia index 244af26f8a..a6b835aab4 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia and 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter index d29bcd68b0..54dff005b8 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury index b22ab147c3..2b6a06088e 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji index e3934e423c..610b850b1d 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton new file mode 100644 index 0000000000..2b6a06088e Binary files /dev/null and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway index 001289ceec..b25364c599 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue index 7b35793513..be874e2472 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga index 143a1883b0..7220bda0ad 100644 Binary files 
a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan index bf9a2d955f..9539353b2c 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu index 54aeb0ffa2..f28c840184 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal index 64841661a0..f0c70b6906 100644 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal and b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/localtime b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/localtime deleted file mode 100644 index afeeb88d06..0000000000 Binary files a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/localtime and /dev/null differ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab index 396e4d3810..75372e3fa4 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab @@ -34,19 +34,16 @@ #country- #codes coordinates TZ comments AD +4230+00131 Europe/Andorra -AE,OM +2518+05518 Asia/Dubai +AE,OM,RE,SC,TF +2518+05518 Asia/Dubai UAE, Oman, Réunion, Seychelles, Crozet, Scattered Is AF +3431+06912 Asia/Kabul AL +4120+01950 Europe/Tirane AM +4011+04430 Asia/Yerevan AQ -6617+11031 Antarctica/Casey Casey AQ -6835+07758 Antarctica/Davis Davis -AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville AQ -6736+06253 Antarctica/Mawson Mawson AQ -6448-06406 Antarctica/Palmer Palmer AQ -6734-06808 Antarctica/Rothera Rothera -AQ -690022+0393524 Antarctica/Syowa Syowa AQ -720041+0023206 Antarctica/Troll Troll -AQ -7824+10654 Antarctica/Vostok Vostok AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF) AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: 
CB, CC, CN, ER, FM, MN, SE, SF) AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN) @@ -76,10 +73,9 @@ AU -3143+12852 Australia/Eucla Western Australia (Eucla) AZ +4023+04951 Asia/Baku BB +1306-05937 America/Barbados BD +2343+09025 Asia/Dhaka -BE +5050+00420 Europe/Brussels +BE,LU,NL +5050+00420 Europe/Brussels BG +4241+02319 Europe/Sofia BM +3217-06446 Atlantic/Bermuda -BN +0456+11455 Asia/Brunei BO -1630-06809 America/La_Paz BR -0351-03225 America/Noronha Atlantic islands BR -0127-04829 America/Belem Pará (east); Amapá @@ -97,7 +93,6 @@ BR +0249-06040 America/Boa_Vista Roraima BR -0308-06001 America/Manaus Amazonas (east) BR -0640-06952 America/Eirunepe Amazonas (west) BR -0958-06748 America/Rio_Branco Acre -BS +2505-07721 America/Nassau BT +2728+08939 Asia/Thimphu BY +5354+02734 Europe/Minsk BZ +1730-08812 America/Belize @@ -106,15 +101,10 @@ CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton) CA +4606-06447 America/Moncton Atlantic - New Brunswick CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas) -CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore) -CA +4339-07923 America/Toronto Eastern - ON, QC (most areas) -CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73) -CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay) +CA,BS +4339-07923 America/Toronto Eastern - ON, QC (most areas), Bahamas CA +6344-06828 America/Iqaluit Eastern - NU (most east areas) CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung) -CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H) CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba -CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances) CA +744144-0944945 America/Resolute Central - NU (Resolute) CA +624900-0920459 America/Rankin_Inlet Central - NU (central) CA +5024-10439 America/Regina CST - SK (most areas) @@ -123,32 +113,27 @@ CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W) CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west) CA +6227-11421 America/Yellowknife Mountain - NT (central) CA +682059-1334300 America/Inuvik Mountain - NT (west) -CA +4906-11631 America/Creston MST - BC (Creston) -CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John) +CA +5546-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John) CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson) CA +6043-13503 America/Whitehorse MST - Yukon (east) CA +6404-13925 America/Dawson MST - Yukon (west) CA +4916-12307 America/Vancouver Pacific - BC (most areas) -CC -1210+09655 Indian/Cocos CH,DE,LI +4723+00832 Europe/Zurich Swiss time -CI,BF,GM,GN,ML,MR,SH,SL,SN,TG +0519-00402 Africa/Abidjan +CI,BF,GH,GM,GN,IS,ML,MR,SH,SL,SN,TG +0519-00402 Africa/Abidjan CK -2114-15946 Pacific/Rarotonga CL -3327-07040 America/Santiago Chile (most areas) CL -5309-07055 America/Punta_Arenas Region of Magallanes CL -2709-10926 Pacific/Easter Easter Island CN +3114+12128 Asia/Shanghai Beijing Time -CN +4348+08735 Asia/Urumqi Xinjiang Time +CN,AQ +4348+08735 Asia/Urumqi Xinjiang Time, Vostok CO +0436-07405 America/Bogota CR +0956-08405 America/Costa_Rica CU +2308-08222 America/Havana CV +1455-02331 Atlantic/Cape_Verde -CW,AW,BQ,SX +1211-06900 America/Curacao -CX -1025+10543 Indian/Christmas CY +3510+03322 Asia/Nicosia Cyprus (most areas) CY +3507+03357 Asia/Famagusta Northern Cyprus CZ,SK +5005+01426 Europe/Prague -DE +5230+01322 Europe/Berlin Germany (most areas) -DK +5540+01235 Europe/Copenhagen 
+DE,DK,NO,SE,SJ +5230+01322 Europe/Berlin Germany (most areas), Scandinavia DO +1828-06954 America/Santo_Domingo DZ +3647+00303 Africa/Algiers EC -0210-07950 America/Guayaquil Ecuador (mainland) @@ -162,15 +147,12 @@ ES +2806-01524 Atlantic/Canary Canary Islands FI,AX +6010+02458 Europe/Helsinki FJ -1808+17825 Pacific/Fiji FK -5142-05751 Atlantic/Stanley -FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap -FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape FM +0519+16259 Pacific/Kosrae Kosrae FO +6201-00646 Atlantic/Faroe -FR +4852+00220 Europe/Paris +FR,MC +4852+00220 Europe/Paris GB,GG,IM,JE +513030-0000731 Europe/London GE +4143+04449 Asia/Tbilisi GF +0456-05220 America/Cayenne -GH +0533-00013 Africa/Accra GI +3608-00521 Europe/Gibraltar GL +6411-05144 America/Nuuk Greenland (most areas) GL +7646-01840 America/Danmarkshavn National Park (east coast) @@ -196,15 +178,14 @@ IN +2232+08822 Asia/Kolkata IO -0720+07225 Indian/Chagos IQ +3321+04425 Asia/Baghdad IR +3540+05126 Asia/Tehran -IS +6409-02151 Atlantic/Reykjavik IT,SM,VA +4154+01229 Europe/Rome JM +175805-0764736 America/Jamaica JO +3157+03556 Asia/Amman JP +353916+1394441 Asia/Tokyo KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT -0117+03649 Africa/Nairobi KG +4254+07436 Asia/Bishkek -KI +0125+17300 Pacific/Tarawa Gilbert Islands -KI -0308-17105 Pacific/Enderbury Phoenix Islands +KI,MH,TV,UM,WF +0125+17300 Pacific/Tarawa Gilberts, Marshalls, Tuvalu, Wallis & Futuna, Wake +KI -0247-17143 Pacific/Kanton Phoenix Islands KI +0152-15720 Pacific/Kiritimati Line Islands KP +3901+12545 Asia/Pyongyang KR +3733+12658 Asia/Seoul @@ -219,15 +200,12 @@ LB +3353+03530 Asia/Beirut LK +0656+07951 Asia/Colombo LR +0618-01047 Africa/Monrovia LT +5441+02519 Europe/Vilnius -LU +4936+00609 Europe/Luxembourg LV +5657+02406 Europe/Riga LY +3254+01311 Africa/Tripoli MA +3339-00735 Africa/Casablanca -MC +4342+00723 Europe/Monaco MD +4700+02850 Europe/Chisinau -MH +0709+17112 Pacific/Majuro Marshall Islands (most areas) MH +0905+16720 Pacific/Kwajalein Kwajalein -MM +1647+09610 Asia/Yangon +MM,CC +1647+09610 Asia/Yangon MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas) MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar @@ -235,7 +213,7 @@ MO +221150+1133230 Asia/Macau MQ +1436-06105 America/Martinique MT +3554+01431 Europe/Malta MU -2010+05730 Indian/Mauritius -MV +0410+07330 Indian/Maldives +MV,TF +0410+07330 Indian/Maldives Maldives, Kerguelen, St Paul I, Amsterdam I MX +1924-09909 America/Mexico_City Central Time MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo MX +2058-08937 America/Merida Central Time - Campeche, Yucatán @@ -247,34 +225,31 @@ MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border) MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora MX +3232-11701 America/Tijuana Pacific Time US - Baja California MX +2048-10515 America/Bahia_Banderas Central Time - Bahía de Banderas -MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula) -MY +0133+11020 Asia/Kuching Sabah, Sarawak +MY,BN +0133+11020 Asia/Kuching Sabah, Sarawak, Brunei MZ,BI,BW,CD,MW,RW,ZM,ZW -2558+03235 Africa/Maputo Central Africa Time NA -2234+01706 Africa/Windhoek NC -2216+16627 Pacific/Noumea NF -2903+16758 Pacific/Norfolk NG,AO,BJ,CD,CF,CG,CM,GA,GQ,NE +0627+00324 Africa/Lagos West Africa Time NI +1209-08617 America/Managua -NL +5222+00454 Europe/Amsterdam -NO,SJ +5955+01045 Europe/Oslo NP +2743+08519 Asia/Kathmandu NR -0031+16655 Pacific/Nauru NU -1901-16955 Pacific/Niue NZ,AQ -3652+17446 
Pacific/Auckland New Zealand time NZ -4357-17633 Pacific/Chatham Chatham Islands -PA,KY +0858-07932 America/Panama +PA,CA,KY +0858-07932 America/Panama EST - Panama, Cayman, ON (Atikokan), NU (Coral H) PE -1203-07703 America/Lima PF -1732-14934 Pacific/Tahiti Society Islands PF -0900-13930 Pacific/Marquesas Marquesas Islands PF -2308-13457 Pacific/Gambier Gambier Islands -PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas) +PG,AQ,FM -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas), Chuuk, Yap, Dumont d'Urville PG -0613+15534 Pacific/Bougainville Bougainville PH +1435+12100 Asia/Manila PK +2452+06703 Asia/Karachi PL +5215+02100 Europe/Warsaw PM +4703-05620 America/Miquelon PN -2504-13005 Pacific/Pitcairn -PR +182806-0660622 America/Puerto_Rico +PR,AG,CA,AI,AW,BL,BQ,CW,DM,GD,GP,KN,LC,MF,MS,SX,TT,VC,VG,VI +182806-0660622 America/Puerto_Rico AST PS +3130+03428 Asia/Gaza Gaza Strip PS +313200+0350542 Asia/Hebron West Bank PT +3843-00908 Europe/Lisbon Portugal (mainland) @@ -283,7 +258,6 @@ PT +3744-02540 Atlantic/Azores Azores PW +0720+13429 Pacific/Palau PY -2516-05740 America/Asuncion QA,BH +2517+05132 Asia/Qatar -RE,TF -2052+05528 Indian/Reunion Réunion, Crozet, Scattered Islands RO +4426+02606 Europe/Bucharest RS,BA,HR,ME,MK,SI +4450+02030 Europe/Belgrade RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad @@ -314,12 +288,10 @@ RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea -SA,KW,YE +2438+04643 Asia/Riyadh -SB -0932+16012 Pacific/Guadalcanal -SC -0440+05528 Indian/Mahe +SA,AQ,KW,YE +2438+04643 Asia/Riyadh Arabia, Syowa +SB,FM -0932+16012 Pacific/Guadalcanal Solomons, Pohnpei SD +1536+03232 Africa/Khartoum -SE +5920+01803 Europe/Stockholm -SG +0117+10351 Asia/Singapore +SG,MY +0117+10351 Asia/Singapore Singapore, peninsular Malaysia SR +0550-05510 America/Paramaribo SS +0451+03137 Africa/Juba ST +0020+00644 Africa/Sao_Tome @@ -327,22 +299,16 @@ SV +1342-08912 America/El_Salvador SY +3330+03618 Asia/Damascus TC +2128-07108 America/Grand_Turk TD +1207+01503 Africa/Ndjamena -TF -492110+0701303 Indian/Kerguelen Kerguelen, St Paul Island, Amsterdam Island -TH,KH,LA,VN +1345+10031 Asia/Bangkok Indochina (most areas) +TH,CX,KH,LA,VN +1345+10031 Asia/Bangkok Indochina (most areas) TJ +3835+06848 Asia/Dushanbe TK -0922-17114 Pacific/Fakaofo TL -0833+12535 Asia/Dili TM +3757+05823 Asia/Ashgabat TN +3648+01011 Africa/Tunis -TO -2110-17510 Pacific/Tongatapu +TO -210800-1751200 Pacific/Tongatapu TR +4101+02858 Europe/Istanbul -TT,AG,AI,BL,DM,GD,GP,KN,LC,MF,MS,VC,VG,VI +1039-06131 America/Port_of_Spain -TV -0831+17913 Pacific/Funafuti TW +2503+12130 Asia/Taipei -UA +5026+03031 Europe/Kiev Ukraine (most areas) -UA +4837+02218 Europe/Uzhgorod Transcarpathia -UA +4750+03510 Europe/Zaporozhye Zaporozhye and east Lugansk -UM +1917+16637 Pacific/Wake Wake Island +UA +5026+03031 Europe/Kyiv Ukraine (most areas) US +404251-0740023 America/New_York Eastern (most areas) US +421953-0830245 America/Detroit Eastern - MI (most areas) US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area) @@ -362,7 +328,7 @@ US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural) US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer) US +394421-1045903 America/Denver Mountain (most areas) US +433649-1161209 America/Boise Mountain - ID (south); OR (east) -US +332654-1120424 
America/Phoenix MST - Arizona (except Navajo) +US,CA +332654-1120424 America/Phoenix MST - Arizona (except Navajo), Creston BC US +340308-1181434 America/Los_Angeles Pacific US +611305-1495401 America/Anchorage Alaska (most areas) US +581807-1342511 America/Juneau Alaska - Juneau area @@ -378,6 +344,29 @@ UZ +4120+06918 Asia/Tashkent Uzbekistan (east) VE +1030-06656 America/Caracas VN +1045+10640 Asia/Ho_Chi_Minh Vietnam (south) VU -1740+16825 Pacific/Efate -WF -1318-17610 Pacific/Wallis WS -1350-17144 Pacific/Apia ZA,LS,SZ -2615+02800 Africa/Johannesburg +# +# The next section contains experimental tab-separated comments for +# use by user agents like tzselect that identify continents and oceans. +# +# For example, the comment "#@AQAntarctica/" means the country code +# AQ is in the continent Antarctica regardless of the Zone name, +# so Pacific/Auckland should be listed under Antarctica as well as +# under the Pacific because its line's country codes include AQ. +# +# If more than one country code is affected each is listed separated +# by commas, e.g., #@IS,SHAtlantic/". If a country code is in +# more than one continent or ocean, each is listed separated by +# commas, e.g., the second column of "#@CY,TRAsia/,Europe/". +# +# These experimental comments are present only for country codes where +# the continent or ocean is not already obvious from the Zone name. +# For example, there is no such comment for RU since it already +# corresponds to Zone names starting with both "Europe/" and "Asia/". +# +#@AQ Antarctica/ +#@IS,SH Atlantic/ +#@CY,TR Asia/,Europe/ +#@SJ Arctic/ +#@CC,CX,KM,MG,YT Indian/ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/test_util.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/test_util.cc index 9a485a0750..3e2452e9c3 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/test_util.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/test_util.cc @@ -14,15 +14,8 @@ #include "absl/time/internal/test_util.h" -#include -#include -#include - #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" -#include "absl/time/internal/cctz/include/cctz/zone_info_source.h" - -namespace cctz = absl::time_internal::cctz; namespace absl { ABSL_NAMESPACE_BEGIN @@ -37,95 +30,3 @@ TimeZone LoadTimeZone(const std::string& name) { } // namespace time_internal ABSL_NAMESPACE_END } // namespace absl - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace time_internal { -namespace cctz_extension { -namespace { - -// Embed the zoneinfo data for time zones used during tests and benchmarks. -// The data was generated using "xxd -i zoneinfo-file". There is no need -// to update the data as long as the tests do not depend on recent changes -// (and the past rules remain the same). -#include "absl/time/internal/zoneinfo.inc" - -const struct ZoneInfo { - const char* name; - const char* data; - std::size_t length; -} kZoneInfo[] = { - // The three real time zones used by :time_test and :time_benchmark. - {"America/Los_Angeles", // - reinterpret_cast(America_Los_Angeles), America_Los_Angeles_len}, - {"America/New_York", // - reinterpret_cast(America_New_York), America_New_York_len}, - {"Australia/Sydney", // - reinterpret_cast(Australia_Sydney), Australia_Sydney_len}, - - // Other zones named in tests but which should fail to load. 
- {"Invalid/TimeZone", nullptr, 0}, - {"", nullptr, 0}, - - // Also allow for loading the local time zone under TZ=US/Pacific. - {"US/Pacific", // - reinterpret_cast(America_Los_Angeles), America_Los_Angeles_len}, - - // Allows use of the local time zone from a system-specific location. -#ifdef _MSC_VER - {"localtime", // - reinterpret_cast(America_Los_Angeles), America_Los_Angeles_len}, -#else - {"/etc/localtime", // - reinterpret_cast(America_Los_Angeles), America_Los_Angeles_len}, -#endif -}; - -class TestZoneInfoSource : public cctz::ZoneInfoSource { - public: - TestZoneInfoSource(const char* data, std::size_t size) - : data_(data), end_(data + size) {} - - std::size_t Read(void* ptr, std::size_t size) override { - const std::size_t len = std::min(size, end_ - data_); - memcpy(ptr, data_, len); - data_ += len; - return len; - } - - int Skip(std::size_t offset) override { - data_ += std::min(offset, end_ - data_); - return 0; - } - - private: - const char* data_; - const char* const end_; -}; - -std::unique_ptr TestFactory( - const std::string& name, - const std::function( - const std::string& name)>& /*fallback_factory*/) { - for (const ZoneInfo& zoneinfo : kZoneInfo) { - if (name == zoneinfo.name) { - if (zoneinfo.data == nullptr) return nullptr; - return std::unique_ptr( - new TestZoneInfoSource(zoneinfo.data, zoneinfo.length)); - } - } - ABSL_RAW_LOG(FATAL, "Unexpected time zone \"%s\" in test", name.c_str()); - return nullptr; -} - -} // namespace - -#if !defined(__MINGW32__) -// MinGW does not support the weak symbol extension mechanism. -ZoneInfoSourceFactory zone_info_source_factory = TestFactory; -#endif - -} // namespace cctz_extension -} // namespace time_internal -ABSL_NAMESPACE_END -} // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/zoneinfo.inc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/zoneinfo.inc deleted file mode 100644 index bfed82990d..0000000000 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/internal/zoneinfo.inc +++ /dev/null @@ -1,729 +0,0 @@ -unsigned char America_Los_Angeles[] = { - 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00, - 0x9e, 0xa6, 0x48, 0xa0, 0x9f, 0xbb, 0x15, 0x90, 0xa0, 0x86, 0x2a, 0xa0, - 0xa1, 0x9a, 0xf7, 0x90, 0xcb, 0x89, 0x1a, 0xa0, 0xd2, 0x23, 0xf4, 0x70, - 0xd2, 0x61, 0x26, 0x10, 0xd6, 0xfe, 0x74, 0x5c, 0xd8, 0x80, 0xad, 0x90, - 0xda, 0xfe, 0xc3, 0x90, 0xdb, 0xc0, 0x90, 0x10, 0xdc, 0xde, 0xa5, 0x90, - 0xdd, 0xa9, 0xac, 0x90, 0xde, 0xbe, 0x87, 0x90, 0xdf, 0x89, 0x8e, 0x90, - 0xe0, 0x9e, 0x69, 0x90, 0xe1, 0x69, 0x70, 0x90, 0xe2, 0x7e, 0x4b, 0x90, - 0xe3, 0x49, 0x52, 0x90, 0xe4, 0x5e, 0x2d, 0x90, 0xe5, 0x29, 0x34, 0x90, - 0xe6, 0x47, 0x4a, 0x10, 0xe7, 0x12, 0x51, 0x10, 0xe8, 0x27, 0x2c, 0x10, - 0xe8, 0xf2, 0x33, 0x10, 0xea, 0x07, 0x0e, 0x10, 0xea, 0xd2, 0x15, 0x10, - 0xeb, 0xe6, 0xf0, 0x10, 0xec, 0xb1, 0xf7, 0x10, 0xed, 0xc6, 0xd2, 0x10, - 0xee, 0x91, 0xd9, 0x10, 0xef, 0xaf, 0xee, 0x90, 0xf0, 0x71, 0xbb, 0x10, - 0xf1, 0x8f, 0xd0, 0x90, 0xf2, 0x7f, 0xc1, 0x90, 0xf3, 0x6f, 0xb2, 0x90, - 0xf4, 0x5f, 0xa3, 0x90, 0xf5, 0x4f, 0x94, 0x90, 0xf6, 0x3f, 0x85, 0x90, - 0xf7, 0x2f, 0x76, 0x90, 0xf8, 0x28, 0xa2, 0x10, 0xf9, 0x0f, 0x58, 0x90, - 0xfa, 0x08, 0x84, 0x10, 0xfa, 0xf8, 0x83, 0x20, 0xfb, 0xe8, 
0x66, 0x10, - 0xfc, 0xd8, 0x65, 0x20, 0xfd, 0xc8, 0x48, 0x10, 0xfe, 0xb8, 0x47, 0x20, - 0xff, 0xa8, 0x2a, 0x10, 0x00, 0x98, 0x29, 0x20, 0x01, 0x88, 0x0c, 0x10, - 0x02, 0x78, 0x0b, 0x20, 0x03, 0x71, 0x28, 0x90, 0x04, 0x61, 0x27, 0xa0, - 0x05, 0x51, 0x0a, 0x90, 0x06, 0x41, 0x09, 0xa0, 0x07, 0x30, 0xec, 0x90, - 0x07, 0x8d, 0x43, 0xa0, 0x09, 0x10, 0xce, 0x90, 0x09, 0xad, 0xbf, 0x20, - 0x0a, 0xf0, 0xb0, 0x90, 0x0b, 0xe0, 0xaf, 0xa0, 0x0c, 0xd9, 0xcd, 0x10, - 0x0d, 0xc0, 0x91, 0xa0, 0x0e, 0xb9, 0xaf, 0x10, 0x0f, 0xa9, 0xae, 0x20, - 0x10, 0x99, 0x91, 0x10, 0x11, 0x89, 0x90, 0x20, 0x12, 0x79, 0x73, 0x10, - 0x13, 0x69, 0x72, 0x20, 0x14, 0x59, 0x55, 0x10, 0x15, 0x49, 0x54, 0x20, - 0x16, 0x39, 0x37, 0x10, 0x17, 0x29, 0x36, 0x20, 0x18, 0x22, 0x53, 0x90, - 0x19, 0x09, 0x18, 0x20, 0x1a, 0x02, 0x35, 0x90, 0x1a, 0xf2, 0x34, 0xa0, - 0x1b, 0xe2, 0x17, 0x90, 0x1c, 0xd2, 0x16, 0xa0, 0x1d, 0xc1, 0xf9, 0x90, - 0x1e, 0xb1, 0xf8, 0xa0, 0x1f, 0xa1, 0xdb, 0x90, 0x20, 0x76, 0x2b, 0x20, - 0x21, 0x81, 0xbd, 0x90, 0x22, 0x56, 0x0d, 0x20, 0x23, 0x6a, 0xda, 0x10, - 0x24, 0x35, 0xef, 0x20, 0x25, 0x4a, 0xbc, 0x10, 0x26, 0x15, 0xd1, 0x20, - 0x27, 0x2a, 0x9e, 0x10, 0x27, 0xfe, 0xed, 0xa0, 0x29, 0x0a, 0x80, 0x10, - 0x29, 0xde, 0xcf, 0xa0, 0x2a, 0xea, 0x62, 0x10, 0x2b, 0xbe, 0xb1, 0xa0, - 0x2c, 0xd3, 0x7e, 0x90, 0x2d, 0x9e, 0x93, 0xa0, 0x2e, 0xb3, 0x60, 0x90, - 0x2f, 0x7e, 0x75, 0xa0, 0x30, 0x93, 0x42, 0x90, 0x31, 0x67, 0x92, 0x20, - 0x32, 0x73, 0x24, 0x90, 0x33, 0x47, 0x74, 0x20, 0x34, 0x53, 0x06, 0x90, - 0x35, 0x27, 0x56, 0x20, 0x36, 0x32, 0xe8, 0x90, 0x37, 0x07, 0x38, 0x20, - 0x38, 0x1c, 0x05, 0x10, 0x38, 0xe7, 0x1a, 0x20, 0x39, 0xfb, 0xe7, 0x10, - 0x3a, 0xc6, 0xfc, 0x20, 0x3b, 0xdb, 0xc9, 0x10, 0x3c, 0xb0, 0x18, 0xa0, - 0x3d, 0xbb, 0xab, 0x10, 0x3e, 0x8f, 0xfa, 0xa0, 0x3f, 0x9b, 0x8d, 0x10, - 0x40, 0x6f, 0xdc, 0xa0, 0x41, 0x84, 0xa9, 0x90, 0x42, 0x4f, 0xbe, 0xa0, - 0x43, 0x64, 0x8b, 0x90, 0x44, 0x2f, 0xa0, 0xa0, 0x45, 0x44, 0x6d, 0x90, - 0x45, 0xf3, 0xd3, 0x20, 0x47, 0x2d, 0x8a, 0x10, 0x47, 0xd3, 0xb5, 0x20, - 0x49, 0x0d, 0x6c, 0x10, 0x49, 0xb3, 0x97, 0x20, 0x4a, 0xed, 0x4e, 0x10, - 0x4b, 0x9c, 0xb3, 0xa0, 0x4c, 0xd6, 0x6a, 0x90, 0x4d, 0x7c, 0x95, 0xa0, - 0x4e, 0xb6, 0x4c, 0x90, 0x4f, 0x5c, 0x77, 0xa0, 0x50, 0x96, 0x2e, 0x90, - 0x51, 0x3c, 0x59, 0xa0, 0x52, 0x76, 0x10, 0x90, 0x53, 0x1c, 0x3b, 0xa0, - 0x54, 0x55, 0xf2, 0x90, 0x54, 0xfc, 0x1d, 0xa0, 0x56, 0x35, 0xd4, 0x90, - 0x56, 0xe5, 0x3a, 0x20, 0x58, 0x1e, 0xf1, 0x10, 0x58, 0xc5, 0x1c, 0x20, - 0x59, 0xfe, 0xd3, 0x10, 0x5a, 0xa4, 0xfe, 0x20, 0x5b, 0xde, 0xb5, 0x10, - 0x5c, 0x84, 0xe0, 0x20, 0x5d, 0xbe, 0x97, 0x10, 0x5e, 0x64, 0xc2, 0x20, - 0x5f, 0x9e, 0x79, 0x10, 0x60, 0x4d, 0xde, 0xa0, 0x61, 0x87, 0x95, 0x90, - 0x62, 0x2d, 0xc0, 0xa0, 0x63, 0x67, 0x77, 0x90, 0x64, 0x0d, 0xa2, 0xa0, - 0x65, 0x47, 0x59, 0x90, 0x65, 0xed, 0x84, 0xa0, 0x67, 0x27, 0x3b, 0x90, - 0x67, 0xcd, 0x66, 0xa0, 0x69, 0x07, 0x1d, 0x90, 0x69, 0xad, 0x48, 0xa0, - 0x6a, 0xe6, 0xff, 0x90, 0x6b, 0x96, 0x65, 0x20, 0x6c, 0xd0, 0x1c, 0x10, - 0x6d, 0x76, 0x47, 0x20, 0x6e, 0xaf, 0xfe, 0x10, 0x6f, 0x56, 0x29, 0x20, - 0x70, 0x8f, 0xe0, 0x10, 0x71, 0x36, 0x0b, 0x20, 0x72, 0x6f, 0xc2, 0x10, - 0x73, 0x15, 0xed, 0x20, 0x74, 0x4f, 0xa4, 0x10, 0x74, 0xff, 0x09, 0xa0, - 0x76, 0x38, 0xc0, 0x90, 0x76, 0xde, 0xeb, 0xa0, 0x78, 0x18, 0xa2, 0x90, - 0x78, 0xbe, 0xcd, 0xa0, 0x79, 0xf8, 0x84, 0x90, 0x7a, 0x9e, 0xaf, 0xa0, - 0x7b, 0xd8, 0x66, 0x90, 0x7c, 0x7e, 0x91, 0xa0, 0x7d, 0xb8, 0x48, 0x90, - 0x7e, 0x5e, 0x73, 0xa0, 0x7f, 0x98, 0x2a, 0x90, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 
0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90, - 0x01, 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90, - 0x01, 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, - 0x50, 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00, - 0x50, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xbb, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x04, - 0x1a, 0xc0, 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x48, 0xa0, 0xff, 0xff, - 0xff, 0xff, 0x9f, 0xbb, 0x15, 0x90, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86, - 0x2a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xf7, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xcb, 0x89, 0x1a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x23, - 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x61, 0x26, 0x10, 0xff, 0xff, - 0xff, 0xff, 0xd6, 0xfe, 0x74, 0x5c, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x80, - 0xad, 0x90, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xc3, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xdb, 0xc0, 0x90, 0x10, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xde, - 0xa5, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0xac, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xde, 0xbe, 0x87, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x89, - 0x8e, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x69, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xe1, 0x69, 0x70, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e, - 0x4b, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x52, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xe4, 0x5e, 0x2d, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe5, 0x29, - 0x34, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x4a, 0x10, 0xff, 0xff, - 0xff, 0xff, 0xe7, 0x12, 0x51, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x27, - 0x2c, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xf2, 0x33, 0x10, 0xff, 0xff, - 0xff, 0xff, 0xea, 0x07, 0x0e, 0x10, 0xff, 0xff, 0xff, 0xff, 0xea, 0xd2, - 0x15, 0x10, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xf0, 0x10, 0xff, 0xff, - 0xff, 0xff, 0xec, 0xb1, 0xf7, 0x10, 0xff, 0xff, 0xff, 0xff, 0xed, 0xc6, - 0xd2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xee, 0x91, 0xd9, 0x10, 0xff, 0xff, - 0xff, 0xff, 0xef, 0xaf, 0xee, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x71, - 0xbb, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xd0, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xf2, 0x7f, 0xc1, 0x90, 0xff, 0xff, 0xff, 0xff, 
0xf3, 0x6f, - 0xb2, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0xa3, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xf5, 0x4f, 0x94, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x3f, - 0x85, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x76, 0x90, 0xff, 0xff, - 0xff, 0xff, 0xf8, 0x28, 0xa2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x0f, - 0x58, 0x90, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x84, 0x10, 0xff, 0xff, - 0xff, 0xff, 0xfa, 0xf8, 0x83, 0x20, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe8, - 0x66, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x65, 0x20, 0xff, 0xff, - 0xff, 0xff, 0xfd, 0xc8, 0x48, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb8, - 0x47, 0x20, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x2a, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x98, 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88, - 0x0c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x0b, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x03, 0x71, 0x28, 0x90, 0x00, 0x00, 0x00, 0x00, 0x04, 0x61, - 0x27, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x05, 0x51, 0x0a, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x06, 0x41, 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x30, - 0xec, 0x90, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x43, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x09, 0x10, 0xce, 0x90, 0x00, 0x00, 0x00, 0x00, 0x09, 0xad, - 0xbf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0xb0, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x0b, 0xe0, 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd9, - 0xcd, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x91, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0xb9, 0xaf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xa9, - 0xae, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x91, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x11, 0x89, 0x90, 0x20, 0x00, 0x00, 0x00, 0x00, 0x12, 0x79, - 0x73, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x72, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x14, 0x59, 0x55, 0x10, 0x00, 0x00, 0x00, 0x00, 0x15, 0x49, - 0x54, 0x20, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x37, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x17, 0x29, 0x36, 0x20, 0x00, 0x00, 0x00, 0x00, 0x18, 0x22, - 0x53, 0x90, 0x00, 0x00, 0x00, 0x00, 0x19, 0x09, 0x18, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x1a, 0x02, 0x35, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf2, - 0x34, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe2, 0x17, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x1c, 0xd2, 0x16, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1, - 0xf9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xf8, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x1f, 0xa1, 0xdb, 0x90, 0x00, 0x00, 0x00, 0x00, 0x20, 0x76, - 0x2b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0xbd, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x22, 0x56, 0x0d, 0x20, 0x00, 0x00, 0x00, 0x00, 0x23, 0x6a, - 0xda, 0x10, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xef, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x25, 0x4a, 0xbc, 0x10, 0x00, 0x00, 0x00, 0x00, 0x26, 0x15, - 0xd1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x9e, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x27, 0xfe, 0xed, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x29, 0x0a, - 0x80, 0x10, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xcf, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x2a, 0xea, 0x62, 0x10, 0x00, 0x00, 0x00, 0x00, 0x2b, 0xbe, - 0xb1, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x7e, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x2d, 0x9e, 0x93, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb3, - 0x60, 0x90, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x75, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x30, 0x93, 0x42, 0x90, 0x00, 0x00, 0x00, 0x00, 0x31, 0x67, - 0x92, 0x20, 0x00, 0x00, 0x00, 0x00, 0x32, 0x73, 0x24, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x33, 0x47, 0x74, 0x20, 0x00, 0x00, 0x00, 0x00, 0x34, 0x53, - 0x06, 0x90, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x56, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x36, 0x32, 0xe8, 0x90, 0x00, 0x00, 0x00, 0x00, 
0x37, 0x07, - 0x38, 0x20, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1c, 0x05, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x38, 0xe7, 0x1a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x39, 0xfb, - 0xe7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xfc, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x3b, 0xdb, 0xc9, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb0, - 0x18, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0xab, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x3e, 0x8f, 0xfa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9b, - 0x8d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xdc, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x41, 0x84, 0xa9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x42, 0x4f, - 0xbe, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x8b, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x44, 0x2f, 0xa0, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x45, 0x44, - 0x6d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xd3, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x47, 0x2d, 0x8a, 0x10, 0x00, 0x00, 0x00, 0x00, 0x47, 0xd3, - 0xb5, 0x20, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x6c, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x49, 0xb3, 0x97, 0x20, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xed, - 0x4e, 0x10, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0xb3, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x4c, 0xd6, 0x6a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x7c, - 0x95, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x4c, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x4f, 0x5c, 0x77, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x50, 0x96, - 0x2e, 0x90, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x59, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x76, 0x10, 0x90, 0x00, 0x00, 0x00, 0x00, 0x53, 0x1c, - 0x3b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xf2, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x54, 0xfc, 0x1d, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x56, 0x35, - 0xd4, 0x90, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x3a, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x58, 0x1e, 0xf1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x58, 0xc5, - 0x1c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xd3, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x5a, 0xa4, 0xfe, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde, - 0xb5, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xe0, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x5d, 0xbe, 0x97, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x64, - 0xc2, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x79, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x60, 0x4d, 0xde, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x61, 0x87, - 0x95, 0x90, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0xc0, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x63, 0x67, 0x77, 0x90, 0x00, 0x00, 0x00, 0x00, 0x64, 0x0d, - 0xa2, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x59, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x65, 0xed, 0x84, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x67, 0x27, - 0x3b, 0x90, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x66, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x69, 0x07, 0x1d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x69, 0xad, - 0x48, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xff, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x6b, 0x96, 0x65, 0x20, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xd0, - 0x1c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x47, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x6e, 0xaf, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x56, - 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xe0, 0x10, 0x00, 0x00, - 0x00, 0x00, 0x71, 0x36, 0x0b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x72, 0x6f, - 0xc2, 0x10, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xed, 0x20, 0x00, 0x00, - 0x00, 0x00, 0x74, 0x4f, 0xa4, 0x10, 0x00, 0x00, 0x00, 0x00, 0x74, 0xff, - 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0xc0, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x76, 0xde, 0xeb, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18, - 0xa2, 0x90, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xcd, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x79, 0xf8, 0x84, 0x90, 0x00, 0x00, 0x00, 0x00, 
0x7a, 0x9e, - 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x66, 0x90, 0x00, 0x00, - 0x00, 0x00, 0x7c, 0x7e, 0x91, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7d, 0xb8, - 0x48, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x73, 0xa0, 0x00, 0x00, - 0x00, 0x00, 0x7f, 0x98, 0x2a, 0x90, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90, 0x01, - 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90, 0x01, - 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x50, - 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00, 0x50, - 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x0a, 0x50, 0x53, 0x54, 0x38, 0x50, 0x44, 0x54, 0x2c, 0x4d, 0x33, - 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31, 0x2e, 0x31, 0x2e, 0x30, - 0x0a -}; -unsigned int America_Los_Angeles_len = 2845; -unsigned char America_New_York[] = { - 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00, - 0x9e, 0xa6, 0x1e, 0x70, 0x9f, 0xba, 0xeb, 0x60, 0xa0, 0x86, 0x00, 0x70, - 0xa1, 0x9a, 0xcd, 0x60, 0xa2, 0x65, 0xe2, 0x70, 0xa3, 0x83, 0xe9, 0xe0, - 0xa4, 0x6a, 0xae, 0x70, 0xa5, 0x35, 0xa7, 0x60, 0xa6, 0x53, 0xca, 0xf0, - 0xa7, 0x15, 0x89, 0x60, 0xa8, 0x33, 0xac, 0xf0, 0xa8, 0xfe, 0xa5, 0xe0, - 0xaa, 0x13, 0x8e, 0xf0, 0xaa, 0xde, 0x87, 0xe0, 0xab, 0xf3, 0x70, 0xf0, - 0xac, 0xbe, 0x69, 0xe0, 0xad, 0xd3, 0x52, 0xf0, 0xae, 0x9e, 0x4b, 0xe0, - 0xaf, 0xb3, 0x34, 0xf0, 0xb0, 0x7e, 0x2d, 0xe0, 0xb1, 0x9c, 0x51, 0x70, - 0xb2, 0x67, 0x4a, 0x60, 0xb3, 0x7c, 0x33, 0x70, 0xb4, 0x47, 0x2c, 0x60, - 0xb5, 0x5c, 0x15, 0x70, 0xb6, 0x27, 0x0e, 0x60, 0xb7, 0x3b, 0xf7, 0x70, - 0xb8, 0x06, 0xf0, 0x60, 0xb9, 0x1b, 0xd9, 0x70, 0xb9, 0xe6, 0xd2, 0x60, - 0xbb, 0x04, 0xf5, 0xf0, 0xbb, 0xc6, 0xb4, 0x60, 0xbc, 0xe4, 0xd7, 0xf0, - 0xbd, 0xaf, 0xd0, 0xe0, 0xbe, 0xc4, 0xb9, 0xf0, 0xbf, 0x8f, 0xb2, 0xe0, - 0xc0, 0xa4, 0x9b, 0xf0, 0xc1, 0x6f, 0x94, 0xe0, 0xc2, 0x84, 0x7d, 0xf0, - 0xc3, 0x4f, 0x76, 0xe0, 0xc4, 0x64, 0x5f, 0xf0, 0xc5, 0x2f, 0x58, 0xe0, - 0xc6, 0x4d, 0x7c, 0x70, 0xc7, 0x0f, 0x3a, 0xe0, 0xc8, 0x2d, 0x5e, 0x70, - 0xc8, 0xf8, 0x57, 0x60, 0xca, 0x0d, 0x40, 0x70, 0xca, 0xd8, 0x39, 0x60, - 0xcb, 0x88, 0xf0, 0x70, 0xd2, 0x23, 0xf4, 
0x70, 0xd2, 0x60, 0xfb, 0xe0, - 0xd3, 0x75, 0xe4, 0xf0, 0xd4, 0x40, 0xdd, 0xe0, 0xd5, 0x55, 0xc6, 0xf0, - 0xd6, 0x20, 0xbf, 0xe0, 0xd7, 0x35, 0xa8, 0xf0, 0xd8, 0x00, 0xa1, 0xe0, - 0xd9, 0x15, 0x8a, 0xf0, 0xd9, 0xe0, 0x83, 0xe0, 0xda, 0xfe, 0xa7, 0x70, - 0xdb, 0xc0, 0x65, 0xe0, 0xdc, 0xde, 0x89, 0x70, 0xdd, 0xa9, 0x82, 0x60, - 0xde, 0xbe, 0x6b, 0x70, 0xdf, 0x89, 0x64, 0x60, 0xe0, 0x9e, 0x4d, 0x70, - 0xe1, 0x69, 0x46, 0x60, 0xe2, 0x7e, 0x2f, 0x70, 0xe3, 0x49, 0x28, 0x60, - 0xe4, 0x5e, 0x11, 0x70, 0xe5, 0x57, 0x2e, 0xe0, 0xe6, 0x47, 0x2d, 0xf0, - 0xe7, 0x37, 0x10, 0xe0, 0xe8, 0x27, 0x0f, 0xf0, 0xe9, 0x16, 0xf2, 0xe0, - 0xea, 0x06, 0xf1, 0xf0, 0xea, 0xf6, 0xd4, 0xe0, 0xeb, 0xe6, 0xd3, 0xf0, - 0xec, 0xd6, 0xb6, 0xe0, 0xed, 0xc6, 0xb5, 0xf0, 0xee, 0xbf, 0xd3, 0x60, - 0xef, 0xaf, 0xd2, 0x70, 0xf0, 0x9f, 0xb5, 0x60, 0xf1, 0x8f, 0xb4, 0x70, - 0xf2, 0x7f, 0x97, 0x60, 0xf3, 0x6f, 0x96, 0x70, 0xf4, 0x5f, 0x79, 0x60, - 0xf5, 0x4f, 0x78, 0x70, 0xf6, 0x3f, 0x5b, 0x60, 0xf7, 0x2f, 0x5a, 0x70, - 0xf8, 0x28, 0x77, 0xe0, 0xf9, 0x0f, 0x3c, 0x70, 0xfa, 0x08, 0x59, 0xe0, - 0xfa, 0xf8, 0x58, 0xf0, 0xfb, 0xe8, 0x3b, 0xe0, 0xfc, 0xd8, 0x3a, 0xf0, - 0xfd, 0xc8, 0x1d, 0xe0, 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xa7, 0xff, 0xe0, - 0x00, 0x97, 0xfe, 0xf0, 0x01, 0x87, 0xe1, 0xe0, 0x02, 0x77, 0xe0, 0xf0, - 0x03, 0x70, 0xfe, 0x60, 0x04, 0x60, 0xfd, 0x70, 0x05, 0x50, 0xe0, 0x60, - 0x06, 0x40, 0xdf, 0x70, 0x07, 0x30, 0xc2, 0x60, 0x07, 0x8d, 0x19, 0x70, - 0x09, 0x10, 0xa4, 0x60, 0x09, 0xad, 0x94, 0xf0, 0x0a, 0xf0, 0x86, 0x60, - 0x0b, 0xe0, 0x85, 0x70, 0x0c, 0xd9, 0xa2, 0xe0, 0x0d, 0xc0, 0x67, 0x70, - 0x0e, 0xb9, 0x84, 0xe0, 0x0f, 0xa9, 0x83, 0xf0, 0x10, 0x99, 0x66, 0xe0, - 0x11, 0x89, 0x65, 0xf0, 0x12, 0x79, 0x48, 0xe0, 0x13, 0x69, 0x47, 0xf0, - 0x14, 0x59, 0x2a, 0xe0, 0x15, 0x49, 0x29, 0xf0, 0x16, 0x39, 0x0c, 0xe0, - 0x17, 0x29, 0x0b, 0xf0, 0x18, 0x22, 0x29, 0x60, 0x19, 0x08, 0xed, 0xf0, - 0x1a, 0x02, 0x0b, 0x60, 0x1a, 0xf2, 0x0a, 0x70, 0x1b, 0xe1, 0xed, 0x60, - 0x1c, 0xd1, 0xec, 0x70, 0x1d, 0xc1, 0xcf, 0x60, 0x1e, 0xb1, 0xce, 0x70, - 0x1f, 0xa1, 0xb1, 0x60, 0x20, 0x76, 0x00, 0xf0, 0x21, 0x81, 0x93, 0x60, - 0x22, 0x55, 0xe2, 0xf0, 0x23, 0x6a, 0xaf, 0xe0, 0x24, 0x35, 0xc4, 0xf0, - 0x25, 0x4a, 0x91, 0xe0, 0x26, 0x15, 0xa6, 0xf0, 0x27, 0x2a, 0x73, 0xe0, - 0x27, 0xfe, 0xc3, 0x70, 0x29, 0x0a, 0x55, 0xe0, 0x29, 0xde, 0xa5, 0x70, - 0x2a, 0xea, 0x37, 0xe0, 0x2b, 0xbe, 0x87, 0x70, 0x2c, 0xd3, 0x54, 0x60, - 0x2d, 0x9e, 0x69, 0x70, 0x2e, 0xb3, 0x36, 0x60, 0x2f, 0x7e, 0x4b, 0x70, - 0x30, 0x93, 0x18, 0x60, 0x31, 0x67, 0x67, 0xf0, 0x32, 0x72, 0xfa, 0x60, - 0x33, 0x47, 0x49, 0xf0, 0x34, 0x52, 0xdc, 0x60, 0x35, 0x27, 0x2b, 0xf0, - 0x36, 0x32, 0xbe, 0x60, 0x37, 0x07, 0x0d, 0xf0, 0x38, 0x1b, 0xda, 0xe0, - 0x38, 0xe6, 0xef, 0xf0, 0x39, 0xfb, 0xbc, 0xe0, 0x3a, 0xc6, 0xd1, 0xf0, - 0x3b, 0xdb, 0x9e, 0xe0, 0x3c, 0xaf, 0xee, 0x70, 0x3d, 0xbb, 0x80, 0xe0, - 0x3e, 0x8f, 0xd0, 0x70, 0x3f, 0x9b, 0x62, 0xe0, 0x40, 0x6f, 0xb2, 0x70, - 0x41, 0x84, 0x7f, 0x60, 0x42, 0x4f, 0x94, 0x70, 0x43, 0x64, 0x61, 0x60, - 0x44, 0x2f, 0x76, 0x70, 0x45, 0x44, 0x43, 0x60, 0x45, 0xf3, 0xa8, 0xf0, - 0x47, 0x2d, 0x5f, 0xe0, 0x47, 0xd3, 0x8a, 0xf0, 0x49, 0x0d, 0x41, 0xe0, - 0x49, 0xb3, 0x6c, 0xf0, 0x4a, 0xed, 0x23, 0xe0, 0x4b, 0x9c, 0x89, 0x70, - 0x4c, 0xd6, 0x40, 0x60, 0x4d, 0x7c, 0x6b, 0x70, 0x4e, 0xb6, 0x22, 0x60, - 0x4f, 0x5c, 0x4d, 0x70, 0x50, 0x96, 0x04, 0x60, 0x51, 0x3c, 0x2f, 0x70, - 0x52, 0x75, 0xe6, 0x60, 0x53, 0x1c, 0x11, 0x70, 0x54, 0x55, 0xc8, 0x60, - 0x54, 0xfb, 0xf3, 0x70, 0x56, 0x35, 0xaa, 0x60, 0x56, 0xe5, 0x0f, 0xf0, - 0x58, 0x1e, 0xc6, 0xe0, 0x58, 0xc4, 0xf1, 
0xf0, 0x59, 0xfe, 0xa8, 0xe0, - 0x5a, 0xa4, 0xd3, 0xf0, 0x5b, 0xde, 0x8a, 0xe0, 0x5c, 0x84, 0xb5, 0xf0, - 0x5d, 0xbe, 0x6c, 0xe0, 0x5e, 0x64, 0x97, 0xf0, 0x5f, 0x9e, 0x4e, 0xe0, - 0x60, 0x4d, 0xb4, 0x70, 0x61, 0x87, 0x6b, 0x60, 0x62, 0x2d, 0x96, 0x70, - 0x63, 0x67, 0x4d, 0x60, 0x64, 0x0d, 0x78, 0x70, 0x65, 0x47, 0x2f, 0x60, - 0x65, 0xed, 0x5a, 0x70, 0x67, 0x27, 0x11, 0x60, 0x67, 0xcd, 0x3c, 0x70, - 0x69, 0x06, 0xf3, 0x60, 0x69, 0xad, 0x1e, 0x70, 0x6a, 0xe6, 0xd5, 0x60, - 0x6b, 0x96, 0x3a, 0xf0, 0x6c, 0xcf, 0xf1, 0xe0, 0x6d, 0x76, 0x1c, 0xf0, - 0x6e, 0xaf, 0xd3, 0xe0, 0x6f, 0x55, 0xfe, 0xf0, 0x70, 0x8f, 0xb5, 0xe0, - 0x71, 0x35, 0xe0, 0xf0, 0x72, 0x6f, 0x97, 0xe0, 0x73, 0x15, 0xc2, 0xf0, - 0x74, 0x4f, 0x79, 0xe0, 0x74, 0xfe, 0xdf, 0x70, 0x76, 0x38, 0x96, 0x60, - 0x76, 0xde, 0xc1, 0x70, 0x78, 0x18, 0x78, 0x60, 0x78, 0xbe, 0xa3, 0x70, - 0x79, 0xf8, 0x5a, 0x60, 0x7a, 0x9e, 0x85, 0x70, 0x7b, 0xd8, 0x3c, 0x60, - 0x7c, 0x7e, 0x67, 0x70, 0x7d, 0xb8, 0x1e, 0x60, 0x7e, 0x5e, 0x49, 0x70, - 0x7f, 0x98, 0x00, 0x60, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x04, - 0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x0c, - 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x45, 0x44, - 0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45, 0x57, 0x54, 0x00, 0x45, 0x50, - 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x03, 0xf0, 0x90, - 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x1e, 0x70, 0xff, 0xff, 0xff, 0xff, - 0x9f, 0xba, 0xeb, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86, 0x00, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xcd, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xa2, 0x65, 0xe2, 0x70, 0xff, 0xff, 0xff, 0xff, 0xa3, 0x83, 0xe9, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xa4, 0x6a, 0xae, 
0x70, 0xff, 0xff, 0xff, 0xff, - 0xa5, 0x35, 0xa7, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa6, 0x53, 0xca, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xa7, 0x15, 0x89, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xa8, 0x33, 0xac, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xa8, 0xfe, 0xa5, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xaa, 0x13, 0x8e, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xaa, 0xde, 0x87, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xab, 0xf3, 0x70, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xac, 0xbe, 0x69, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xad, 0xd3, 0x52, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xae, 0x9e, 0x4b, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xaf, 0xb3, 0x34, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xb0, 0x7e, 0x2d, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x9c, 0x51, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xb2, 0x67, 0x4a, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xb3, 0x7c, 0x33, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x47, 0x2c, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xb5, 0x5c, 0x15, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xb6, 0x27, 0x0e, 0x60, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x3b, 0xf7, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xb8, 0x06, 0xf0, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xb9, 0x1b, 0xd9, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb9, 0xe6, 0xd2, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xbb, 0x04, 0xf5, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xbb, 0xc6, 0xb4, 0x60, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xe4, 0xd7, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xbd, 0xaf, 0xd0, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xbe, 0xc4, 0xb9, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0xb2, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xc0, 0xa4, 0x9b, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xc1, 0x6f, 0x94, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x84, 0x7d, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xc3, 0x4f, 0x76, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xc4, 0x64, 0x5f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x2f, 0x58, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xc6, 0x4d, 0x7c, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xc7, 0x0f, 0x3a, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x2d, 0x5e, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xc8, 0xf8, 0x57, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xca, 0x0d, 0x40, 0x70, 0xff, 0xff, 0xff, 0xff, 0xca, 0xd8, 0x39, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xcb, 0x88, 0xf0, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xd2, 0x23, 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x60, 0xfb, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xd3, 0x75, 0xe4, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xd4, 0x40, 0xdd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x55, 0xc6, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xd6, 0x20, 0xbf, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xd7, 0x35, 0xa8, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x00, 0xa1, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xd9, 0x15, 0x8a, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xd9, 0xe0, 0x83, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xa7, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xdb, 0xc0, 0x65, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xdc, 0xde, 0x89, 0x70, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0x82, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xde, 0xbe, 0x6b, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xdf, 0x89, 0x64, 0x60, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x4d, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xe1, 0x69, 0x46, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xe2, 0x7e, 0x2f, 0x70, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x28, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xe4, 0x5e, 0x11, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xe5, 0x57, 0x2e, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x2d, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xe7, 0x37, 0x10, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xe8, 0x27, 0x0f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x16, 0xf2, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xea, 0x06, 0xf1, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xea, 0xf6, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xd3, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xec, 0xd6, 0xb6, 
0xe0, 0xff, 0xff, 0xff, 0xff, - 0xed, 0xc6, 0xb5, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xee, 0xbf, 0xd3, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xef, 0xaf, 0xd2, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xf0, 0x9f, 0xb5, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xb4, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xf2, 0x7f, 0x97, 0x60, 0xff, 0xff, 0xff, 0xff, - 0xf3, 0x6f, 0x96, 0x70, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0x79, 0x60, - 0xff, 0xff, 0xff, 0xff, 0xf5, 0x4f, 0x78, 0x70, 0xff, 0xff, 0xff, 0xff, - 0xf6, 0x3f, 0x5b, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x5a, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0x28, 0x77, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xf9, 0x0f, 0x3c, 0x70, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x59, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf8, 0x58, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xfb, 0xe8, 0x3b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x3a, 0xf0, - 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0x1d, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0xff, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x87, 0xe1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x02, 0x77, 0xe0, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x03, 0x70, 0xfe, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x60, 0xfd, 0x70, 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0xe0, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x06, 0x40, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x07, 0x30, 0xc2, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x19, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x09, 0x10, 0xa4, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x09, 0xad, 0x94, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0x86, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x0b, 0xe0, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x0c, 0xd9, 0xa2, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x67, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x0e, 0xb9, 0x84, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x0f, 0xa9, 0x83, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x66, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x11, 0x89, 0x65, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x12, 0x79, 0x48, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x47, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x14, 0x59, 0x2a, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x15, 0x49, 0x29, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x0c, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x17, 0x29, 0x0b, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x18, 0x22, 0x29, 0x60, 0x00, 0x00, 0x00, 0x00, 0x19, 0x08, 0xed, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x1a, 0x02, 0x0b, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x1a, 0xf2, 0x0a, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0xed, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd1, 0xec, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x1d, 0xc1, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xce, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x1f, 0xa1, 0xb1, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x20, 0x76, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0x93, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x22, 0x55, 0xe2, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x23, 0x6a, 0xaf, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xc4, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x25, 0x4a, 0x91, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x26, 0x15, 0xa6, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x73, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x27, 0xfe, 0xc3, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x29, 0x0a, 0x55, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xa5, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x2a, 0xea, 0x37, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x2b, 0xbe, 0x87, 0x70, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x54, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x2d, 0x9e, 0x69, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x2e, 0xb3, 0x36, 0x60, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x4b, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x30, 0x93, 0x18, 
0x60, 0x00, 0x00, 0x00, 0x00, - 0x31, 0x67, 0x67, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0xfa, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x33, 0x47, 0x49, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x34, 0x52, 0xdc, 0x60, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x2b, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x36, 0x32, 0xbe, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x37, 0x07, 0x0d, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0xda, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x38, 0xe6, 0xef, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x39, 0xfb, 0xbc, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xd1, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xdb, 0x9e, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x3c, 0xaf, 0xee, 0x70, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0x80, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x3e, 0x8f, 0xd0, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x3f, 0x9b, 0x62, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xb2, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x41, 0x84, 0x7f, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x42, 0x4f, 0x94, 0x70, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x61, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x44, 0x2f, 0x76, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x45, 0x44, 0x43, 0x60, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xa8, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x47, 0x2d, 0x5f, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x47, 0xd3, 0x8a, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x41, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x49, 0xb3, 0x6c, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x4a, 0xed, 0x23, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0x89, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x4c, 0xd6, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x4d, 0x7c, 0x6b, 0x70, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x22, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x4f, 0x5c, 0x4d, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x50, 0x96, 0x04, 0x60, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x2f, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x52, 0x75, 0xe6, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x53, 0x1c, 0x11, 0x70, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xc8, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x54, 0xfb, 0xf3, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x56, 0x35, 0xaa, 0x60, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x0f, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x58, 0x1e, 0xc6, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x58, 0xc4, 0xf1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xa8, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x5a, 0xa4, 0xd3, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x5b, 0xde, 0x8a, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xb5, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbe, 0x6c, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x5e, 0x64, 0x97, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x4e, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x60, 0x4d, 0xb4, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x61, 0x87, 0x6b, 0x60, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0x96, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x63, 0x67, 0x4d, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x64, 0x0d, 0x78, 0x70, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x2f, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x65, 0xed, 0x5a, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x67, 0x27, 0x11, 0x60, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x3c, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x69, 0x06, 0xf3, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x69, 0xad, 0x1e, 0x70, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xd5, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x6b, 0x96, 0x3a, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x6c, 0xcf, 0xf1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x1c, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x6e, 0xaf, 0xd3, 0xe0, 0x00, 0x00, 0x00, 0x00, - 0x6f, 0x55, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xb5, 0xe0, - 0x00, 0x00, 0x00, 0x00, 0x71, 0x35, 0xe0, 0xf0, 0x00, 0x00, 0x00, 0x00, - 0x72, 0x6f, 0x97, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xc2, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x74, 0x4f, 0x79, 
0xe0, 0x00, 0x00, 0x00, 0x00, - 0x74, 0xfe, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0x96, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x76, 0xde, 0xc1, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x78, 0x18, 0x78, 0x60, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xa3, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x79, 0xf8, 0x5a, 0x60, 0x00, 0x00, 0x00, 0x00, - 0x7a, 0x9e, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x3c, 0x60, - 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7e, 0x67, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x7d, 0xb8, 0x1e, 0x60, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x49, 0x70, - 0x00, 0x00, 0x00, 0x00, 0x7f, 0x98, 0x00, 0x60, 0x00, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, - 0x02, 0x01, 0x02, 0x01, 0x02, 0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff, - 0xff, 0xc7, 0xc0, 0x01, 0x04, 0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff, - 0xff, 0xc7, 0xc0, 0x01, 0x0c, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c, - 0x4d, 0x54, 0x00, 0x45, 0x44, 0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45, - 0x57, 0x54, 0x00, 0x45, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x45, 0x53, 0x54, 0x35, 0x45, 0x44, - 0x54, 0x2c, 0x4d, 0x33, 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31, - 0x2e, 0x31, 0x2e, 0x30, 0x0a -}; -unsigned int America_New_York_len = 3545; -unsigned char Australia_Sydney[] = { - 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e, 0x80, 0x00, 0x00, 0x00, - 0x9c, 0x4e, 0xa6, 0x9c, 0x9c, 0xbc, 0x20, 0xf0, 0xcb, 0x54, 0xb3, 0x00, - 0xcb, 0xc7, 0x57, 0x70, 0xcc, 0xb7, 0x56, 0x80, 0xcd, 0xa7, 0x39, 0x70, - 0xce, 0xa0, 0x73, 0x00, 0xcf, 0x87, 0x1b, 0x70, 0x03, 0x70, 0x39, 0x80, - 0x04, 0x0d, 0x1c, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x05, 0xf6, 0x38, 0x80, - 0x07, 0x2f, 0xfd, 0x80, 0x07, 0xd6, 0x1a, 0x80, 0x09, 0x0f, 0xdf, 0x80, - 0x09, 0xb5, 0xfc, 0x80, 0x0a, 0xef, 0xc1, 0x80, 0x0b, 0x9f, 0x19, 0x00, - 0x0c, 0xd8, 0xde, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x0e, 0xb8, 0xc0, 0x00, - 0x0f, 0x5e, 0xdd, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x11, 0x3e, 0xbf, 0x00, - 
0x12, 0x78, 0x84, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x14, 0x58, 0x66, 0x00, - 0x14, 0xfe, 0x83, 0x00, 0x16, 0x38, 0x48, 0x00, 0x17, 0x0c, 0x89, 0x80, - 0x18, 0x21, 0x64, 0x80, 0x18, 0xc7, 0x81, 0x80, 0x1a, 0x01, 0x46, 0x80, - 0x1a, 0xa7, 0x63, 0x80, 0x1b, 0xe1, 0x28, 0x80, 0x1c, 0x87, 0x45, 0x80, - 0x1d, 0xc1, 0x0a, 0x80, 0x1e, 0x79, 0x9c, 0x80, 0x1f, 0x97, 0xb2, 0x00, - 0x20, 0x59, 0x7e, 0x80, 0x21, 0x80, 0xce, 0x80, 0x22, 0x42, 0x9b, 0x00, - 0x23, 0x69, 0xeb, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x25, 0x49, 0xcd, 0x00, - 0x25, 0xef, 0xea, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x27, 0xcf, 0xcc, 0x00, - 0x29, 0x09, 0x91, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x2a, 0xe9, 0x73, 0x00, - 0x2b, 0x98, 0xca, 0x80, 0x2c, 0xd2, 0x8f, 0x80, 0x2d, 0x78, 0xac, 0x80, - 0x2e, 0xb2, 0x71, 0x80, 0x2f, 0x58, 0x8e, 0x80, 0x30, 0x92, 0x53, 0x80, - 0x31, 0x5d, 0x5a, 0x80, 0x32, 0x72, 0x35, 0x80, 0x33, 0x3d, 0x3c, 0x80, - 0x34, 0x52, 0x17, 0x80, 0x35, 0x1d, 0x1e, 0x80, 0x36, 0x31, 0xf9, 0x80, - 0x36, 0xfd, 0x00, 0x80, 0x38, 0x1b, 0x16, 0x00, 0x38, 0xdc, 0xe2, 0x80, - 0x39, 0xa7, 0xe9, 0x80, 0x3a, 0xbc, 0xc4, 0x80, 0x3b, 0xda, 0xda, 0x00, - 0x3c, 0xa5, 0xe1, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x3e, 0x85, 0xc3, 0x00, - 0x3f, 0x9a, 0x9e, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x41, 0x83, 0xba, 0x80, - 0x42, 0x45, 0x87, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x44, 0x2e, 0xa3, 0x80, - 0x45, 0x43, 0x7e, 0x80, 0x46, 0x05, 0x4b, 0x00, 0x47, 0x23, 0x60, 0x80, - 0x47, 0xf7, 0xa2, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x49, 0xd7, 0x84, 0x00, - 0x4a, 0xc7, 0x75, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x4c, 0xa7, 0x57, 0x00, - 0x4d, 0x97, 0x48, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x4f, 0x77, 0x2a, 0x00, - 0x50, 0x70, 0x55, 0x80, 0x51, 0x60, 0x46, 0x80, 0x52, 0x50, 0x37, 0x80, - 0x53, 0x40, 0x28, 0x80, 0x54, 0x30, 0x19, 0x80, 0x55, 0x20, 0x0a, 0x80, - 0x56, 0x0f, 0xfb, 0x80, 0x56, 0xff, 0xec, 0x80, 0x57, 0xef, 0xdd, 0x80, - 0x58, 0xdf, 0xce, 0x80, 0x59, 0xcf, 0xbf, 0x80, 0x5a, 0xbf, 0xb0, 0x80, - 0x5b, 0xb8, 0xdc, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x5d, 0x98, 0xbe, 0x00, - 0x5e, 0x88, 0xaf, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x60, 0x68, 0x91, 0x00, - 0x61, 0x58, 0x82, 0x00, 0x62, 0x48, 0x73, 0x00, 0x63, 0x38, 0x64, 0x00, - 0x64, 0x28, 0x55, 0x00, 0x65, 0x18, 0x46, 0x00, 0x66, 0x11, 0x71, 0x80, - 0x67, 0x01, 0x62, 0x80, 0x67, 0xf1, 0x53, 0x80, 0x68, 0xe1, 0x44, 0x80, - 0x69, 0xd1, 0x35, 0x80, 0x6a, 0xc1, 0x26, 0x80, 0x6b, 0xb1, 0x17, 0x80, - 0x6c, 0xa1, 0x08, 0x80, 0x6d, 0x90, 0xf9, 0x80, 0x6e, 0x80, 0xea, 0x80, - 0x6f, 0x70, 0xdb, 0x80, 0x70, 0x6a, 0x07, 0x00, 0x71, 0x59, 0xf8, 0x00, - 0x72, 0x49, 0xe9, 0x00, 0x73, 0x39, 0xda, 0x00, 0x74, 0x29, 0xcb, 0x00, - 0x75, 0x19, 0xbc, 0x00, 0x76, 0x09, 0xad, 0x00, 0x76, 0xf9, 0x9e, 0x00, - 0x77, 0xe9, 0x8f, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x79, 0xc9, 0x71, 0x00, - 0x7a, 0xb9, 0x62, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x7c, 0xa2, 0x7e, 0x80, - 0x7d, 0x92, 0x6f, 0x80, 0x7e, 0x82, 0x60, 0x80, 0x7f, 0x72, 0x51, 0x80, - 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 
0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, - 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x00, 0x00, - 0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00, - 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00, - 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54, 0x00, 0x41, 0x45, 0x44, 0x54, - 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x8f, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e, - 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x73, 0x16, 0x7f, 0x3c, 0xff, 0xff, 0xff, 0xff, 0x9c, 0x4e, 0xa6, 0x9c, - 0xff, 0xff, 0xff, 0xff, 0x9c, 0xbc, 0x20, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xcb, 0x54, 0xb3, 0x00, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xc7, 0x57, 0x70, - 0xff, 0xff, 0xff, 0xff, 0xcc, 0xb7, 0x56, 0x80, 0xff, 0xff, 0xff, 0xff, - 0xcd, 0xa7, 0x39, 0x70, 0xff, 0xff, 0xff, 0xff, 0xce, 0xa0, 0x73, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xcf, 0x87, 0x1b, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x03, 0x70, 0x39, 0x80, 0x00, 0x00, 0x00, 0x00, 0x04, 0x0d, 0x1c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x05, 0xf6, 0x38, 0x80, 0x00, 0x00, 0x00, 0x00, 0x07, 0x2f, 0xfd, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x07, 0xd6, 0x1a, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x09, 0x0f, 0xdf, 0x80, 0x00, 0x00, 0x00, 0x00, 0x09, 0xb5, 0xfc, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x0a, 0xef, 0xc1, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x0b, 0x9f, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd8, 0xde, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x0e, 0xb8, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x5e, 0xdd, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x11, 0x3e, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x78, 0x84, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x14, 0x58, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0xfe, 0x83, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x16, 0x38, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x17, 0x0c, 0x89, 0x80, 0x00, 0x00, 0x00, 0x00, 0x18, 0x21, 0x64, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x18, 0xc7, 0x81, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x1a, 0x01, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xa7, 0x63, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0x28, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x1c, 0x87, 0x45, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1, 0x0a, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x1e, 0x79, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x1f, 0x97, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x59, 0x7e, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x21, 0x80, 0xce, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x22, 0x42, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x69, 0xeb, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x25, 0x49, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xef, 0xea, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x27, 0xcf, 0xcc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x09, 0x91, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x2a, 0xe9, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x98, 0xca, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd2, 0x8f, 0x80, 0x00, 0x00, 0x00, 0x00, - 
0x2d, 0x78, 0xac, 0x80, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb2, 0x71, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x2f, 0x58, 0x8e, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x30, 0x92, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00, 0x31, 0x5d, 0x5a, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0x35, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x33, 0x3d, 0x3c, 0x80, 0x00, 0x00, 0x00, 0x00, 0x34, 0x52, 0x17, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x35, 0x1d, 0x1e, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x36, 0x31, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x36, 0xfd, 0x00, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x38, 0xdc, 0xe2, 0x80, 0x00, 0x00, 0x00, 0x00, 0x39, 0xa7, 0xe9, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x3a, 0xbc, 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x3b, 0xda, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xa5, 0xe1, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x3e, 0x85, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9a, 0x9e, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x41, 0x83, 0xba, 0x80, 0x00, 0x00, 0x00, 0x00, 0x42, 0x45, 0x87, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x44, 0x2e, 0xa3, 0x80, 0x00, 0x00, 0x00, 0x00, 0x45, 0x43, 0x7e, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x46, 0x05, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x47, 0x23, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x47, 0xf7, 0xa2, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x49, 0xd7, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xc7, 0x75, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x4c, 0xa7, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x97, 0x48, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x4f, 0x77, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x70, 0x55, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x51, 0x60, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x52, 0x50, 0x37, 0x80, 0x00, 0x00, 0x00, 0x00, 0x53, 0x40, 0x28, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x54, 0x30, 0x19, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x55, 0x20, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x56, 0x0f, 0xfb, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x56, 0xff, 0xec, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x57, 0xef, 0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x58, 0xdf, 0xce, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x59, 0xcf, 0xbf, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x5a, 0xbf, 0xb0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xb8, 0xdc, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x5d, 0x98, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x88, 0xaf, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x60, 0x68, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x58, 0x82, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x62, 0x48, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x63, 0x38, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x28, 0x55, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x65, 0x18, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x66, 0x11, 0x71, 0x80, 0x00, 0x00, 0x00, 0x00, 0x67, 0x01, 0x62, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x67, 0xf1, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x68, 0xe1, 0x44, 0x80, 0x00, 0x00, 0x00, 0x00, 0x69, 0xd1, 0x35, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x6a, 0xc1, 0x26, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x6b, 0xb1, 0x17, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xa1, 0x08, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x6d, 0x90, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x6e, 0x80, 0xea, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x70, 0xdb, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x70, 0x6a, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x71, 0x59, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x49, 0xe9, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x73, 0x39, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x74, 0x29, 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x19, 0xbc, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x76, 0x09, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x76, 0xf9, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0xe9, 0x8f, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x79, 0xc9, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0xb9, 0x62, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x7c, 0xa2, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x92, 0x6f, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x7e, 0x82, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x7f, 0x72, 0x51, 0x80, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, - 0x01, 0x02, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, - 0x03, 0x04, 0x03, 0x00, 0x00, 0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a, - 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a, - 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54, - 0x00, 0x41, 0x45, 0x44, 0x54, 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00, - 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x41, 0x45, - 0x53, 0x54, 0x2d, 0x31, 0x30, 0x41, 0x45, 0x44, 0x54, 0x2c, 0x4d, 0x31, - 0x30, 0x2e, 0x31, 0x2e, 0x30, 0x2c, 0x4d, 0x34, 0x2e, 0x31, 0x2e, 0x30, - 0x2f, 0x33, 0x0a -}; -unsigned int Australia_Sydney_len = 2223; diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.cc index 1ec2026e25..7256a699d2 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.cc @@ -297,7 +297,7 @@ timespec ToTimespec(Time t) { timespec ts; absl::Duration d = time_internal::ToUnixDuration(t); if (!time_internal::IsInfiniteDuration(d)) { - ts.tv_sec = time_internal::GetRepHi(d); + ts.tv_sec = static_cast(time_internal::GetRepHi(d)); if (ts.tv_sec == time_internal::GetRepHi(d)) { // no time_t narrowing ts.tv_nsec = time_internal::GetRepLo(d) / 4; // floor return ts; @@ -316,7 +316,7 @@ timespec ToTimespec(Time t) { timeval ToTimeval(Time t) { timeval tv; timespec ts = absl::ToTimespec(t); - tv.tv_sec = ts.tv_sec; + tv.tv_sec = static_cast(ts.tv_sec); if (tv.tv_sec != ts.tv_sec) { // narrowing if (ts.tv_sec < 0) { tv.tv_sec = std::numeric_limits::min(); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.h index 5abd815a79..11796b4f0c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.h +++ 
b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time.h @@ -120,7 +120,7 @@ using EnableIfFloat = // Duration // -// The `absl::Duration` class represents a signed, fixed-length span of time. +// The `absl::Duration` class represents a signed, fixed-length amount of time. // A `Duration` is generated using a unit-specific factory function, or is // the result of subtracting one `absl::Time` from another. Durations behave // like unit-safe integers and they support all the natural integer-like @@ -162,7 +162,7 @@ class Duration { constexpr Duration() : rep_hi_(0), rep_lo_(0) {} // zero-length duration // Copyable. -#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1910 +#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1930 // Explicitly defining the constexpr copy constructor avoids an MSVC bug. constexpr Duration(const Duration& d) : rep_hi_(d.rep_hi_), rep_lo_(d.rep_lo_) {} @@ -495,7 +495,7 @@ ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Seconds(Duration d); ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Minutes(Duration d); ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Hours(Duration d); -// ToDoubleNanoSeconds() +// ToDoubleNanoseconds() // ToDoubleMicroseconds() // ToDoubleMilliseconds() // ToDoubleSeconds() @@ -579,7 +579,7 @@ bool ParseDuration(absl::string_view dur_string, Duration* d); // AbslParseFlag() // -// Parses a command-line flag string representation `text` into a a Duration +// Parses a command-line flag string representation `text` into a Duration // value. Duration flags must be specified in a format that is valid input for // `absl::ParseDuration()`. bool AbslParseFlag(absl::string_view text, Duration* dst, std::string* error); @@ -750,23 +750,24 @@ constexpr Time UnixEpoch() { return Time(); } constexpr Time UniversalEpoch() { // 719162 is the number of days from 0001-01-01 to 1970-01-01, // assuming the Gregorian calendar. - return Time(time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, 0U)); + return Time( + time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, uint32_t{0})); } // InfiniteFuture() // // Returns an `absl::Time` that is infinitely far in the future. constexpr Time InfiniteFuture() { - return Time( - time_internal::MakeDuration((std::numeric_limits::max)(), ~0U)); + return Time(time_internal::MakeDuration((std::numeric_limits::max)(), + ~uint32_t{0})); } // InfinitePast() // // Returns an `absl::Time` that is infinitely far in the past. constexpr Time InfinitePast() { - return Time( - time_internal::MakeDuration((std::numeric_limits::min)(), ~0U)); + return Time(time_internal::MakeDuration((std::numeric_limits::min)(), + ~uint32_t{0})); } // FromUnixNanos() @@ -1422,14 +1423,17 @@ constexpr int64_t GetRepHi(Duration d) { return d.rep_hi_; } constexpr uint32_t GetRepLo(Duration d) { return d.rep_lo_; } // Returns true iff d is positive or negative infinity. -constexpr bool IsInfiniteDuration(Duration d) { return GetRepLo(d) == ~0U; } +constexpr bool IsInfiniteDuration(Duration d) { + return GetRepLo(d) == ~uint32_t{0}; +} // Returns an infinite Duration with the opposite sign. // REQUIRES: IsInfiniteDuration(d) constexpr Duration OppositeInfinity(Duration d) { return GetRepHi(d) < 0 - ? MakeDuration((std::numeric_limits::max)(), ~0U) - : MakeDuration((std::numeric_limits::min)(), ~0U); + ? MakeDuration((std::numeric_limits::max)(), ~uint32_t{0}) + : MakeDuration((std::numeric_limits::min)(), + ~uint32_t{0}); } // Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow. 
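Editorial aside, not part of the patch: the time.h hunks above consistently replace the untyped literals `~0U` and `0U` with `~uint32_t{0}` and `uint32_t{0}` when building the Duration representation. A minimal standalone C++ sketch of how the two spellings relate, assuming the common case where `unsigned int` is 32 bits wide (all names and asserts here are illustrative only):

#include <cstdint>
#include <limits>

// Sketch only: both spellings denote the 32-bit all-ones value, but the
// braced form states the operand width explicitly instead of relying on the
// width of `unsigned int`.
static_assert(~uint32_t{0} == std::numeric_limits<uint32_t>::max(),
              "~uint32_t{0} is the 32-bit all-ones value");
static_assert(~uint32_t{0} == ~0U,
              "matches ~0U on platforms where unsigned int is 32 bits");

int main() { return 0; }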
@@ -1568,7 +1572,7 @@ constexpr Duration operator-(Duration d) { constexpr Duration InfiniteDuration() { return time_internal::MakeDuration((std::numeric_limits::max)(), - ~0U); + ~uint32_t{0}); } constexpr Duration FromChrono(const std::chrono::nanoseconds& d) { diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time_test.cc index cde9423feb..d235e9ad0a 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/time/time_test.cc @@ -377,6 +377,11 @@ TEST(Time, FloorConversion) { } TEST(Time, RoundtripConversion) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + #define TEST_CONVERSION_ROUND_TRIP(SOURCE, FROM, TO, MATCHER) \ EXPECT_THAT(TO(FROM(SOURCE)), MATCHER(SOURCE)) @@ -558,6 +563,11 @@ TEST(Time, FromChrono) { } TEST(Time, ToChronoTime) { +#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \ + ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT + GTEST_SKIP(); +#endif + EXPECT_EQ(std::chrono::system_clock::from_time_t(-1), absl::ToChronoTime(absl::FromTimeT(-1))); EXPECT_EQ(std::chrono::system_clock::from_time_t(0), diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/CMakeLists.txt b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/CMakeLists.txt index d7e8614e0d..830953aece 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/CMakeLists.txt +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/CMakeLists.txt @@ -43,6 +43,7 @@ absl_cc_library( PUBLIC ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME bad_any_cast_impl @@ -239,6 +240,7 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly. absl_cc_library( NAME conformance_testing diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any.h index fc5a07469f..204da26db8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any.h @@ -81,18 +81,9 @@ ABSL_NAMESPACE_END #include #include "absl/base/internal/fast_type_id.h" -#include "absl/base/macros.h" #include "absl/meta/type_traits.h" #include "absl/types/bad_any_cast.h" -// NOTE: This macro is an implementation detail that is undefined at the bottom -// of the file. It is not intended for expansion directly from user code. -#ifdef ABSL_ANY_DETAIL_HAS_RTTI -#error ABSL_ANY_DETAIL_HAS_RTTI cannot be directly set -#elif !defined(__GNUC__) || defined(__GXX_RTTI) -#define ABSL_ANY_DETAIL_HAS_RTTI 1 -#endif // !defined(__GNUC__) || defined(__GXX_RTTI) - namespace absl { ABSL_NAMESPACE_BEGIN @@ -348,7 +339,7 @@ class any { // returns `false`. bool has_value() const noexcept { return obj_ != nullptr; } -#if ABSL_ANY_DETAIL_HAS_RTTI +#ifdef ABSL_INTERNAL_HAS_RTTI // Returns: typeid(T) if *this has a contained object of type T, otherwise // typeid(void). const std::type_info& type() const noexcept { @@ -358,7 +349,7 @@ class any { return typeid(void); } -#endif // ABSL_ANY_DETAIL_HAS_RTTI +#endif // ABSL_INTERNAL_HAS_RTTI private: // Tagged type-erased abstraction for holding a cloneable object. 
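Editorial aside, not part of the patch: the any.h hunks above drop the locally defined ABSL_ANY_DETAIL_HAS_RTTI macro in favor of the shared ABSL_INTERNAL_HAS_RTTI guard. A standalone sketch of the detection idea the removed block implemented; SKETCH_HAS_RTTI and TypeNameOf are invented names for illustration:

#include <typeinfo>

// Sketch only: GCC-compatible compilers define __GXX_RTTI when RTTI is
// enabled, so RTTI use is allowed unless the translation unit was compiled
// with -fno-rtti; other compilers are assumed to have RTTI available.
#if !defined(__GNUC__) || defined(__GXX_RTTI)
#define SKETCH_HAS_RTTI 1
#endif

const char* TypeNameOf(const int& value) {
#ifdef SKETCH_HAS_RTTI
  return typeid(value).name();  // only compiled when RTTI is known to exist
#else
  (void)value;
  return "<rtti disabled>";
#endif
}

int main() { return TypeNameOf(0) == nullptr; }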
@@ -367,9 +358,9 @@ class any { virtual ~ObjInterface() = default; virtual std::unique_ptr Clone() const = 0; virtual const void* ObjTypeId() const noexcept = 0; -#if ABSL_ANY_DETAIL_HAS_RTTI +#ifdef ABSL_INTERNAL_HAS_RTTI virtual const std::type_info& Type() const noexcept = 0; -#endif // ABSL_ANY_DETAIL_HAS_RTTI +#endif // ABSL_INTERNAL_HAS_RTTI }; // Hold a value of some queryable type, with an ability to Clone it. @@ -386,9 +377,9 @@ class any { const void* ObjTypeId() const noexcept final { return IdForType(); } -#if ABSL_ANY_DETAIL_HAS_RTTI +#ifdef ABSL_INTERNAL_HAS_RTTI const std::type_info& Type() const noexcept final { return typeid(T); } -#endif // ABSL_ANY_DETAIL_HAS_RTTI +#endif // ABSL_INTERNAL_HAS_RTTI T value; }; @@ -521,8 +512,6 @@ T* any_cast(any* operand) noexcept { ABSL_NAMESPACE_END } // namespace absl -#undef ABSL_ANY_DETAIL_HAS_RTTI - #endif // ABSL_USES_STD_ANY #endif // ABSL_TYPES_ANY_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any_test.cc index 70e4ba22b1..d382b927c2 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/any_test.cc @@ -754,26 +754,23 @@ TEST(AnyTest, FailedCopy) { // Test the guarantees regarding exceptions in emplace. TEST(AnyTest, FailedEmplace) { - { - BadCopyable bad; - absl::any target; - ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace(bad)); - } + BadCopyable bad; + absl::any target; + ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace(bad)); +} - { - BadCopyable bad; - absl::any target(absl::in_place_type); - ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace(bad)); -#if defined(ABSL_USES_STD_ANY) && defined(__GLIBCXX__) - // libstdc++ std::any::emplace() implementation (as of 7.2) has a bug: if an - // exception is thrown, *this contains a value. -#define ABSL_GLIBCXX_ANY_EMPLACE_EXCEPTION_BUG 1 +// GCC and Clang have a bug here. +// In some cases, the exception seems to be thrown at the wrong time, and +// target may contain a value.
+#ifdef __GNUC__ +TEST(AnyTest, DISABLED_FailedEmplaceInPlace) { +#else +TEST(AnyTest, FailedEmplaceInPlace) { #endif -#if defined(ABSL_HAVE_EXCEPTIONS) && \ - !defined(ABSL_GLIBCXX_ANY_EMPLACE_EXCEPTION_BUG) - EXPECT_FALSE(target.has_value()); -#endif - } + BadCopyable bad; + absl::any target(absl::in_place_type); + ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace(bad)); + EXPECT_FALSE(target.has_value()); } } // namespace diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/conformance_profile.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/conformance_profile.h index cf64ff4fcd..37b017db47 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/conformance_profile.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/conformance_profile.h @@ -719,6 +719,7 @@ struct SyntacticConformanceProfileOf { type##_support); \ ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(bool, is_##type) +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(default_constructible); ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_constructible); ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_constructible); @@ -733,6 +734,7 @@ ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_equal_comparable); ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_than_comparable); ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(swappable); ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(hashable); +#endif #undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF #undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/optional.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/optional.h index 92932b6001..6ed0c6699c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/optional.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/optional.h @@ -91,7 +91,15 @@ class optional_data_dtor_base { void destruct() noexcept { if (engaged_) { + // `data_` must be initialized if `engaged_` is true. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif data_.~T(); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif engaged_ = false; } } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/span.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/span.h index 112612f4bd..d653bb2c0c 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/span.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/span.h @@ -28,10 +28,10 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace span_internal { -// A constexpr min function -constexpr size_t Min(size_t a, size_t b) noexcept { return a < b ? a : b; } +template +class Span; +namespace span_internal { // Wrappers for access to container data pointers. 
template constexpr auto GetDataImpl(C& c, char) noexcept // NOLINT(runtime/references) @@ -121,6 +121,36 @@ struct IsConvertible : IsConvertibleHelper::type {}; template using EnableIfConvertibleTo = typename std::enable_if::value>::type; + +// IsView is true for types where the return type of .data() is the same for +// mutable and const instances. This isn't foolproof, but it's only used to +// enable a compiler warning. +template +struct IsView { + static constexpr bool value = false; +}; + +template +struct IsView< + T, absl::void_t()))>, + absl::void_t()))>> { + private: + using Container = std::remove_const_t; + using ConstData = + decltype(span_internal::GetData(std::declval())); + using MutData = decltype(span_internal::GetData(std::declval())); + public: + static constexpr bool value = std::is_same::value; +}; + +// These enablers result in 'int' so they can be used as typenames or defaults +// in template parameters lists. +template +using EnableIfIsView = std::enable_if_t::value, int>; + +template +using EnableIfNotIsView = std::enable_if_t::value, int>; + } // namespace span_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/variant.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/variant.h index 772008c74e..c82ded44f8 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/variant.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/internal/variant.h @@ -16,8 +16,8 @@ // separate file to avoid cluttering the top of the API header with // implementation details. -#ifndef ABSL_TYPES_variant_internal_H_ -#define ABSL_TYPES_variant_internal_H_ +#ifndef ABSL_TYPES_INTERNAL_VARIANT_H_ +#define ABSL_TYPES_INTERNAL_VARIANT_H_ #include #include @@ -449,7 +449,7 @@ struct FlattenIndices; template struct FlattenIndices { - template + template static constexpr std::size_t Run(std::size_t head, SizeType... tail) { return head + HeadSize * FlattenIndices::Run(tail...); } @@ -498,8 +498,8 @@ struct VisitIndicesVariadicImpl, EndIndices...> { }; template - static VisitIndicesResultT Run( - Op&& op, SizeType... i) { + static VisitIndicesResultT Run(Op&& op, + SizeType... i) { return VisitIndicesSwitch::value>::Run( FlattenedOp{absl::forward(op)}, FlattenIndices<(EndIndices + std::size_t{1})...>::Run( @@ -683,13 +683,13 @@ struct VariantCoreAccess { variant_internal::IndexOfConstructedType; void operator()(SizeT /*old_i*/ - ) const { + ) const { Access(*left) = absl::forward(other); } template void operator()(SizeT /*old_i*/ - ) const { + ) const { using New = typename absl::variant_alternative::type; if (std::is_nothrow_constructible::value || @@ -868,18 +868,6 @@ struct IsNeitherSelfNorInPlace> : std::false_type {}; template struct IsNeitherSelfNorInPlace> : std::false_type {}; -template -struct ConversionIsPossibleImpl : std::false_type {}; - -template -struct ConversionIsPossibleImpl< - Variant, T, - void_t::Run(std::declval(), {}))>> - : std::true_type {}; - -template -struct ConversionIsPossible : ConversionIsPossibleImpl::type {}; - template struct IndexOfConstructedType< Variant, T, @@ -1151,16 +1139,16 @@ struct VariantHelper> { // Type metafunction which returns the element type selected if // OverloadSet::Overload() is well-formed when called with argument type U.
template - using BestMatch = decltype( - variant_internal::OverloadSet::Overload(std::declval())); + using BestMatch = decltype(variant_internal::OverloadSet::Overload( + std::declval())); // Type metafunction which returns true if OverloadSet::Overload() is // well-formed when called with argument type U. // CanAccept can't be just an alias because there is a MSVC bug on parameter // pack expansion involving decltype. template - struct CanAccept : - std::integral_constant>::value> {}; + struct CanAccept + : std::integral_constant>::value> {}; // Type metafunction which returns true if Other is an instantiation of // variant, and variants's converting constructor from Other will be @@ -1183,8 +1171,8 @@ struct TrivialMoveOnly { // A union's defaulted copy/move constructor is deleted if any variant member's // copy/move constructor is nontrivial. template -struct IsTriviallyMoveConstructible: - std::is_move_constructible> {}; +struct IsTriviallyMoveConstructible + : std::is_move_constructible> {}; // To guarantee triviality of all special-member functions that can be trivial, // we use a chain of conditional bases for each one. @@ -1419,14 +1407,14 @@ class VariantMoveAssignBaseNontrivial : protected VariantCopyBase { VariantMoveAssignBaseNontrivial& operator=( VariantMoveAssignBaseNontrivial const&) = default; - VariantMoveAssignBaseNontrivial& - operator=(VariantMoveAssignBaseNontrivial&& other) noexcept( - absl::conjunction..., - std::is_nothrow_move_assignable...>::value) { - VisitIndices::Run( - VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_); - return *this; - } + VariantMoveAssignBaseNontrivial& + operator=(VariantMoveAssignBaseNontrivial&& other) noexcept( + absl::conjunction..., + std::is_nothrow_move_assignable...>::value) { + VisitIndices::Run( + VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_); + return *this; + } protected: using Base::index_; @@ -1450,12 +1438,12 @@ class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase { VariantCopyAssignBaseNontrivial& operator=( VariantCopyAssignBaseNontrivial&&) = default; - VariantCopyAssignBaseNontrivial& operator=( - const VariantCopyAssignBaseNontrivial& other) { - VisitIndices::Run( - VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_); - return *this; - } + VariantCopyAssignBaseNontrivial& operator=( + const VariantCopyAssignBaseNontrivial& other) { + VisitIndices::Run( + VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_); + return *this; + } protected: using Base::index_; @@ -1643,4 +1631,4 @@ ABSL_NAMESPACE_END } // namespace absl #endif // !defined(ABSL_USES_STD_VARIANT) -#endif // ABSL_TYPES_variant_internal_H_ +#endif // ABSL_TYPES_INTERNAL_VARIANT_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional.h index 61540cfdb2..134b2aff42 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional.h @@ -282,15 +282,16 @@ class optional : private optional_internal::optional_data, optional& operator=(optional&& src) = default; // Value assignment operators - template < - typename U = T, - typename = typename std::enable_if, typename std::decay::type>>, - absl::negation< - absl::conjunction, - std::is_same::type>>>, - std::is_constructible, std::is_assignable>::value>::type> + template , typename std::decay::type> >, + 
absl::negation, + std::is_same::type> > >, + std::is_constructible, + std::is_assignable >::value>::type> optional& operator=(U&& v) { this->assign(std::forward(v)); return *this; @@ -298,13 +299,14 @@ template < typename U, + int&..., // Workaround an internal compiler error in GCC 5 to 10. typename = typename std::enable_if>, + absl::negation >, std::is_constructible, std::is_assignable, absl::negation< optional_internal:: is_constructible_convertible_assignable_from_optional< - T, U>>>::value>::type> + T, U> > >::value>::type> optional& operator=(const optional& rhs) { if (rhs) { this->assign(*rhs); @@ -315,13 +317,14 @@ template >, std::is_constructible, - std::is_assignable, + absl::negation >, + std::is_constructible, std::is_assignable, absl::negation< optional_internal:: is_constructible_convertible_assignable_from_optional< - T, U>>>::value>::type> + T, U> > >::value>::type> optional& operator=(optional&& rhs) { if (rhs) { this->assign(std::move(*rhs)); diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional_test.cc index 7ef142cb99..21653a903e 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/optional_test.cc @@ -27,6 +27,37 @@ #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" +#if defined(__cplusplus) && __cplusplus >= 202002L +// In C++20, volatile-qualified return types are deprecated. +#define ABSL_VOLATILE_RETURN_TYPES_DEPRECATED 1 +#endif + +// The following types help test an internal compiler error in GCC5 through +// GCC10. The case OptionalTest.InternalCompilerErrorInGcc5ToGcc10 crashes the +// compiler without a workaround. This test case should remain at the beginning +// of the file as the internal compiler error is sensitive to other constructs +// in this file. +template +using GccIceHelper1 = T; +template +struct GccIceHelper2 {}; +template +class GccIce { + template &, U>> + GccIce& operator=(GccIceHelper2 const&) {} +}; + +TEST(OptionalTest, InternalCompilerErrorInGcc5ToGcc10) { + GccIce instantiate_ice_with_same_type_as_optional; + static_cast(instantiate_ice_with_same_type_as_optional); + absl::optional val1; + absl::optional val2; + val1 = val2; +} + struct Hashable {}; namespace std { @@ -205,6 +236,7 @@ TEST(optionalTest, CopyConstructor) { EXPECT_TRUE(opt42_copy); EXPECT_EQ(42, *opt42_copy); } +#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) { absl::optional empty, opt42 = 42; absl::optional empty_copy(empty); @@ -213,6 +245,7 @@ TEST(optionalTest, CopyConstructor) { EXPECT_TRUE(opt42_copy); EXPECT_EQ(42, *opt42_copy); } +#endif // test copyability EXPECT_TRUE(std::is_copy_constructible>::value); EXPECT_TRUE(std::is_copy_constructible>::value); @@ -224,18 +257,11 @@ TEST(optionalTest, CopyConstructor) { EXPECT_FALSE( absl::is_trivially_copy_constructible>::value); -#if defined(ABSL_USES_STD_OPTIONAL) && defined(__GLIBCXX__) - // libstdc++ std::optional implementation (as of 7.2) has a bug: when T is - // trivially copyable, optional is not trivially copyable (due to one of - // its base class is unconditionally nontrivial).
-#define ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG 1 -#endif -#ifndef ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG EXPECT_TRUE( absl::is_trivially_copy_constructible>::value); EXPECT_TRUE( absl::is_trivially_copy_constructible>::value); -#ifndef _MSC_VER +#if !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) // See defect report "Trivial copy/move constructor for class with volatile // member" at // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#2094 @@ -244,8 +270,7 @@ TEST(optionalTest, CopyConstructor) { // Also a cv-qualified scalar type should be trivially copyable. EXPECT_TRUE(absl::is_trivially_copy_constructible< absl::optional>::value); -#endif // _MSC_VER -#endif // ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG +#endif // !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) // constexpr copy constructor for trivially copyable types { @@ -275,17 +300,10 @@ TEST(optionalTest, CopyConstructor) { EXPECT_TRUE(absl::is_trivially_copy_constructible< absl::optional>::value); #endif - // When testing with VS 2017 15.3, there seems to be a bug in MSVC - // std::optional when T is volatile-qualified. So skipping this test. - // Bug report: - // https://connect.microsoft.com/VisualStudio/feedback/details/3142534 -#if defined(ABSL_USES_STD_OPTIONAL) && defined(_MSC_VER) && _MSC_VER >= 1911 -#define ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG 1 -#endif -#ifndef ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG +#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) EXPECT_FALSE(std::is_copy_constructible< absl::optional>::value); -#endif +#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) } } @@ -305,11 +323,9 @@ TEST(optionalTest, MoveConstructor) { EXPECT_FALSE(std::is_move_constructible>::value); // test noexcept EXPECT_TRUE(std::is_nothrow_move_constructible>::value); -#ifndef ABSL_USES_STD_OPTIONAL EXPECT_EQ( absl::default_allocator_is_nothrow::value, std::is_nothrow_move_constructible>::value); -#endif EXPECT_TRUE(std::is_nothrow_move_constructible< absl::optional>::value); } @@ -638,8 +654,7 @@ TEST(optionalTest, CopyAssignment) { EXPECT_TRUE(absl::is_copy_assignable::value); EXPECT_FALSE(absl::is_trivially_copy_assignable::value); - // std::optional doesn't support volatile nontrivial types. -#ifndef ABSL_USES_STD_OPTIONAL +#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) { StructorListener listener; Listenable::listener = &listener; @@ -658,7 +673,7 @@ TEST(optionalTest, CopyAssignment) { EXPECT_EQ(1, listener.destruct); EXPECT_EQ(1, listener.volatile_copy_assign); } -#endif // ABSL_USES_STD_OPTIONAL +#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) } TEST(optionalTest, MoveAssignment) { @@ -681,8 +696,7 @@ TEST(optionalTest, MoveAssignment) { EXPECT_EQ(1, listener.destruct); EXPECT_EQ(1, listener.move_assign); } - // std::optional doesn't support volatile nontrivial types. 
-#ifndef ABSL_USES_STD_OPTIONAL +#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) { StructorListener listener; Listenable::listener = &listener; @@ -702,7 +716,7 @@ TEST(optionalTest, MoveAssignment) { EXPECT_EQ(1, listener.destruct); EXPECT_EQ(1, listener.volatile_move_assign); } -#endif // ABSL_USES_STD_OPTIONAL +#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) EXPECT_FALSE(absl::is_move_assignable>::value); EXPECT_TRUE(absl::is_move_assignable>::value); EXPECT_TRUE(absl::is_move_assignable>::value); @@ -974,8 +988,8 @@ TEST(optionalTest, PointerStuff) { EXPECT_EQ("foo", *opt); const auto& opt_const = opt; EXPECT_EQ("foo", *opt_const); - EXPECT_EQ(opt->size(), 3); - EXPECT_EQ(opt_const->size(), 3); + EXPECT_EQ(opt->size(), 3u); + EXPECT_EQ(opt_const->size(), 3u); constexpr absl::optional opt1(1); static_assert((*opt1).x == ConstexprType::kCtorInt, ""); @@ -1038,6 +1052,7 @@ TEST(optionalTest, Value) { #endif EXPECT_EQ("c&&", TypeQuals(OC(absl::in_place, "xvalue_c").value())); +#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) // test on volatile type using OV = absl::optional; OV lvalue_v(absl::in_place, 42); @@ -1045,6 +1060,7 @@ TEST(optionalTest, Value) { EXPECT_EQ(42, OV(42).value()); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); +#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) // test exception throw on value() absl::optional empty; @@ -1087,6 +1103,7 @@ TEST(optionalTest, DerefOperator) { #endif EXPECT_EQ("c&&", TypeQuals(*OC(absl::in_place, "xvalue_c"))); +#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) // test on volatile type using OV = absl::optional; OV lvalue_v(absl::in_place, 42); @@ -1094,6 +1111,7 @@ TEST(optionalTest, DerefOperator) { EXPECT_EQ(42, *OV(42)); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); +#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED) constexpr absl::optional opt1(1); static_assert(*opt1 == 1, ""); @@ -1505,7 +1523,7 @@ TEST(optionalTest, Hash) { for (int i = 0; i < 100; ++i) { hashcodes.insert(hash(i)); } - EXPECT_GT(hashcodes.size(), 90); + EXPECT_GT(hashcodes.size(), 90u); static_assert(is_hash_enabled_for>::value, ""); static_assert(is_hash_enabled_for>::value, ""); @@ -1558,12 +1576,10 @@ TEST(optionalTest, NoExcept) { static_assert( std::is_nothrow_move_constructible>::value, ""); -#ifndef ABSL_USES_STD_OPTIONAL static_assert(absl::default_allocator_is_nothrow::value == std::is_nothrow_move_constructible< absl::optional>::value, ""); -#endif std::vector> v; for (int i = 0; i < 10; ++i) v.emplace_back(); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/span.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/span.h index 6272bb7ad1..d7bdbb1fb5 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/span.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/span.h @@ -60,6 +60,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/base/internal/throw_delegate.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" @@ -160,12 +161,12 @@ class Span { // Used to SFINAE-enable a function when the slice elements are const. template - using EnableIfConstView = + using EnableIfValueIsConst = typename std::enable_if::value, U>::type; // Used to SFINAE-enable a function when the slice elements are mutable. 
template - using EnableIfMutableView = + using EnableIfValueIsMutable = typename std::enable_if::value, U>::type; public: @@ -196,13 +197,34 @@ class Span { // Explicit reference constructor for a mutable `Span` type. Can be // replaced with MakeSpan() to infer the type parameter. template , - typename = EnableIfMutableView> - explicit Span(V& v) noexcept // NOLINT(runtime/references) + typename = EnableIfValueIsMutable, + typename = span_internal::EnableIfNotIsView> + explicit Span( + V& v + ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept // NOLINT(runtime/references) : Span(span_internal::GetData(v), v.size()) {} // Implicit reference constructor for a read-only `Span` type template , - typename = EnableIfConstView> + typename = EnableIfValueIsConst, + typename = span_internal::EnableIfNotIsView> + constexpr Span( + const V& v + ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept // NOLINT(runtime/explicit) + : Span(span_internal::GetData(v), v.size()) {} + + // Overloads of the above two functions that are only enabled for view types. + // This is so we can drop the ABSL_ATTRIBUTE_LIFETIME_BOUND annotation. These + // overloads must be made unique by using a different template parameter list + // (hence the = 0 for the IsView enabler). + template , + typename = EnableIfValueIsMutable, + span_internal::EnableIfIsView = 0> + explicit Span(V& v) noexcept // NOLINT(runtime/references) + : Span(span_internal::GetData(v), v.size()) {} + template , + typename = EnableIfValueIsConst, + span_internal::EnableIfIsView = 0> constexpr Span(const V& v) noexcept // NOLINT(runtime/explicit) : Span(span_internal::GetData(v), v.size()) {} @@ -242,7 +264,7 @@ class Span { // Process(ints); // template > + typename = EnableIfValueIsConst> Span(std::initializer_list v ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept // NOLINT(runtime/explicit) : Span(v.begin(), v.size()) {} @@ -398,7 +420,7 @@ class Span { // absl::MakeSpan(vec).subspan(5); // throws std::out_of_range constexpr Span subspan(size_type pos = 0, size_type len = npos) const { return (pos <= size()) - ? Span(data() + pos, span_internal::Min(size() - pos, len)) + ? 
Span(data() + pos, (std::min)(size() - pos, len)) : (base_internal::ThrowStdOutOfRange("pos > size()"), Span()); } @@ -664,7 +686,8 @@ constexpr Span MakeSpan(T* ptr, size_t size) noexcept { template Span MakeSpan(T* begin, T* end) noexcept { - return ABSL_HARDENING_ASSERT(begin <= end), Span(begin, end - begin); + return ABSL_HARDENING_ASSERT(begin <= end), + Span(begin, static_cast(end - begin)); } template diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/variant_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/variant_test.cc index cf237334da..4cd5b7a358 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/variant_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/types/variant_test.cc @@ -281,7 +281,7 @@ TEST(VariantTest, TestDefaultConstructor) { using X = variant; constexpr variant x{}; ASSERT_FALSE(x.valueless_by_exception()); - ASSERT_EQ(0, x.index()); + ASSERT_EQ(0u, x.index()); EXPECT_EQ(0, absl::get<0>(x)); EXPECT_TRUE(std::is_nothrow_default_constructible::value); } @@ -290,7 +290,7 @@ TEST(VariantTest, TestDefaultConstructor) { using X = variant; X x{}; ASSERT_FALSE(x.valueless_by_exception()); - ASSERT_EQ(0, x.index()); + ASSERT_EQ(0u, x.index()); EXPECT_EQ(5, absl::get<0>(x).value); EXPECT_FALSE(std::is_nothrow_default_constructible::value); } @@ -299,7 +299,7 @@ TEST(VariantTest, TestDefaultConstructor) { using X = variant; X x{}; ASSERT_FALSE(x.valueless_by_exception()); - ASSERT_EQ(0, x.index()); + ASSERT_EQ(0u, x.index()); EXPECT_EQ(0, absl::get<0>(x)); EXPECT_TRUE(std::is_nothrow_default_constructible::value); } @@ -308,7 +308,7 @@ TEST(VariantTest, TestDefaultConstructor) { using X = variant; X x{}; ASSERT_FALSE(x.valueless_by_exception()); - ASSERT_EQ(0, x.index()); + ASSERT_EQ(0u, x.index()); EXPECT_EQ(5, absl::get<0>(x).value); EXPECT_FALSE(std::is_nothrow_default_constructible::value); } @@ -480,7 +480,7 @@ TEST(VariantTest, InPlaceType) { ASSERT_TRUE(absl::holds_alternative(v2)); EXPECT_EQ("ABC", absl::get(v2)); - Var v3(in_place_type_t(), "ABC", 2); + Var v3(in_place_type_t(), "ABC", 2u); ASSERT_TRUE(absl::holds_alternative(v3)); EXPECT_EQ("AB", absl::get(v3)); @@ -503,7 +503,7 @@ TEST(VariantTest, InPlaceTypeVariableTemplate) { ASSERT_TRUE(absl::holds_alternative(v2)); EXPECT_EQ("ABC", absl::get(v2)); - Var v3(in_place_type, "ABC", 2); + Var v3(in_place_type, "ABC", 2u); ASSERT_TRUE(absl::holds_alternative(v3)); EXPECT_EQ("AB", absl::get(v3)); @@ -544,7 +544,7 @@ TEST(VariantTest, InPlaceIndex) { ASSERT_TRUE(absl::holds_alternative(v2)); EXPECT_EQ("ABC", absl::get(v2)); - Var v3(in_place_index_t<1>(), "ABC", 2); + Var v3(in_place_index_t<1>(), "ABC", 2u); ASSERT_TRUE(absl::holds_alternative(v3)); EXPECT_EQ("AB", absl::get(v3)); @@ -571,7 +571,7 @@ TEST(VariantTest, InPlaceIndexVariableTemplate) { ASSERT_TRUE(absl::holds_alternative(v2)); EXPECT_EQ("ABC", absl::get(v2)); - Var v3(in_place_index<1>, "ABC", 2); + Var v3(in_place_index<1>, "ABC", 2u); ASSERT_TRUE(absl::holds_alternative(v3)); EXPECT_EQ("AB", absl::get(v3)); @@ -688,11 +688,11 @@ TEST(VariantTest, TestSelfAssignment) { EXPECT_EQ(long_str, foo); variant so = long_str; - ASSERT_EQ(1, so.index()); + ASSERT_EQ(1u, so.index()); EXPECT_EQ(long_str, absl::get<1>(so)); so = *&so; - ASSERT_EQ(1, so.index()); + ASSERT_EQ(1u, so.index()); EXPECT_EQ(long_str, absl::get<1>(so)); } @@ -968,16 +968,16 @@ TEST(VariantTest, Index) { using Var = variant; Var v = 1; - EXPECT_EQ(0, v.index()); + EXPECT_EQ(0u, 
v.index()); v = "str"; - EXPECT_EQ(1, v.index()); + EXPECT_EQ(1u, v.index()); v = 0.; - EXPECT_EQ(2, v.index()); + EXPECT_EQ(2u, v.index()); Var v2 = v; - EXPECT_EQ(2, v2.index()); + EXPECT_EQ(2u, v2.index()); v2.emplace(3); - EXPECT_EQ(0, v2.index()); + EXPECT_EQ(0u, v2.index()); } TEST(VariantTest, NotValuelessByException) { @@ -1002,11 +1002,11 @@ TEST(VariantTest, IndexValuelessByException) { using Var = variant; Var v(absl::in_place_index<0>); - EXPECT_EQ(0, v.index()); + EXPECT_EQ(0u, v.index()); ToValuelessByException(v); EXPECT_EQ(absl::variant_npos, v.index()); v = "str"; - EXPECT_EQ(1, v.index()); + EXPECT_EQ(1u, v.index()); } TEST(VariantTest, ValuelessByException) { @@ -1084,18 +1084,18 @@ TEST(VariantTest, MemberSwap) { TEST(VariantTest, VariantSize) { { using Size1Variant = absl::variant; - EXPECT_EQ(1, absl::variant_size::value); - EXPECT_EQ(1, absl::variant_size::value); - EXPECT_EQ(1, absl::variant_size::value); - EXPECT_EQ(1, absl::variant_size::value); + EXPECT_EQ(1u, absl::variant_size::value); + EXPECT_EQ(1u, absl::variant_size::value); + EXPECT_EQ(1u, absl::variant_size::value); + EXPECT_EQ(1u, absl::variant_size::value); } { using Size3Variant = absl::variant; - EXPECT_EQ(3, absl::variant_size::value); - EXPECT_EQ(3, absl::variant_size::value); - EXPECT_EQ(3, absl::variant_size::value); - EXPECT_EQ(3, absl::variant_size::value); + EXPECT_EQ(3u, absl::variant_size::value); + EXPECT_EQ(3u, absl::variant_size::value); + EXPECT_EQ(3u, absl::variant_size::value); + EXPECT_EQ(3u, absl::variant_size::value); } } @@ -1799,14 +1799,14 @@ TEST(VariantTest, VisitSimple) { EXPECT_EQ("B", piece); struct StrLen { - int operator()(const char* s) const { return strlen(s); } - int operator()(const std::string& s) const { return s.size(); } + size_t operator()(const char* s) const { return strlen(s); } + size_t operator()(const std::string& s) const { return s.size(); } }; v = "SomeStr"; - EXPECT_EQ(7, absl::visit(StrLen{}, v)); + EXPECT_EQ(7u, absl::visit(StrLen{}, v)); v = std::string("VeryLargeThisTime"); - EXPECT_EQ(17, absl::visit(StrLen{}, v)); + EXPECT_EQ(17u, absl::visit(StrLen{}, v)); } TEST(VariantTest, VisitRValue) { @@ -1979,7 +1979,7 @@ TEST(VariantTest, MonostateBasic) { TEST(VariantTest, VariantMonostateDefaultConstruction) { absl::variant var; - EXPECT_EQ(var.index(), 0); + EXPECT_EQ(var.index(), 0u); } //////////////////////////////// @@ -2100,7 +2100,7 @@ TEST(VariantTest, Hash) { for (int i = 0; i < 100; ++i) { hashcodes.insert(hash(i)); } - EXPECT_GT(hashcodes.size(), 90); + EXPECT_GT(hashcodes.size(), 90u); // test const-qualified static_assert(type_traits_internal::IsHashable>::value, @@ -2312,9 +2312,9 @@ TEST(VariantTest, TestRvalueConversion) { EXPECT_EQ(42, absl::get(variant2)); variant2 = - ConvertVariantTo>(variant(42)); + ConvertVariantTo>(variant(42u)); ASSERT_TRUE(absl::holds_alternative(variant2)); - EXPECT_EQ(42, absl::get(variant2)); + EXPECT_EQ(42u, absl::get(variant2)); #endif // !ABSL_USES_STD_VARIANT variant variant3( @@ -2361,10 +2361,10 @@ TEST(VariantTest, TestLvalueConversion) { ASSERT_TRUE(absl::holds_alternative(variant2)); EXPECT_EQ(42, absl::get(variant2)); - variant source6(42); + variant source6(42u); variant2 = ConvertVariantTo>(source6); ASSERT_TRUE(absl::holds_alternative(variant2)); - EXPECT_EQ(42, absl::get(variant2)); + EXPECT_EQ(42u, absl::get(variant2)); #endif variant source7((Convertible1())); @@ -2455,8 +2455,8 @@ TEST(VariantTest, TestRvalueConversionViaConvertVariantTo) { EXPECT_THAT(absl::get_if(&variant2), Pointee(42)); 
variant2 = - ConvertVariantTo>(variant(42)); - EXPECT_THAT(absl::get_if(&variant2), Pointee(42)); + ConvertVariantTo>(variant(42u)); + EXPECT_THAT(absl::get_if(&variant2), Pointee(42u)); #endif variant variant3( @@ -2499,9 +2499,9 @@ TEST(VariantTest, TestLvalueConversionViaConvertVariantTo) { ConvertVariantTo>(source5)); EXPECT_THAT(absl::get_if(&variant2), Pointee(42)); - variant source6(42); + variant source6(42u); variant2 = ConvertVariantTo>(source6); - EXPECT_THAT(absl::get_if(&variant2), Pointee(42)); + EXPECT_THAT(absl::get_if(&variant2), Pointee(42u)); #endif // !ABSL_USES_STD_VARIANT variant source7((Convertible1())); @@ -2570,7 +2570,7 @@ TEST(VariantTest, TestVectorOfMoveonlyVariant) { vec.reserve(3); auto another_vec = absl::move(vec); // As a sanity check, verify vector contents. - ASSERT_EQ(2, another_vec.size()); + ASSERT_EQ(2u, another_vec.size()); EXPECT_EQ(42, *absl::get>(another_vec[0])); EXPECT_EQ("Hello", absl::get(another_vec[1])); } diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/utility/utility_test.cc b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/utility/utility_test.cc index f044ad644a..2f0509aa4d 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/utility/utility_test.cc +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/absl/utility/utility_test.cc @@ -1,4 +1,4 @@ -// Copyright 2017 The Abseil Authors. +// Copyright 2022 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,10 +14,12 @@ #include "absl/utility/utility.h" +#include #include #include #include #include +#include #include #include "gmock/gmock.h" @@ -35,10 +37,10 @@ namespace { // Both the unused variables and the name length warnings are due to calls // to absl::make_index_sequence with very large values, creating very long type // names. The resulting warnings are so long they make build output unreadable. -#pragma warning( push ) -#pragma warning( disable : 4503 ) // decorated name length exceeded -#pragma warning( disable : 4101 ) // unreferenced local variable -#endif // _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4503) // decorated name length exceeded +#pragma warning(disable : 4101) // unreferenced local variable +#endif // _MSC_VER using ::testing::ElementsAre; using ::testing::Pointee; @@ -227,8 +229,7 @@ TEST(ApplyTest, NonCopyableArgument) { } TEST(ApplyTest, NonCopyableResult) { - EXPECT_THAT(absl::apply(Factory, std::make_tuple(42)), - ::testing::Pointee(42)); + EXPECT_THAT(absl::apply(Factory, std::make_tuple(42)), Pointee(42)); } TEST(ApplyTest, VoidResult) { absl::apply(NoOp, std::tuple<>()); } @@ -373,4 +374,3 @@ TEST(MakeFromTupleTest, Pair) { } } // namespace - diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/absl_alternate_options.h b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/absl_alternate_options.h index 29b020d9fa..82d2ecf864 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/absl_alternate_options.h +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/absl_alternate_options.h @@ -15,8 +15,8 @@ // Alternate options.h file, used in continuous integration testing to exercise // option settings not used by default. 
-#ifndef ABSL_BASE_OPTIONS_H_ -#define ABSL_BASE_OPTIONS_H_ +#ifndef ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_ +#define ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_ #define ABSL_OPTION_USE_STD_ANY 0 #define ABSL_OPTION_USE_STD_OPTIONAL 0 @@ -26,4 +26,4 @@ #define ABSL_OPTION_INLINE_NAMESPACE_NAME ns #define ABSL_OPTION_HARDENED 1 -#endif // ABSL_BASE_OPTIONS_H_ +#endif // ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_common.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_common.sh index 51f310693e..372038a520 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_common.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_common.sh @@ -14,7 +14,7 @@ # The commit of GoogleTest to be used in the CMake tests in this directory. # Keep this in sync with the commit in the WORKSPACE file. -readonly ABSL_GOOGLETEST_COMMIT="8d51ffdfab10b3fba636ae69bc03da4b54f8c235" +readonly ABSL_GOOGLETEST_COMMIT="86add13493e5c881d7e4ba77fb91c1f57752b3a4" # Avoid depending on GitHub by looking for a cached copy of the commit first. if [[ -r "${KOKORO_GFILE_DIR:-}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_install_test.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_install_test.sh index 97ed8478e0..ab3b86f042 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_install_test.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/cmake_install_test.sh @@ -29,6 +29,18 @@ source "${ABSEIL_ROOT}/ci/cmake_common.sh" source "${ABSEIL_ROOT}/ci/linux_docker_containers.sh" readonly DOCKER_CONTAINER=${LINUX_GCC_LATEST_CONTAINER} +# Verify that everything works with the standard "cmake && make && make install" +# without building tests or requiring GoogleTest. +time docker run \ + --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp-ro,readonly \ + --tmpfs=/buildfs:exec \ + --workdir=/buildfs \ + --rm \ + ${DOCKER_EXTRA_ARGS:-} \ + ${DOCKER_CONTAINER} \ + /bin/bash -c "cmake /abseil-cpp-ro && make -j$(nproc) && make install" + +# Verify that a more complicated project works. 
for link_type in ${LINK_TYPE}; do time docker run \ --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp-ro,readonly \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh index 5245933a5f..f9c146b05b 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh @@ -25,7 +25,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi if [[ -z ${STD:-} ]]; then - STD="c++11 c++14 c++17 c++20" + STD="c++14 c++17 c++20" fi if [[ -z ${COMPILATION_MODE:-} ]]; then @@ -70,13 +70,14 @@ for std in ${STD}; do --rm \ -e CC="/opt/llvm/clang/bin/clang" \ -e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \ - -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \ - -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/c++/v1" \ + -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu" \ + -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx/include/c++/v1" \ ${DOCKER_EXTRA_ARGS:-} \ ${DOCKER_CONTAINER} \ /usr/local/bin/bazel test ... \ --compilation_mode="${compilation_mode}" \ --copt="${exceptions_mode}" \ + --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \ --copt="-fsanitize=address" \ --copt="-fsanitize=float-divide-by-zero" \ --copt="-fsanitize=nullability" \ @@ -84,6 +85,7 @@ for std in ${STD}; do --copt="-fno-sanitize-blacklist" \ --copt=-Werror \ --distdir="/bazel-distdir" \ + --features=external_include_paths \ --keep_going \ --linkopt="-fsanitize=address" \ --linkopt="-fsanitize-link-c++-runtime" \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh index e0fe653de7..38b2d74401 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh @@ -25,7 +25,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi if [[ -z ${STD:-} ]]; then - STD="c++11 c++14 c++17 c++20" + STD="c++14 c++17 c++20" fi if [[ -z ${COMPILATION_MODE:-} ]]; then @@ -71,8 +71,8 @@ for std in ${STD}; do --rm \ -e CC="/opt/llvm/clang/bin/clang" \ -e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \ - -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \ - -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/c++/v1" \ + -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu" \ + -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx/include/c++/v1" \ ${DOCKER_EXTRA_ARGS:-} \ ${DOCKER_CONTAINER} \ /bin/sh -c " @@ -83,9 +83,11 @@ for std in ${STD}; do /usr/local/bin/bazel test ... 
\ --compilation_mode=\"${compilation_mode}\" \ --copt=\"${exceptions_mode}\" \ + --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \ --copt=-Werror \ --define=\"absl=1\" \ --distdir=\"/bazel-distdir\" \ + --features=external_include_paths \ --keep_going \ --show_timestamps \ --test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh index 555f6b1c2a..5652ebe14d 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh @@ -25,7 +25,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi if [[ -z ${STD:-} ]]; then - STD="c++11 c++14 c++17 c++20" + STD="c++14 c++17 c++20" fi if [[ -z ${COMPILATION_MODE:-} ]]; then @@ -70,18 +70,20 @@ for std in ${STD}; do --rm \ -e CC="/opt/llvm/clang/bin/clang" \ -e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \ - -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx-tsan/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib" \ - -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx-tsan/include/c++/v1" \ + -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx-tsan/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib/x86_64-unknown-linux-gnu" \ + -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx-tsan/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx-tsan/include/c++/v1" \ ${DOCKER_EXTRA_ARGS:-} \ ${DOCKER_CONTAINER} \ /usr/local/bin/bazel test ... \ --build_tag_filters="-notsan" \ --compilation_mode="${compilation_mode}" \ --copt="${exceptions_mode}" \ + --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \ --copt="-fsanitize=thread" \ --copt="-fno-sanitize-blacklist" \ --copt=-Werror \ --distdir="/bazel-distdir" \ + --features=external_include_paths \ --keep_going \ --linkopt="-fsanitize=thread" \ --show_timestamps \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh index 36fdf82c1d..720e776deb 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh @@ -25,7 +25,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi if [[ -z ${STD:-} ]]; then - STD="c++11 c++14 c++17" + STD="c++14 c++17" fi if [[ -z ${COMPILATION_MODE:-} ]]; then @@ -75,10 +75,12 @@ for std in ${STD}; do /usr/local/bin/bazel test ... \ --compilation_mode="${compilation_mode}" \ --copt="--gcc-toolchain=/usr/local" \ + --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \ --copt="${exceptions_mode}" \ --copt=-Werror \ --define="absl=1" \ --distdir="/bazel-distdir" \ + --features=external_include_paths \ --keep_going \ --linkopt="--gcc-toolchain=/usr/local" \ --show_timestamps \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_docker_containers.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_docker_containers.sh index 32865b8348..f55e153b85 100644 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_docker_containers.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_docker_containers.sh @@ -16,6 +16,6 @@ # Test scripts should source this file to get the identifiers. 
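The --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" flag added to the Bazel invocations above compiles the tests with GoogleTest's pre-1.10 "TestCase" spellings removed, leaving only the newer "TestSuite" names. A small sketch of the effect, assuming GoogleTest 1.10 or newer; the test and parameter names are made up for illustration:

#include "gtest/gtest.h"

class ParityTest : public ::testing::TestWithParam<int> {};

TEST_P(ParityTest, IsNonNegative) { EXPECT_GE(GetParam(), 0); }

// Current spelling: available with or without the define.
INSTANTIATE_TEST_SUITE_P(SmallValues, ParityTest, ::testing::Values(0, 1, 2));

// Legacy spelling: not defined once GTEST_REMOVE_LEGACY_TEST_CASEAPI_ is set,
// so the following line would fail to compile under these CI flags.
// INSTANTIATE_TEST_CASE_P(SmallValues, ParityTest, ::testing::Values(0, 1, 2));

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}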
readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20201026" -readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20210617" -readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20210617" -readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20210617" +readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217" +readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217" +readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20220621" diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh index 54ab68a37e..68b3999485 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh @@ -25,7 +25,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi if [[ -z ${STD:-} ]]; then - STD="c++11 c++14" + STD="c++14" fi if [[ -z ${COMPILATION_MODE:-} ]]; then @@ -75,9 +75,11 @@ for std in ${STD}; do /usr/local/bin/bazel test ... \ --compilation_mode="${compilation_mode}" \ --copt="${exceptions_mode}" \ + --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \ --copt=-Werror \ --define="absl=1" \ --distdir="/bazel-distdir" \ + --features=external_include_paths \ --keep_going \ --show_timestamps \ --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh index 0555ecedb5..091acb3362 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh @@ -25,7 +25,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi if [[ -z ${STD:-} ]]; then - STD="c++11 c++14 c++17 c++20" + STD="c++14 c++17 c++20" fi if [[ -z ${COMPILATION_MODE:-} ]]; then @@ -81,9 +81,11 @@ for std in ${STD}; do /usr/local/bin/bazel test ... 
\ --compilation_mode=\"${compilation_mode}\" \ --copt=\"${exceptions_mode}\" \ + --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \ --copt=-Werror \ --define=\"absl=1\" \ --distdir=\"/bazel-distdir\" \ + --features=external_include_paths \ --keep_going \ --show_timestamps \ --test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \ diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh index ab06aa0574..1f72123613 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh @@ -23,7 +23,7 @@ fi source "${ABSEIL_ROOT}/ci/cmake_common.sh" if [[ -z ${ABSL_CMAKE_CXX_STANDARDS:-} ]]; then - ABSL_CMAKE_CXX_STANDARDS="11 14 17 20" + ABSL_CMAKE_CXX_STANDARDS="14 17 20" fi if [[ -z ${ABSL_CMAKE_BUILD_TYPES:-} ]]; then @@ -54,11 +54,12 @@ for std in ${ABSL_CMAKE_CXX_STANDARDS}; do cmake /abseil-cpp \ -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \ -DBUILD_SHARED_LIBS=${build_shared} \ - -DBUILD_TESTING=ON \ + -DABSL_BUILD_TESTING=ON \ -DCMAKE_BUILD_TYPE=${compilation_mode} \ -DCMAKE_CXX_STANDARD=${std} \ -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \ make -j$(nproc) && \ + TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \ ctest -j$(nproc) --output-on-failure" done done diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc_alpine_cmake.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc_alpine_cmake.sh index bce27d295f..b784456fb7 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc_alpine_cmake.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/linux_gcc_alpine_cmake.sh @@ -23,7 +23,7 @@ fi source "${ABSEIL_ROOT}/ci/cmake_common.sh" if [[ -z ${ABSL_CMAKE_CXX_STANDARDS:-} ]]; then - ABSL_CMAKE_CXX_STANDARDS="11 14 17" + ABSL_CMAKE_CXX_STANDARDS="14 17" fi if [[ -z ${ABSL_CMAKE_BUILD_TYPES:-} ]]; then @@ -53,11 +53,12 @@ for std in ${ABSL_CMAKE_CXX_STANDARDS}; do /bin/sh -c " cmake /abseil-cpp \ -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \ - -DBUILD_TESTING=ON \ + -DABSL_BUILD_TESTING=ON \ -DCMAKE_BUILD_TYPE=${compilation_mode} \ -DCMAKE_CXX_STANDARD=${std} \ -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \ make -j$(nproc) && \ + TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \ ctest -j$(nproc) --output-on-failure" done done diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_bazel.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_bazel.sh index 9e14e66039..04c9a1a2be 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_bazel.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_bazel.sh @@ -24,7 +24,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then fi # If we are running on Kokoro, check for a versioned Bazel binary. -KOKORO_GFILE_BAZEL_BIN="bazel-3.7.0-darwin-x86_64" +KOKORO_GFILE_BAZEL_BIN="bazel-5.1.1-darwin-x86_64" if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f ${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN} ]]; then BAZEL_BIN="${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN}" chmod +x ${BAZEL_BIN} @@ -32,6 +32,13 @@ else BAZEL_BIN="bazel" fi +# Avoid depending on external sites like GitHub by checking --distdir for +# external dependencies first. 
+# https://docs.bazel.build/versions/master/guide.html#distdir +if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then + BAZEL_EXTRA_ARGS="--distdir=${KOKORO_GFILE_DIR}/distdir ${BAZEL_EXTRA_ARGS:-}" +fi + # Print the compiler and Bazel versions. echo "---------------" gcc -v @@ -46,9 +53,13 @@ if [[ -n "${ALTERNATE_OPTIONS:-}" ]]; then fi ${BAZEL_BIN} test ... \ - --copt=-Werror \ + --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \ + --copt="-Werror" \ + --cxxopt="-std=c++14" \ + --features=external_include_paths \ --keep_going \ --show_timestamps \ --test_env="TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo" \ --test_output=errors \ - --test_tag_filters=-benchmark + --test_tag_filters=-benchmark \ + ${BAZEL_EXTRA_ARGS:-} diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_cmake.sh b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_cmake.sh index 2a870cf4e9..690f86b842 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_cmake.sh +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/ci/macos_xcode_cmake.sh @@ -45,12 +45,13 @@ for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do time cmake ${ABSEIL_ROOT} \ -GXcode \ -DBUILD_SHARED_LIBS=${build_shared} \ - -DBUILD_TESTING=ON \ + -DABSL_BUILD_TESTING=ON \ -DCMAKE_BUILD_TYPE=${compilation_mode} \ - -DCMAKE_CXX_STANDARD=11 \ + -DCMAKE_CXX_STANDARD=14 \ -DCMAKE_MODULE_LINKER_FLAGS="-Wl,--no-undefined" \ -DABSL_GOOGLETEST_DOWNLOAD_URL="${ABSL_GOOGLETEST_DOWNLOAD_URL}" time cmake --build . - time ctest -C ${compilation_mode} --output-on-failure + time TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo \ + ctest -C ${compilation_mode} --output-on-failure done done diff --git a/third-party/webrtc/dependencies/third_party/abseil-cpp/conanfile.py b/third-party/webrtc/dependencies/third_party/abseil-cpp/conanfile.py index 926ec5ccd6..4bbc62eedd 100755 --- a/third-party/webrtc/dependencies/third_party/abseil-cpp/conanfile.py +++ b/third-party/webrtc/dependencies/third_party/abseil-cpp/conanfile.py @@ -30,7 +30,7 @@ class AbseilConan(ConanFile): raise ConanInvalidConfiguration("Abseil does not support MSVC < 14") def build(self): - tools.replace_in_file("CMakeLists.txt", "project(absl CXX)", "project(absl CXX)\ninclude(conanbuildinfo.cmake)\nconan_basic_setup()") + tools.replace_in_file("CMakeLists.txt", "project(absl LANGUAGES CXX)", "project(absl LANGUAGES CXX)\ninclude(conanbuildinfo.cmake)\nconan_basic_setup()") cmake = CMake(self) cmake.definitions["BUILD_TESTING"] = False cmake.configure() diff --git a/third-party/webrtc/dependencies/third_party/opus/src/include/opus.h b/third-party/webrtc/dependencies/third_party/opus/src/include/opus.h new file mode 100644 index 0000000000..d282f21d25 --- /dev/null +++ b/third-party/webrtc/dependencies/third_party/opus/src/include/opus.h @@ -0,0 +1,981 @@ +/* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited + Written by Jean-Marc Valin and Koen Vos */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus.h + * @brief Opus reference implementation API + */ + +#ifndef OPUS_H +#define OPUS_H + +#include "opus_types.h" +#include "opus_defines.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @mainpage Opus + * + * The Opus codec is designed for interactive speech and audio transmission over the Internet. + * It is designed by the IETF Codec Working Group and incorporates technology from + * Skype's SILK codec and Xiph.Org's CELT codec. + * + * The Opus codec is designed to handle a wide range of interactive audio applications, + * including Voice over IP, videoconferencing, in-game chat, and even remote live music + * performances. It can scale from low bit-rate narrowband speech to very high quality + * stereo music. Its main features are: + + * @li Sampling rates from 8 to 48 kHz + * @li Bit-rates from 6 kb/s to 510 kb/s + * @li Support for both constant bit-rate (CBR) and variable bit-rate (VBR) + * @li Audio bandwidth from narrowband to full-band + * @li Support for speech and music + * @li Support for mono and stereo + * @li Support for multichannel (up to 255 channels) + * @li Frame sizes from 2.5 ms to 60 ms + * @li Good loss robustness and packet loss concealment (PLC) + * @li Floating point and fixed-point implementation + * + * Documentation sections: + * @li @ref opus_encoder + * @li @ref opus_decoder + * @li @ref opus_repacketizer + * @li @ref opus_multistream + * @li @ref opus_libinfo + * @li @ref opus_custom + */ + +/** @defgroup opus_encoder Opus Encoder + * @{ + * + * @brief This page describes the process and functions used to encode Opus. + * + * Since Opus is a stateful codec, the encoding process starts with creating an encoder + * state. This can be done with: + * + * @code + * int error; + * OpusEncoder *enc; + * enc = opus_encoder_create(Fs, channels, application, &error); + * @endcode + * + * From this point, @c enc can be used for encoding an audio stream. An encoder state + * @b must @b not be used for more than one stream at the same time. Similarly, the encoder + * state @b must @b not be re-initialized for each frame. + * + * While opus_encoder_create() allocates memory for the state, it's also possible + * to initialize pre-allocated memory: + * + * @code + * int size; + * int error; + * OpusEncoder *enc; + * size = opus_encoder_get_size(channels); + * enc = malloc(size); + * error = opus_encoder_init(enc, Fs, channels, application); + * @endcode + * + * where opus_encoder_get_size() returns the required size for the encoder state. Note that + * future versions of this code may change the size, so no assuptions should be made about it. + * + * The encoder state is always continuous in memory and only a shallow copy is sufficient + * to copy it (e.g. 
memcpy()) + * + * It is possible to change some of the encoder's settings using the opus_encoder_ctl() + * interface. All these settings already default to the recommended value, so they should + * only be changed when necessary. The most common settings one may want to change are: + * + * @code + * opus_encoder_ctl(enc, OPUS_SET_BITRATE(bitrate)); + * opus_encoder_ctl(enc, OPUS_SET_COMPLEXITY(complexity)); + * opus_encoder_ctl(enc, OPUS_SET_SIGNAL(signal_type)); + * @endcode + * + * where + * + * @arg bitrate is in bits per second (b/s) + * @arg complexity is a value from 1 to 10, where 1 is the lowest complexity and 10 is the highest + * @arg signal_type is either OPUS_AUTO (default), OPUS_SIGNAL_VOICE, or OPUS_SIGNAL_MUSIC + * + * See @ref opus_encoderctls and @ref opus_genericctls for a complete list of parameters that can be set or queried. Most parameters can be set or changed at any time during a stream. + * + * To encode a frame, opus_encode() or opus_encode_float() must be called with exactly one frame (2.5, 5, 10, 20, 40 or 60 ms) of audio data: + * @code + * len = opus_encode(enc, audio_frame, frame_size, packet, max_packet); + * @endcode + * + * where + *